diff --git a/.gitattributes b/.gitattributes index 28df5f900b358436f0267334b3e3e9af33f917ba..b3a0a26f562c50b2287207e9fbc3c4c8ff520ccd 100644 --- a/.gitattributes +++ b/.gitattributes @@ -53,3 +53,8 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text *.jpg filter=lfs diff=lfs merge=lfs -text *.jpeg filter=lfs diff=lfs merge=lfs -text *.webp filter=lfs diff=lfs merge=lfs -text +zavodik/nodes/ComfyUI_tinyterraNodes-main/arial.ttf filter=lfs diff=lfs merge=lfs -text +zavodik/nodes/ComfyUI_tinyterraNodes/arial.ttf filter=lfs diff=lfs merge=lfs -text +zavodik/nodes/ComfyUI_tinyterraNodes/ttNpy/__pycache__/tinyterraNodes.cpython-313.pyc filter=lfs diff=lfs merge=lfs -text +zavodik/nodes/ComfyUI-Impact-Pack/modules/impact/__pycache__/core.cpython-313.pyc filter=lfs diff=lfs merge=lfs -text +zavodik/nodes/ComfyUI-Impact-Pack/modules/impact/__pycache__/impact_pack.cpython-313.pyc filter=lfs diff=lfs merge=lfs -text diff --git a/zavodik/nodes/ComfyUI-Impact-Pack/.github/workflows/publish.yml b/zavodik/nodes/ComfyUI-Impact-Pack/.github/workflows/publish.yml new file mode 100644 index 0000000000000000000000000000000000000000..edb70ae1d5957ebd887d49b613970064f13614ff --- /dev/null +++ b/zavodik/nodes/ComfyUI-Impact-Pack/.github/workflows/publish.yml @@ -0,0 +1,25 @@ +name: Publish to Comfy registry +on: + workflow_dispatch: + push: + branches: + - main + paths: + - "pyproject.toml" + +permissions: + issues: write + +jobs: + publish-node: + name: Publish Custom Node to registry + runs-on: ubuntu-latest + if: ${{ github.repository_owner == 'ltdrdata' }} + steps: + - name: Check out code + uses: actions/checkout@v4 + - name: Publish Custom Node + uses: Comfy-Org/publish-node-action@v1 + with: + ## Add your own personal access token to your Github Repository secrets and reference it here. + personal_access_token: ${{ secrets.REGISTRY_ACCESS_TOKEN }} diff --git a/zavodik/nodes/ComfyUI-Impact-Pack/.gitignore b/zavodik/nodes/ComfyUI-Impact-Pack/.gitignore new file mode 100644 index 0000000000000000000000000000000000000000..a38504f62c87ff6d7b350153920a95e93ce31590 --- /dev/null +++ b/zavodik/nodes/ComfyUI-Impact-Pack/.gitignore @@ -0,0 +1,12 @@ +__pycache__ +*.ini +wildcards/** +.vscode/ +.idea/ +subpack +impact_subpack +*.txt +*.yaml +!requirements.txt +!LICENSE.txt +.claude/ \ No newline at end of file diff --git a/zavodik/nodes/ComfyUI-Impact-Pack/.gitmodules b/zavodik/nodes/ComfyUI-Impact-Pack/.gitmodules new file mode 100644 index 0000000000000000000000000000000000000000..dbbc2e070a45bc6b72ba8492a28d9c29737ef044 --- /dev/null +++ b/zavodik/nodes/ComfyUI-Impact-Pack/.gitmodules @@ -0,0 +1,3 @@ +[submodule "subpack"] + path = subpack + url = https://github.com/ltdrdata/ComfyUI-Impact-Subpack diff --git a/zavodik/nodes/ComfyUI-Impact-Pack/LICENSE.txt b/zavodik/nodes/ComfyUI-Impact-Pack/LICENSE.txt new file mode 100644 index 0000000000000000000000000000000000000000..3877ae0a7ff6f94ac222fd704e112723db776114 --- /dev/null +++ b/zavodik/nodes/ComfyUI-Impact-Pack/LICENSE.txt @@ -0,0 +1,674 @@ + GNU GENERAL PUBLIC LICENSE + Version 3, 29 June 2007 + + Copyright (C) 2007 Free Software Foundation, Inc. + Everyone is permitted to copy and distribute verbatim copies + of this license document, but changing it is not allowed. + + Preamble + + The GNU General Public License is a free, copyleft license for +software and other kinds of works. + + The licenses for most software and other practical works are designed +to take away your freedom to share and change the works. 
By contrast, +the GNU General Public License is intended to guarantee your freedom to +share and change all versions of a program--to make sure it remains free +software for all its users. We, the Free Software Foundation, use the +GNU General Public License for most of our software; it applies also to +any other work released this way by its authors. You can apply it to +your programs, too. + + When we speak of free software, we are referring to freedom, not +price. Our General Public Licenses are designed to make sure that you +have the freedom to distribute copies of free software (and charge for +them if you wish), that you receive source code or can get it if you +want it, that you can change the software or use pieces of it in new +free programs, and that you know you can do these things. + + To protect your rights, we need to prevent others from denying you +these rights or asking you to surrender the rights. Therefore, you have +certain responsibilities if you distribute copies of the software, or if +you modify it: responsibilities to respect the freedom of others. + + For example, if you distribute copies of such a program, whether +gratis or for a fee, you must pass on to the recipients the same +freedoms that you received. You must make sure that they, too, receive +or can get the source code. And you must show them these terms so they +know their rights. + + Developers that use the GNU GPL protect your rights with two steps: +(1) assert copyright on the software, and (2) offer you this License +giving you legal permission to copy, distribute and/or modify it. + + For the developers' and authors' protection, the GPL clearly explains +that there is no warranty for this free software. For both users' and +authors' sake, the GPL requires that modified versions be marked as +changed, so that their problems will not be attributed erroneously to +authors of previous versions. + + Some devices are designed to deny users access to install or run +modified versions of the software inside them, although the manufacturer +can do so. This is fundamentally incompatible with the aim of +protecting users' freedom to change the software. The systematic +pattern of such abuse occurs in the area of products for individuals to +use, which is precisely where it is most unacceptable. Therefore, we +have designed this version of the GPL to prohibit the practice for those +products. If such problems arise substantially in other domains, we +stand ready to extend this provision to those domains in future versions +of the GPL, as needed to protect the freedom of users. + + Finally, every program is threatened constantly by software patents. +States should not allow patents to restrict development and use of +software on general-purpose computers, but in those that do, we wish to +avoid the special danger that patents applied to a free program could +make it effectively proprietary. To prevent this, the GPL assures that +patents cannot be used to render the program non-free. + + The precise terms and conditions for copying, distribution and +modification follow. + + TERMS AND CONDITIONS + + 0. Definitions. + + "This License" refers to version 3 of the GNU General Public License. + + "Copyright" also means copyright-like laws that apply to other kinds of +works, such as semiconductor masks. + + "The Program" refers to any copyrightable work licensed under this +License. Each licensee is addressed as "you". "Licensees" and +"recipients" may be individuals or organizations. 
+ + To "modify" a work means to copy from or adapt all or part of the work +in a fashion requiring copyright permission, other than the making of an +exact copy. The resulting work is called a "modified version" of the +earlier work or a work "based on" the earlier work. + + A "covered work" means either the unmodified Program or a work based +on the Program. + + To "propagate" a work means to do anything with it that, without +permission, would make you directly or secondarily liable for +infringement under applicable copyright law, except executing it on a +computer or modifying a private copy. Propagation includes copying, +distribution (with or without modification), making available to the +public, and in some countries other activities as well. + + To "convey" a work means any kind of propagation that enables other +parties to make or receive copies. Mere interaction with a user through +a computer network, with no transfer of a copy, is not conveying. + + An interactive user interface displays "Appropriate Legal Notices" +to the extent that it includes a convenient and prominently visible +feature that (1) displays an appropriate copyright notice, and (2) +tells the user that there is no warranty for the work (except to the +extent that warranties are provided), that licensees may convey the +work under this License, and how to view a copy of this License. If +the interface presents a list of user commands or options, such as a +menu, a prominent item in the list meets this criterion. + + 1. Source Code. + + The "source code" for a work means the preferred form of the work +for making modifications to it. "Object code" means any non-source +form of a work. + + A "Standard Interface" means an interface that either is an official +standard defined by a recognized standards body, or, in the case of +interfaces specified for a particular programming language, one that +is widely used among developers working in that language. + + The "System Libraries" of an executable work include anything, other +than the work as a whole, that (a) is included in the normal form of +packaging a Major Component, but which is not part of that Major +Component, and (b) serves only to enable use of the work with that +Major Component, or to implement a Standard Interface for which an +implementation is available to the public in source code form. A +"Major Component", in this context, means a major essential component +(kernel, window system, and so on) of the specific operating system +(if any) on which the executable work runs, or a compiler used to +produce the work, or an object code interpreter used to run it. + + The "Corresponding Source" for a work in object code form means all +the source code needed to generate, install, and (for an executable +work) run the object code and to modify the work, including scripts to +control those activities. However, it does not include the work's +System Libraries, or general-purpose tools or generally available free +programs which are used unmodified in performing those activities but +which are not part of the work. For example, Corresponding Source +includes interface definition files associated with source files for +the work, and the source code for shared libraries and dynamically +linked subprograms that the work is specifically designed to require, +such as by intimate data communication or control flow between those +subprograms and other parts of the work. 
+ + The Corresponding Source need not include anything that users +can regenerate automatically from other parts of the Corresponding +Source. + + The Corresponding Source for a work in source code form is that +same work. + + 2. Basic Permissions. + + All rights granted under this License are granted for the term of +copyright on the Program, and are irrevocable provided the stated +conditions are met. This License explicitly affirms your unlimited +permission to run the unmodified Program. The output from running a +covered work is covered by this License only if the output, given its +content, constitutes a covered work. This License acknowledges your +rights of fair use or other equivalent, as provided by copyright law. + + You may make, run and propagate covered works that you do not +convey, without conditions so long as your license otherwise remains +in force. You may convey covered works to others for the sole purpose +of having them make modifications exclusively for you, or provide you +with facilities for running those works, provided that you comply with +the terms of this License in conveying all material for which you do +not control copyright. Those thus making or running the covered works +for you must do so exclusively on your behalf, under your direction +and control, on terms that prohibit them from making any copies of +your copyrighted material outside their relationship with you. + + Conveying under any other circumstances is permitted solely under +the conditions stated below. Sublicensing is not allowed; section 10 +makes it unnecessary. + + 3. Protecting Users' Legal Rights From Anti-Circumvention Law. + + No covered work shall be deemed part of an effective technological +measure under any applicable law fulfilling obligations under article +11 of the WIPO copyright treaty adopted on 20 December 1996, or +similar laws prohibiting or restricting circumvention of such +measures. + + When you convey a covered work, you waive any legal power to forbid +circumvention of technological measures to the extent such circumvention +is effected by exercising rights under this License with respect to +the covered work, and you disclaim any intention to limit operation or +modification of the work as a means of enforcing, against the work's +users, your or third parties' legal rights to forbid circumvention of +technological measures. + + 4. Conveying Verbatim Copies. + + You may convey verbatim copies of the Program's source code as you +receive it, in any medium, provided that you conspicuously and +appropriately publish on each copy an appropriate copyright notice; +keep intact all notices stating that this License and any +non-permissive terms added in accord with section 7 apply to the code; +keep intact all notices of the absence of any warranty; and give all +recipients a copy of this License along with the Program. + + You may charge any price or no price for each copy that you convey, +and you may offer support or warranty protection for a fee. + + 5. Conveying Modified Source Versions. + + You may convey a work based on the Program, or the modifications to +produce it from the Program, in the form of source code under the +terms of section 4, provided that you also meet all of these conditions: + + a) The work must carry prominent notices stating that you modified + it, and giving a relevant date. + + b) The work must carry prominent notices stating that it is + released under this License and any conditions added under section + 7. 
This requirement modifies the requirement in section 4 to + "keep intact all notices". + + c) You must license the entire work, as a whole, under this + License to anyone who comes into possession of a copy. This + License will therefore apply, along with any applicable section 7 + additional terms, to the whole of the work, and all its parts, + regardless of how they are packaged. This License gives no + permission to license the work in any other way, but it does not + invalidate such permission if you have separately received it. + + d) If the work has interactive user interfaces, each must display + Appropriate Legal Notices; however, if the Program has interactive + interfaces that do not display Appropriate Legal Notices, your + work need not make them do so. + + A compilation of a covered work with other separate and independent +works, which are not by their nature extensions of the covered work, +and which are not combined with it such as to form a larger program, +in or on a volume of a storage or distribution medium, is called an +"aggregate" if the compilation and its resulting copyright are not +used to limit the access or legal rights of the compilation's users +beyond what the individual works permit. Inclusion of a covered work +in an aggregate does not cause this License to apply to the other +parts of the aggregate. + + 6. Conveying Non-Source Forms. + + You may convey a covered work in object code form under the terms +of sections 4 and 5, provided that you also convey the +machine-readable Corresponding Source under the terms of this License, +in one of these ways: + + a) Convey the object code in, or embodied in, a physical product + (including a physical distribution medium), accompanied by the + Corresponding Source fixed on a durable physical medium + customarily used for software interchange. + + b) Convey the object code in, or embodied in, a physical product + (including a physical distribution medium), accompanied by a + written offer, valid for at least three years and valid for as + long as you offer spare parts or customer support for that product + model, to give anyone who possesses the object code either (1) a + copy of the Corresponding Source for all the software in the + product that is covered by this License, on a durable physical + medium customarily used for software interchange, for a price no + more than your reasonable cost of physically performing this + conveying of source, or (2) access to copy the + Corresponding Source from a network server at no charge. + + c) Convey individual copies of the object code with a copy of the + written offer to provide the Corresponding Source. This + alternative is allowed only occasionally and noncommercially, and + only if you received the object code with such an offer, in accord + with subsection 6b. + + d) Convey the object code by offering access from a designated + place (gratis or for a charge), and offer equivalent access to the + Corresponding Source in the same way through the same place at no + further charge. You need not require recipients to copy the + Corresponding Source along with the object code. If the place to + copy the object code is a network server, the Corresponding Source + may be on a different server (operated by you or a third party) + that supports equivalent copying facilities, provided you maintain + clear directions next to the object code saying where to find the + Corresponding Source. 
Regardless of what server hosts the + Corresponding Source, you remain obligated to ensure that it is + available for as long as needed to satisfy these requirements. + + e) Convey the object code using peer-to-peer transmission, provided + you inform other peers where the object code and Corresponding + Source of the work are being offered to the general public at no + charge under subsection 6d. + + A separable portion of the object code, whose source code is excluded +from the Corresponding Source as a System Library, need not be +included in conveying the object code work. + + A "User Product" is either (1) a "consumer product", which means any +tangible personal property which is normally used for personal, family, +or household purposes, or (2) anything designed or sold for incorporation +into a dwelling. In determining whether a product is a consumer product, +doubtful cases shall be resolved in favor of coverage. For a particular +product received by a particular user, "normally used" refers to a +typical or common use of that class of product, regardless of the status +of the particular user or of the way in which the particular user +actually uses, or expects or is expected to use, the product. A product +is a consumer product regardless of whether the product has substantial +commercial, industrial or non-consumer uses, unless such uses represent +the only significant mode of use of the product. + + "Installation Information" for a User Product means any methods, +procedures, authorization keys, or other information required to install +and execute modified versions of a covered work in that User Product from +a modified version of its Corresponding Source. The information must +suffice to ensure that the continued functioning of the modified object +code is in no case prevented or interfered with solely because +modification has been made. + + If you convey an object code work under this section in, or with, or +specifically for use in, a User Product, and the conveying occurs as +part of a transaction in which the right of possession and use of the +User Product is transferred to the recipient in perpetuity or for a +fixed term (regardless of how the transaction is characterized), the +Corresponding Source conveyed under this section must be accompanied +by the Installation Information. But this requirement does not apply +if neither you nor any third party retains the ability to install +modified object code on the User Product (for example, the work has +been installed in ROM). + + The requirement to provide Installation Information does not include a +requirement to continue to provide support service, warranty, or updates +for a work that has been modified or installed by the recipient, or for +the User Product in which it has been modified or installed. Access to a +network may be denied when the modification itself materially and +adversely affects the operation of the network or violates the rules and +protocols for communication across the network. + + Corresponding Source conveyed, and Installation Information provided, +in accord with this section must be in a format that is publicly +documented (and with an implementation available to the public in +source code form), and must require no special password or key for +unpacking, reading or copying. + + 7. Additional Terms. + + "Additional permissions" are terms that supplement the terms of this +License by making exceptions from one or more of its conditions. 
+Additional permissions that are applicable to the entire Program shall +be treated as though they were included in this License, to the extent +that they are valid under applicable law. If additional permissions +apply only to part of the Program, that part may be used separately +under those permissions, but the entire Program remains governed by +this License without regard to the additional permissions. + + When you convey a copy of a covered work, you may at your option +remove any additional permissions from that copy, or from any part of +it. (Additional permissions may be written to require their own +removal in certain cases when you modify the work.) You may place +additional permissions on material, added by you to a covered work, +for which you have or can give appropriate copyright permission. + + Notwithstanding any other provision of this License, for material you +add to a covered work, you may (if authorized by the copyright holders of +that material) supplement the terms of this License with terms: + + a) Disclaiming warranty or limiting liability differently from the + terms of sections 15 and 16 of this License; or + + b) Requiring preservation of specified reasonable legal notices or + author attributions in that material or in the Appropriate Legal + Notices displayed by works containing it; or + + c) Prohibiting misrepresentation of the origin of that material, or + requiring that modified versions of such material be marked in + reasonable ways as different from the original version; or + + d) Limiting the use for publicity purposes of names of licensors or + authors of the material; or + + e) Declining to grant rights under trademark law for use of some + trade names, trademarks, or service marks; or + + f) Requiring indemnification of licensors and authors of that + material by anyone who conveys the material (or modified versions of + it) with contractual assumptions of liability to the recipient, for + any liability that these contractual assumptions directly impose on + those licensors and authors. + + All other non-permissive additional terms are considered "further +restrictions" within the meaning of section 10. If the Program as you +received it, or any part of it, contains a notice stating that it is +governed by this License along with a term that is a further +restriction, you may remove that term. If a license document contains +a further restriction but permits relicensing or conveying under this +License, you may add to a covered work material governed by the terms +of that license document, provided that the further restriction does +not survive such relicensing or conveying. + + If you add terms to a covered work in accord with this section, you +must place, in the relevant source files, a statement of the +additional terms that apply to those files, or a notice indicating +where to find the applicable terms. + + Additional terms, permissive or non-permissive, may be stated in the +form of a separately written license, or stated as exceptions; +the above requirements apply either way. + + 8. Termination. + + You may not propagate or modify a covered work except as expressly +provided under this License. Any attempt otherwise to propagate or +modify it is void, and will automatically terminate your rights under +this License (including any patent licenses granted under the third +paragraph of section 11). 
+ + However, if you cease all violation of this License, then your +license from a particular copyright holder is reinstated (a) +provisionally, unless and until the copyright holder explicitly and +finally terminates your license, and (b) permanently, if the copyright +holder fails to notify you of the violation by some reasonable means +prior to 60 days after the cessation. + + Moreover, your license from a particular copyright holder is +reinstated permanently if the copyright holder notifies you of the +violation by some reasonable means, this is the first time you have +received notice of violation of this License (for any work) from that +copyright holder, and you cure the violation prior to 30 days after +your receipt of the notice. + + Termination of your rights under this section does not terminate the +licenses of parties who have received copies or rights from you under +this License. If your rights have been terminated and not permanently +reinstated, you do not qualify to receive new licenses for the same +material under section 10. + + 9. Acceptance Not Required for Having Copies. + + You are not required to accept this License in order to receive or +run a copy of the Program. Ancillary propagation of a covered work +occurring solely as a consequence of using peer-to-peer transmission +to receive a copy likewise does not require acceptance. However, +nothing other than this License grants you permission to propagate or +modify any covered work. These actions infringe copyright if you do +not accept this License. Therefore, by modifying or propagating a +covered work, you indicate your acceptance of this License to do so. + + 10. Automatic Licensing of Downstream Recipients. + + Each time you convey a covered work, the recipient automatically +receives a license from the original licensors, to run, modify and +propagate that work, subject to this License. You are not responsible +for enforcing compliance by third parties with this License. + + An "entity transaction" is a transaction transferring control of an +organization, or substantially all assets of one, or subdividing an +organization, or merging organizations. If propagation of a covered +work results from an entity transaction, each party to that +transaction who receives a copy of the work also receives whatever +licenses to the work the party's predecessor in interest had or could +give under the previous paragraph, plus a right to possession of the +Corresponding Source of the work from the predecessor in interest, if +the predecessor has it or can get it with reasonable efforts. + + You may not impose any further restrictions on the exercise of the +rights granted or affirmed under this License. For example, you may +not impose a license fee, royalty, or other charge for exercise of +rights granted under this License, and you may not initiate litigation +(including a cross-claim or counterclaim in a lawsuit) alleging that +any patent claim is infringed by making, using, selling, offering for +sale, or importing the Program or any portion of it. + + 11. Patents. + + A "contributor" is a copyright holder who authorizes use under this +License of the Program or a work on which the Program is based. The +work thus licensed is called the contributor's "contributor version". 
+ + A contributor's "essential patent claims" are all patent claims +owned or controlled by the contributor, whether already acquired or +hereafter acquired, that would be infringed by some manner, permitted +by this License, of making, using, or selling its contributor version, +but do not include claims that would be infringed only as a +consequence of further modification of the contributor version. For +purposes of this definition, "control" includes the right to grant +patent sublicenses in a manner consistent with the requirements of +this License. + + Each contributor grants you a non-exclusive, worldwide, royalty-free +patent license under the contributor's essential patent claims, to +make, use, sell, offer for sale, import and otherwise run, modify and +propagate the contents of its contributor version. + + In the following three paragraphs, a "patent license" is any express +agreement or commitment, however denominated, not to enforce a patent +(such as an express permission to practice a patent or covenant not to +sue for patent infringement). To "grant" such a patent license to a +party means to make such an agreement or commitment not to enforce a +patent against the party. + + If you convey a covered work, knowingly relying on a patent license, +and the Corresponding Source of the work is not available for anyone +to copy, free of charge and under the terms of this License, through a +publicly available network server or other readily accessible means, +then you must either (1) cause the Corresponding Source to be so +available, or (2) arrange to deprive yourself of the benefit of the +patent license for this particular work, or (3) arrange, in a manner +consistent with the requirements of this License, to extend the patent +license to downstream recipients. "Knowingly relying" means you have +actual knowledge that, but for the patent license, your conveying the +covered work in a country, or your recipient's use of the covered work +in a country, would infringe one or more identifiable patents in that +country that you have reason to believe are valid. + + If, pursuant to or in connection with a single transaction or +arrangement, you convey, or propagate by procuring conveyance of, a +covered work, and grant a patent license to some of the parties +receiving the covered work authorizing them to use, propagate, modify +or convey a specific copy of the covered work, then the patent license +you grant is automatically extended to all recipients of the covered +work and works based on it. + + A patent license is "discriminatory" if it does not include within +the scope of its coverage, prohibits the exercise of, or is +conditioned on the non-exercise of one or more of the rights that are +specifically granted under this License. You may not convey a covered +work if you are a party to an arrangement with a third party that is +in the business of distributing software, under which you make payment +to the third party based on the extent of your activity of conveying +the work, and under which the third party grants, to any of the +parties who would receive the covered work from you, a discriminatory +patent license (a) in connection with copies of the covered work +conveyed by you (or copies made from those copies), or (b) primarily +for and in connection with specific products or compilations that +contain the covered work, unless you entered into that arrangement, +or that patent license was granted, prior to 28 March 2007. 
+ + Nothing in this License shall be construed as excluding or limiting +any implied license or other defenses to infringement that may +otherwise be available to you under applicable patent law. + + 12. No Surrender of Others' Freedom. + + If conditions are imposed on you (whether by court order, agreement or +otherwise) that contradict the conditions of this License, they do not +excuse you from the conditions of this License. If you cannot convey a +covered work so as to satisfy simultaneously your obligations under this +License and any other pertinent obligations, then as a consequence you may +not convey it at all. For example, if you agree to terms that obligate you +to collect a royalty for further conveying from those to whom you convey +the Program, the only way you could satisfy both those terms and this +License would be to refrain entirely from conveying the Program. + + 13. Use with the GNU Affero General Public License. + + Notwithstanding any other provision of this License, you have +permission to link or combine any covered work with a work licensed +under version 3 of the GNU Affero General Public License into a single +combined work, and to convey the resulting work. The terms of this +License will continue to apply to the part which is the covered work, +but the special requirements of the GNU Affero General Public License, +section 13, concerning interaction through a network will apply to the +combination as such. + + 14. Revised Versions of this License. + + The Free Software Foundation may publish revised and/or new versions of +the GNU General Public License from time to time. Such new versions will +be similar in spirit to the present version, but may differ in detail to +address new problems or concerns. + + Each version is given a distinguishing version number. If the +Program specifies that a certain numbered version of the GNU General +Public License "or any later version" applies to it, you have the +option of following the terms and conditions either of that numbered +version or of any later version published by the Free Software +Foundation. If the Program does not specify a version number of the +GNU General Public License, you may choose any version ever published +by the Free Software Foundation. + + If the Program specifies that a proxy can decide which future +versions of the GNU General Public License can be used, that proxy's +public statement of acceptance of a version permanently authorizes you +to choose that version for the Program. + + Later license versions may give you additional or different +permissions. However, no additional obligations are imposed on any +author or copyright holder as a result of your choosing to follow a +later version. + + 15. Disclaimer of Warranty. + + THERE IS NO WARRANTY FOR THE PROGRAM, TO THE EXTENT PERMITTED BY +APPLICABLE LAW. EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT +HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY +OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, +THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR +PURPOSE. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE PROGRAM +IS WITH YOU. SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF +ALL NECESSARY SERVICING, REPAIR OR CORRECTION. + + 16. Limitation of Liability. 
+
+  IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING
+WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MODIFIES AND/OR CONVEYS
+THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY
+GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE
+USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED TO LOSS OF
+DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD
+PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER PROGRAMS),
+EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF
+SUCH DAMAGES.
+
+  17. Interpretation of Sections 15 and 16.
+
+  If the disclaimer of warranty and limitation of liability provided
+above cannot be given local legal effect according to their terms,
+reviewing courts shall apply local law that most closely approximates
+an absolute waiver of all civil liability in connection with the
+Program, unless a warranty or assumption of liability accompanies a
+copy of the Program in return for a fee.
+
+                     END OF TERMS AND CONDITIONS
+
+            How to Apply These Terms to Your New Programs
+
+  If you develop a new program, and you want it to be of the greatest
+possible use to the public, the best way to achieve this is to make it
+free software which everyone can redistribute and change under these terms.
+
+  To do so, attach the following notices to the program.  It is safest
+to attach them to the start of each source file to most effectively
+state the exclusion of warranty; and each file should have at least
+the "copyright" line and a pointer to where the full notice is found.
+
+    <one line to give the program's name and a brief idea of what it does.>
+    Copyright (C) <year>  <name of author>
+
+    This program is free software: you can redistribute it and/or modify
+    it under the terms of the GNU General Public License as published by
+    the Free Software Foundation, either version 3 of the License, or
+    (at your option) any later version.
+
+    This program is distributed in the hope that it will be useful,
+    but WITHOUT ANY WARRANTY; without even the implied warranty of
+    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+    GNU General Public License for more details.
+
+    You should have received a copy of the GNU General Public License
+    along with this program.  If not, see <https://www.gnu.org/licenses/>.
+
+Also add information on how to contact you by electronic and paper mail.
+
+  If the program does terminal interaction, make it output a short
+notice like this when it starts in an interactive mode:
+
+    <program>  Copyright (C) <year>  <name of author>
+    This program comes with ABSOLUTELY NO WARRANTY; for details type `show w'.
+    This is free software, and you are welcome to redistribute it
+    under certain conditions; type `show c' for details.
+
+The hypothetical commands `show w' and `show c' should show the appropriate
+parts of the General Public License.  Of course, your program's commands
+might be different; for a GUI interface, you would use an "about box".
+
+  You should also get your employer (if you work as a programmer) or school,
+if any, to sign a "copyright disclaimer" for the program, if necessary.
+For more information on this, and how to apply and follow the GNU GPL, see
+<https://www.gnu.org/licenses/>.
+
+  The GNU General Public License does not permit incorporating your program
+into proprietary programs.  If your program is a subroutine library, you
+may consider it more useful to permit linking proprietary applications with
+the library.  If this is what you want to do, use the GNU Lesser General
+Public License instead of this License.  But first, please read
+<https://www.gnu.org/licenses/why-not-lgpl.html>.
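As a side note on the `.gitattributes` hunk at the top of this diff: it routes the newly added `arial.ttf` fonts and `__pycache__` `.pyc` artifacts through Git LFS. One way to sanity-check that the rules take effect after applying this patch (assuming `git` and `git-lfs` are installed):

```
# Show which filter applies to one of the newly tracked paths; an
# LFS-tracked file reports "filter: lfs".
git check-attr filter -- zavodik/nodes/ComfyUI_tinyterraNodes/arial.ttf

# List every file currently stored through LFS in the checkout.
git lfs ls-files
```

Tracking compiled `.pyc` caches in version control (LFS or otherwise) is unusual; they are typically ignored instead, as the Impact Pack's own `.gitignore` in this diff does with `__pycache__`.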
diff --git a/zavodik/nodes/ComfyUI-Impact-Pack/README.md b/zavodik/nodes/ComfyUI-Impact-Pack/README.md
new file mode 100644
index 0000000000000000000000000000000000000000..9e879872c325b5a8b30a0be5c5eea6a7df36149b
--- /dev/null
+++ b/zavodik/nodes/ComfyUI-Impact-Pack/README.md
@@ -0,0 +1,519 @@
+[Tutorial videos](https://www.youtube.com/watch?v=AccoxDZIg3Y&list=PL_Ej2RDzjQLGfEeizq4GISeY3FtVyFmGP)
+
+# ComfyUI-Impact-Pack
+
+**Custom node pack for ComfyUI**
+This node pack helps to conveniently enhance images through Detector, Detailer, Upscaler, Pipe, and more.
+
+NOTE: The UltralyticsDetectorProvider node is not part of the ComfyUI-Impact-Pack. To use the UltralyticsDetectorProvider node, please install the ComfyUI-Impact-Subpack separately.
+
+## NOTICE
+* V8.24: This compatibility patch requires ComfyUI version 0.3.63 or higher due to structural changes in DifferentialDiffusion.
+* V8.19: Legacy nodes (mmdet, etc.) have been removed.
+* V8.18: Supports [facebookresearch/sam2](https://github.com/facebookresearch/sam2) models.
+* V8.0: The `Impact Subpack` is no longer installed automatically. To use `UltralyticsDetectorProvider` nodes, please install the `Impact Subpack` separately.
+* V7.6: Automatic installation is no longer supported. Please install using ComfyUI-Manager, or manually install requirements.txt and run install.py to complete the installation.
+* V7.0: Supports Switch based on Execution Model Inversion.
+* V6.0: Supports the FLUX.1 model in Impact KSampler, Detailers, and PreviewBridgeLatent.
+* V5.0: No longer compatible with versions of ComfyUI before 2024.04.08.
+* V4.87.4: Update to a version of ComfyUI after 2024.04.08 for proper functionality.
+* V4.85: Incompatible with the outdated **ComfyUI IPAdapter Plus**. (A version dated March 24th or later is required.)
+* V4.77: Compatibility patch applied. Requires ComfyUI version (Oct. 8th) or later.
+* V4.73.3: ControlNetApply (SEGS) supports AnimateDiff.
+* V4.20.1: Due to the feature update in `RegionalSampler`, the parameter order has changed, causing malfunctions in previously created `RegionalSamplers`. Please adjust the parameters accordingly.
+* V4.12: `MASKS` is changed to `MASK`.
+* V4.7.2 isn't compatible with old versions of `ControlNet Auxiliary Preprocessor`. If you use `MediaPipe FaceMesh to SEGS`, update to the latest version (Sep. 17th).
+* Selection weight syntax changed (`:` -> `::`) in V3.16. ([tutorial](https://github.com/ltdrdata/ComfyUI-extension-tutorials/blob/Main/ComfyUI-Impact-Pack/tutorial/ImpactWildcardProcessor.md))
+* Starting from V3.6, the latest version (Aug 8, 9ccc965) of ComfyUI is required.
+* **In versions below V3.3.1, there was an issue with the image quality generated after using the UltralyticsDetectorProvider. Please make sure to upgrade to a newer version.**
+* Starting from V3.0, nodes related to `mmdet` are optional nodes that are activated only based on the configuration settings.
+  - Through ComfyUI-Impact-Subpack, you can utilize UltralyticsDetectorProvider to access various detection models.
+* Between versions 2.22 and 2.21, there is a partial compatibility break in the Detailer workflow. If you continue to use an existing workflow, errors may occur during execution. An additional output called "enhanced_alpha_list" has been added to Detailer-related nodes.
+* The permission error related to cv2 that occurred during the installation of Impact Pack has been patched in version 2.21.4. However, please note that the latest versions of ComfyUI and ComfyUI-Manager are required.
+* The "PreviewBridge" feature may not function correctly on ComfyUI versions released before July 1, 2023. +* Attempting to load the "ComfyUI-Impact-Pack" on ComfyUI versions released before June 27, 2023, will result in a failure. +* With the addition of wildcard support in FaceDetailer, the structure of DETAILER_PIPE-related nodes and Detailer nodes has changed. There may be malfunctions when using the existing workflow. + + +## How To Install + +### **Recommended** +* Install via [ComfyUI-Manager](https://github.com/ltdrdata/ComfyUI-Manager). + +### **Manual** +* Navigate to `ComfyUI/custom_nodes` in your terminal (cmd). +* Clone the repository under the `custom_nodes` directory using the following command: + ``` + git clone https://github.com/ltdrdata/ComfyUI-Impact-Pack comfyui-impact-pack + cd comfyui-impact-pack + ``` +* Install dependencies in your Python environment. + * For Windows Portable, run the following command inside `ComfyUI\custom_nodes\comfyui-impact-pack`: + ``` + ..\..\..\python_embeded\python.exe -m pip install -r requirements.txt + ``` + * If using venv or conda, activate your Python environment first, then run: + ``` + pip install -r requirements.txt + ``` + +### Companion Pack +* If you need the `Ultralytics Detector Provider` to use various YOLO detection models, you should also install [ComfyUI-Impact-Subpack](https://github.com/ltdrdata/ComfyUI-Impact-Subpack). + + +## Custom Nodes +### [Detector nodes](https://github.com/ltdrdata/ComfyUI-extension-tutorials/blob/Main/ComfyUI-Impact-Pack/tutorial/detectors.md) + * `SAMLoader (Impact)` - Loads the SAM model. + * `ONNXDetectorProvider` - Loads the ONNX model to provide BBOX_DETECTOR. + * `CLIPSegDetectorProvider` - Wrapper for CLIPSeg to provide BBOX_DETECTOR. + * You need to install the ComfyUI-CLIPSeg node extension. + * `SEGM Detector (combined)` - Detects segmentation and returns a mask from the input image. + * `BBOX Detector (combined)` - Detects bounding boxes and returns a mask from the input image. + * `SAMDetector (combined)` - Utilizes the SAM technology to extract the segment at the location indicated by the input SEGS on the input image and outputs it as a unified mask. + * `SAMDetector (Segmented)` - It is similar to `SAMDetector (combined)`, but it separates and outputs the detected segments. Multiple segments can be found for the same detected area, and currently, a policy is in place to group them arbitrarily in sets of three. This aspect is expected to be improved in the future. + * As a result, it outputs the `combined_mask`, which is a unified mask, and `batch_masks`, which are multiple masks grouped together in batch form. + * While `batch_masks` may not be completely separated, it provides functionality to perform some level of segmentation. + * `Simple Detector (SEGS)` - Operating primarily with `BBOX_DETECTOR`, and with the additional provision of `SAM_MODEL` or `SEGM_DETECTOR`, this node internally generates improved SEGS through mask operations on both *bbox* and *silhouette*. It serves as a convenient tool to simplify a somewhat intricate workflow. + * `Simple Detector for Video (SEGS)` – Performs detection on videos composed of image frames. Instead of using a single mask, it performs detection individually on each image frame and generates a SEGS object with a batch of masks. + * `SAM2 Video Detector (SEGS)` – Similar to `Simple Detector for Video (SEGS)`, but utilizes SAM2’s video tracking technology to generate a SEGS object with a batch of masks. 
+   * To use this node, you must select a SAM2 model in the SAMLoader.
+
+
+### ControlNet, IPAdapter
+ * `ControlNetApply (SEGS)` - To apply ControlNet to SEGS, use this node together with the Preprocessor Provider node from the Inspire Pack.
+   * `segs_preprocessor` and `control_image` can be selectively applied. If a `control_image` is given, `segs_preprocessor` will be ignored.
+   * If set to `control_image`, you can preview the cropped cnet image through `SEGSPreview (CNET Image)`. Images generated by `segs_preprocessor` should be verified through the `cnet_images` output of each Detailer.
+   * The `segs_preprocessor` operates by applying preprocessing on-the-fly based on the cropped image during the detailing process, while `control_image` will be cropped and used as input to `ControlNetApply (SEGS)`.
+ * `ControlNetClear (SEGS)` - Clears the ControlNet applied to SEGS.
+ * `IPAdapterApply (SEGS)` - To apply IPAdapter to SEGS, use this node together with the Preprocessor Provider node from the Inspire Pack.
+
+
+### Mask operation
+ * `Pixelwise(SEGS & SEGS)` - Performs a pixelwise AND operation between two SEGS.
+ * `Pixelwise(SEGS - SEGS)` - Subtracts one SEGS from another.
+ * `Pixelwise(SEGS & MASK)` - Performs a pixelwise AND operation between SEGS and MASK.
+ * `Pixelwise(SEGS & MASKS ForEach)` - Performs a pixelwise AND operation between SEGS and MASKS.
+   * Please note that this operation is performed with batches of MASKS, not just a single MASK.
+ * `Pixelwise(MASK & MASK)` - Performs a pixelwise AND operation between two masks.
+ * `Pixelwise(MASK - MASK)` - Subtracts one mask from another.
+ * `Pixelwise(MASK + MASK)` - Combines two masks.
+ * `SEGM Detector (SEGS)` - Detects segmentation and returns SEGS from the input image.
+ * `BBOX Detector (SEGS)` - Detects bounding boxes and returns SEGS from the input image.
+ * `Dilate Mask` - Dilates the mask.
+   * A negative value performs erosion instead.
+ * `Gaussian Blur Mask` - Applies Gaussian blur to the mask. You can utilize this for mask feathering.
+ * `Mask Rect Area` - Creates a rectangular mask defined by percentages, with a preview canvas.
+ * `Mask Rect Area (Advanced)` - Creates a rectangular mask defined by pixel coordinates and image size.
+
+
+### [Detailer nodes](https://github.com/ltdrdata/ComfyUI-extension-tutorials/blob/Main/ComfyUI-Impact-Pack/tutorial/detailers.md)
+ * `Detailer (SEGS)` - Refines the image based on SEGS.
+ * `Detailer (SEGS) with auto retry` - Refines the image based on SEGS and automatically retries if the patch comes out all black.
+ * `DetailerDebug (SEGS)` - Refines the image based on SEGS. Additionally, it provides the ability to monitor the cropped image and the refined image of the cropped image.
+   * When using 'external_seed', disable the 'seed random generate' option in the 'Detailer...' node to prevent regeneration caused by a seed that does not change on every run.
+ * `MASK to SEGS` - Generates SEGS based on the mask.
+ * `MASK to SEGS For Video` - Generates SEGS based on the mask for video. (Renamed from `MASK to SEGS For AnimateDiff`)
+   * When using a single mask, convert it to SEGS to apply it to the entire frame.
+   * When using a batch mask, the contour fill feature is disabled.
+ * `MediaPipe FaceMesh to SEGS` - Separates each landmark of the MediaPipe facemesh image to create labeled SEGS.
+   * Usually, the size of images created through the MediaPipe facemesh preprocessor is downscaled. It resizes the MediaPipe facemesh image to the original size given as reference_image_opt for matching sizes during processing.
+ * `ToBinaryMask` - Converts a mask whose alpha values range between 0 and 255 into a binary mask of 0 and 255. All non-zero parts are set to 255.
+ * `Masks to Mask List` - This node converts MASKS in batch form to a list of individual masks.
+ * `Mask List to Masks` - This node converts a MASK list to MASK batch form.
+ * `EmptySEGS` - Provides an empty SEGS.
+ * `MaskPainter` - Provides a feature to draw masks.
+ * `FaceDetailer` - Easily detects faces and improves them.
+ * `FaceDetailer (pipe)` - Easily detects faces and improves them (for multipass).
+ * `MaskDetailer (pipe)` - This is a simple inpaint node that applies the Detailer to the mask area.
+
+ * `FromDetailer (SDXL/pipe)`, `BasicPipe -> DetailerPipe (SDXL)`, `Edit DetailerPipe (SDXL)` - These are pipe functions used in Detailer for utilizing the refiner model of SDXL.
+ * `Any PIPE -> BasicPipe` - Converts the PIPE value of other custom nodes that is not a BASIC_PIPE but internally has the same structure as a BASIC_PIPE into a BASIC_PIPE. If an incompatible type is applied, it may cause runtime errors.
+
+
+### SEGS Manipulation nodes
+ * `SEGSDetailer` - Performs detailed work on SEGS without pasting it back onto the original image.
+ * `SEGSPaste` - Pastes the results of SEGS onto the original image.
+   * If `ref_image_opt` is present, the images contained within SEGS are ignored. Instead, the image within `ref_image_opt` corresponding to the crop area of SEGS is taken and pasted. The size of the image in `ref_image_opt` should be the same as the original image size.
+   * This node can be used in conjunction with the processing results of AnimateDiff.
+ * `SEGSPreview` - Provides a preview of SEGS.
+   * This option is used to preview the improved image through `SEGSDetailer` before merging it into the original. Prior to going through `SEGSDetailer`, SEGS only contains mask information without image information. If fallback_image_opt is connected to the original image, SEGS without image information will generate a preview using the original image. However, if SEGS already contains image information, fallback_image_opt will be ignored.
+   * This node can be used in conjunction with the processing results of AnimateDiff.
+ * `SEGSPreview (CNET Image)` - Shows images configured with `ControlNetApply (SEGS)` for debugging purposes.
+ * `SEGSToImageList` - Converts SEGS to an Image List.
+ * `SEGSToMaskList` - Converts SEGS to a Mask List.
+ * `SEGS Filter (label)` - This node filters SEGS based on the label of the detected areas.
+ * `SEGS Filter (ordered)` - This node sorts SEGS based on size and position and retrieves SEGs within a certain range.
+ * `SEGS Filter (range)` - This node retrieves only SEGs from SEGS that have a size and position within a certain range.
+ * `SEGS Filter (non max suppression)` - This node filters SEGS by removing those with high overlap based on the Intersection over Union (IoU) threshold, keeping only the most confident detections.
+ * `SEGS Filter (intersection)` - This node filters segs1, keeping only the SEGS that do not significantly overlap with any SEGS in segs2, based on the Intersection over Area (IoA) threshold.
+ * `SEGS Assign (label)` - Assigns labels sequentially to SEGS. This node is useful when used with `[LAB]` of FaceDetailer.
+ * `SEGSConcat` - Concatenates segs1 and segs2. If the source shapes of segs1 and segs2 differ, segs2 will be ignored.
+ * `SEGS Merge` - SEGS contains multiple SEGs. SEGS Merge integrates several SEGs into a single merged SEG. The label is changed to `merged` and the confidence becomes the minimum confidence. The applied controlnet and cropped_image are removed.
+ * `Picker (SEGS)` - Among the input SEGS, you can select a specific SEG through a dialog. If no SEG is selected, it outputs an empty SEGS. Increasing the batch_size of SEGSDetailer can be used for the purpose of selecting from the candidates.
+ * `Set Default Image For SEGS` - Sets a default image for SEGS. SEGS with images set this way do not need to have a fallback image set. When override is set to false, the original image is preserved.
+ * `Remove Image from SEGS` - Removes the image set for the SEGS that has been configured by "Set Default Image for SEGS" or SEGSDetailer. When the image for the SEGS is removed, the Detailer node will operate based on the currently processed image instead of the SEGS.
+ * `Make Tile SEGS` - [experimental] Creates SEGS in the form of tiles from an image to facilitate experiments for Tiled Upscale using the Detailer.
+   * The `filter_in_segs_opt` and `filter_out_segs_opt` are optional inputs. If these inputs are provided, when creating the tiles, the mask for each tile is generated by overlapping with the mask of `filter_in_segs_opt` and excluding the overlap with the mask of `filter_out_segs_opt`. Tiles with an empty mask will not be created as SEGS.
+ * `Dilate Mask (SEGS)` - Dilates/erodes the masks in SEGS.
+ * `Gaussian Blur Mask (SEGS)` - Applies Gaussian blur to the masks in SEGS.
+ * `SEGS_ELT Manipulation` - Experimental nodes for manipulating SEG_ELT:
+   * `DecomposeSEGS` - Decomposes SEGS to allow for detailed manipulation.
+   * `AssembleSEGS` - Reassembles the decomposed SEGS.
+   * `From SEG_ELT` - Extracts detailed information from SEG_ELT.
+   * `Edit SEG_ELT` - Modifies some of the information in SEG_ELT.
+   * `Dilate SEG_ELT` - Dilates the mask of SEG_ELT.
+   * `From SEG_ELT bbox` - Extracts coordinates from the bbox in SEG_ELT.
+   * `From SEG_ELT crop_region` - Extracts coordinates from the crop_region in SEG_ELT.
+   * `Count Elt in SEGS` - Counts the number of Elts in SEGS.
+
+
+### Pipe nodes
+ * `ToDetailerPipe`, `FromDetailerPipe` - These nodes are used to bundle multiple inputs used in the detailer, such as models and vae, ..., into a single DETAILER_PIPE, or to extract the elements that are bundled in the DETAILER_PIPE.
+ * `ToBasicPipe`, `FromBasicPipe` - These nodes are used to bundle model, clip, vae, positive conditioning, and negative conditioning into a single BASIC_PIPE, or extract each element from the BASIC_PIPE.
+ * `EditBasicPipe`, `EditDetailerPipe` - These nodes are used to replace some elements in BASIC_PIPE or DETAILER_PIPE.
+ * `FromDetailerPipe_v2`, `FromBasicPipe_v2` - These have the same functionality as `FromDetailerPipe` and `FromBasicPipe`, but with an additional output that directly exports the input pipe. This is useful when editing with EditBasicPipe and EditDetailerPipe.
+* `Latent Scale (on Pixel Space)` - This node converts latent to pixel space, upscales it, and then converts it back to latent.
+   * If upscale_model_opt is provided, it uses the model to upscale the pixels and then downscales them to the target resolution using the interpolation method provided in scale_method.
+* `PixelKSampleUpscalerProvider` - Provides an upscaler that converts latent to pixels using VAEDecode, performs upscaling, converts back to latent using VAEEncode, and then performs k-sampling. This upscaler can be attached to nodes such as `Iterative Upscale` for use.
+   * Similar to `Latent Scale (on Pixel Space)`, if upscale_model_opt is provided, it performs pixel upscaling using the model.
+* `PixelTiledKSampleUpscalerProvider` - It is similar to `PixelKSampleUpscalerProvider`, but it uses `ComfyUI_TiledKSampler` and a Tiled VAE Decoder/Encoder to avoid GPU VRAM issues at high resolutions.
+   * You need to install the [BlenderNeko/ComfyUI_TiledKSampler](https://github.com/BlenderNeko/ComfyUI_TiledKSampler) node extension.
+
+
+### PK_HOOK
+ * `DenoiseScheduleHookProvider` - Provides a hook for IterativeUpscale that gradually changes the denoise toward target_denoise as the iterative steps progress.
+ * `CfgScheduleHookProvider` - Provides a hook for IterativeUpscale that gradually changes the cfg toward target_cfg as the iterative steps progress.
+ * `StepsScheduleHookProvider` - Provides a hook for IterativeUpscale that gradually changes the sampling steps toward target_steps as the iterative steps progress.
+ * `NoiseInjectionHookProvider` - During each iteration of IterativeUpscale, noise is injected into the latent space while varying the strength according to a schedule.
+   * You need to install the [BlenderNeko/ComfyUI_Noise](https://github.com/BlenderNeko/ComfyUI_Noise) node extension.
+   * The seed serves as the initial value required for generating noise, and it increments by 1 with each iteration as the process unfolds.
+   * The source determines the types of CPU noise and GPU noise to be configured.
+   * Currently, there is only a simple schedule available, where the strength of the noise varies from start_strength to end_strength during the progression of each iteration.
+ * `UnsamplerHookProvider` - Applies Unsampler during each iteration. To use this node, ComfyUI_Noise must be installed.
+ * `PixelKSampleHookCombine` - This is used to connect two PK_HOOKs. hook1 is executed first, and then hook2 is executed.
+   * If you want to change cfg and denoise simultaneously, you can combine the PK_HOOKs of CfgScheduleHookProvider and DenoiseScheduleHookProvider through PixelKSampleHookCombine.
+
+
+### DETAILER_HOOK
+ * `NoiseInjectionDetailerHookProvider` - The `detailer_hook` is a hook in the `Detailer` that injects noise during the processing of each SEGS.
+ * `UnsamplerDetailerHookProvider` - Applies Unsampler during each cycle. To use this node, ComfyUI_Noise must be installed.
+ * `DenoiseSchedulerDetailerHookProvider` - During the progress of the cycle, the detailer's denoise is altered up to the `target_denoise`.
+ * `CoreMLDetailerHookProvider` - CoreML supports only 512x512, 512x768, 768x512, and 768x768 size sampling. CoreMLDetailerHookProvider precisely fixes the upscale of the crop_region to one of these sizes. When using this hook, the selected size is always used, regardless of the guide_size. However, if the guide_size is too small, skipping will occur.
+ * `DetailerHookCombine` - This is used to connect two DETAILER_HOOKs. Similar to PixelKSampleHookCombine.
+ * `SEGSOrderedFilterDetailerHook`, `SEGSRangeFilterDetailerHook`, `SEGSLabelFilterDetailerHook` - These are wrapper nodes that provide the SEGSFilter nodes to be applied in FaceDetailer or Detector by creating a DETAILER_HOOK.
+ * `PreviewDetailerHook` - Connecting this hook node lets you view a preview whenever each SEGS detailing task is completed. When working with a large number of SEGS, such as with Make Tile SEGS, it allows you to monitor the situation as improvements progress incrementally.
+   * Since this hook is applied when pasting onto the original image, it has no effect on nodes like `SEGSDetailer`.
+ * `VariationNoiseDetailerHookProvider` - Apply a variation seed to the detailer. It can be applied in multiple stages through combine.
+ * `CustomSamplerDetailerHookProvider` - Apply a hook that allows you to use a custom sampler in the Detailer nodes. When using `DetailerHookCombine`, the sampler from the first hook is applied.
+ * `LamaRemoverDetailerHookProvider` - Applies Lama Remover to the upscaled image during the detailing stage. If `skip_sampling` is set to True, Lama Remover can be used alone without the detailing stage, allowing it to simply remove detected regions.
+ * Not applicable for **AnimateDiff** detailers. When using `DetailerHookCombine`, `skip_sampling` is only applied if it is set to `True` for all hooks.
+ * To use this node, the node pack at [Layer-norm/comfyui-lama-remover](https://github.com/Layer-norm/comfyui-lama-remover) must be installed.
+
+
+### Iterative Upscale nodes
+ * `Iterative Upscale (Latent/on Pixel Space)` - This node takes an upscaler as input, splits the scale_factor into steps, and then performs the upscaling iteratively. It takes a latent as input and outputs a latent as the result.
+ * `Iterative Upscale (Image)` - This node takes an upscaler as input, splits the scale_factor into steps, and then performs the upscaling iteratively. It takes an image as input and outputs an image as the result.
+ * Internally, this node uses 'Iterative Upscale (Latent)'.
+
+
+### TwoSamplers nodes
+* `TwoSamplersForMask` - This node can apply two samplers depending on the mask area. The base_sampler is applied to the area where the mask is 0, while the mask_sampler is applied to the area where the mask is 1 (a toy sketch follows this section).
+ * Note: A latent encoded through VAEEncodeForInpaint cannot be used.
+* `KSamplerProvider` - This is a wrapper that enables KSampler to be used in TwoSamplersForMask and TwoSamplersForMaskUpscalerProvider.
+* `TiledKSamplerProvider` - A wrapper that provides a KSAMPLER based on ComfyUI_TiledKSampler.
+ * You need to install the [BlenderNeko/ComfyUI_TiledKSampler](https://github.com/BlenderNeko/ComfyUI_TiledKSampler) node extension.
+
+* `TwoAdvancedSamplersForMask` - Similar to TwoSamplersForMask, but different in operation: TwoSamplersForMask performs sampling in the mask area only after all sampling in the base area is finished, whereas TwoAdvancedSamplersForMask performs sampling in both the base area and the mask area sequentially at each step.
+* `KSamplerAdvancedProvider` - This is a wrapper that enables KSampler to be used in TwoAdvancedSamplersForMask and RegionalSampler.
+ * sigma_factor: By multiplying the denoise schedule by the sigma_factor, you can adjust the amount of denoising based on the configured denoise.
+
+* `TwoSamplersForMaskUpscalerProvider` - This is an Upscaler that extends TwoSamplersForMask for use in Iterative Upscale.
+ * `TwoSamplersForMaskUpscalerProviderPipe` - pipe version of TwoSamplersForMaskUpscalerProvider.
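+
+As a toy illustration of the TwoSamplersForMask idea mentioned above (not the node's actual per-step implementation; the tensors and shapes below are made-up examples), the two sampler results are composited by the mask:
+
+```python
+import torch
+
+# Pretend outputs of the two samplers, in latent shape [B, C, H, W].
+latent_base = torch.randn(1, 4, 64, 64)    # base_sampler result (mask == 0 area)
+latent_masked = torch.randn(1, 4, 64, 64)  # mask_sampler result (mask == 1 area)
+
+mask = torch.zeros(1, 1, 64, 64)
+mask[..., 32:, :] = 1.0  # bottom half is handled by the mask_sampler
+
+# Keep the base result outside the mask, the masked result inside it.
+combined = latent_base * (1.0 - mask) + latent_masked * mask
+print(combined.shape)  # torch.Size([1, 4, 64, 64])
+```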
+
+
+### Image Utils
+ * `PreviewBridge (image)` - This custom node can be used as a bridge for images when using the MaskEditor feature of Clipspace.
+ * `PreviewBridge (latent)` - This custom node can be used as a bridge for latent images when using the MaskEditor feature of Clipspace.
+ * If a latent with a mask is provided as input, it displays the mask. Additionally, the mask output provides the mask set in the latent.
+ * If a latent without a mask is provided as input, it outputs the original latent as is, but the mask output is a mask covering the entire region.
+ * When a mask is set through MaskEditor, it is applied to the latent, and the output includes the stored mask. The same mask is also provided through the mask output.
+ * When `vae_opt` is connected, it takes priority over the `preview_method`.
+ * `ImageSender`, `ImageReceiver` - The images generated in ImageSender are automatically sent to the ImageReceiver with the same link_id.
+ * `LatentSender`, `LatentReceiver` - The latent generated in LatentSender is automatically sent to the LatentReceiver with the same link_id.
+ * Furthermore, LatentSender is implemented with PreviewLatent, which stores the latent in payload form within the image thumbnail.
+ * Due to the current structure of ComfyUI, it is unable to distinguish between SDXL latents and SD1.5/SD2.1 latents. Therefore, it generates thumbnails by decoding them using the SD1.5 method.
+
+
+### Switch nodes
+ * `Switch (images, mask)`, `Switch (latent)`, `Switch (SEGS)` - Among multiple inputs, it selects the input designated by the selector and outputs it. The first input must be provided, while the others are optional. However, if the input specified by the selector is not connected, an error may occur.
+ * `Switch (Any)` - This is a Switch node that takes an arbitrary number of inputs and produces a single output. Its type is determined when connected to any node, and connecting inputs increases the available slots for connections.
+ * `Inversed Switch (Any)` - In contrast to `Switch (Any)`, it takes a single input and outputs it to one of many outputs.
+ * NOTE: See this [tutorial](https://github.com/ltdrdata/ComfyUI-extension-tutorials/blob/Main/ComfyUI-Impact-Pack/tutorial/switch.md)
+
+
+### [Wildcards](http://github.com/ltdrdata/ComfyUI-extension-tutorials/blob/Main/ComfyUI-Impact-Pack/tutorial/ImpactWildcard.md) nodes
+ * These nodes support syntax in the form of `__wildcard-name__` and dynamic prompt syntax like `{a|b|c}`. (A minimal expansion sketch follows at the end of this section.)
+ * Wildcard files can be used by placing `.txt` or `.yaml` files under either `ComfyUI-Impact-Pack/wildcards` or `ComfyUI-Impact-Pack/custom_wildcards` paths.
+ * You can download and use [Wildcard YAML](https://civitai.com/models/138970/billions-of-wildcards-all-in-one) files in this format.
+ * After the first execution, you can change the custom wildcards path in the `custom_wildcards` entry within the generated `ComfyUI-Impact-Pack/impact-pack.ini` file.
+ * `ImpactWildcardProcessor` - The text is generated by processing the wildcards in the Text. If the mode is set to "populate", a dynamic prompt is generated with each execution and the result is filled into the second textbox. If the mode is set to "fixed", the content of the second textbox remains unchanged.
+ * When an image is generated in "fixed" mode, the prompt used for that particular generation is stored in the metadata.
+ * `ImpactWildcardEncode` - Similar to ImpactWildcardProcessor, this provides the loading functionality of LoRAs (e.g. ``). Populated prompts are encoded using the clip after all the lora loading is done.
+ * If the `Inspire Pack` is installed, you can use **Lora Block Weight** in the form of `LBW=lbw spec;`
+ * ``, ``, ``
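+
+The following is a minimal, hypothetical sketch of how `{a|b|c}` options and `__wildcard__` references can be expanded. It only illustrates the syntax described above; it is not the Impact Pack's actual implementation in `modules/impact/wildcards.py`, and the in-memory wildcard table and iteration cap are assumptions for the example.
+
+```python
+import random
+import re
+
+# Hypothetical in-memory wildcard table; real entries come from .txt/.yaml
+# files under wildcards/ or custom_wildcards/.
+wildcards = {"animal": ["cat", "dog", "fox"]}
+
+def expand(prompt, max_iterations=100):
+    """Repeatedly resolve {a|b|c} options and __name__ wildcards.
+
+    The iteration cap guards against circular wildcard references.
+    """
+    for _ in range(max_iterations):
+        new = re.sub(r"\{([^{}]+)\}",
+                     lambda m: random.choice(m.group(1).split("|")), prompt)
+        new = re.sub(r"__([\w./*-]+)__",
+                     lambda m: random.choice(wildcards.get(m.group(1).lower(), [m.group(0)])),
+                     new)
+        if new == prompt:  # nothing left to expand
+            break
+        prompt = new
+    return prompt
+
+print(expand("a {red|blue} __animal__"))  # e.g. "a blue fox"
+```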
+
+
+### Regional Sampling
+ * These nodes offer the capability to divide regions and perform partial sampling using a mask. Unlike TwoSamplersForMask, sampling for each region is applied during each step.
+ * `RegionalPrompt` - This node combines a **mask** for specifying regions and the **sampler** to apply to each region to create `REGIONAL_PROMPTS`.
+ * `CombineRegionalPrompts` - Combine multiple `REGIONAL_PROMPTS` into a single `REGIONAL_PROMPTS`.
+ * `RegionalSampler` - This node performs sampling using a base sampler and regional prompts. Sampling by the base sampler is executed at each step, while sampling for each region is performed through the sampler bound to that region.
+ * overlap_factor - Specifies the amount of overlap for each region so that it blends well with the area outside the mask.
+ * restore_latent - When sampling each region, restore the areas outside the mask to the base latent, preventing additional noise from being introduced outside the mask during region sampling.
+ * `RegionalSamplerAdvanced` - This is the Advanced version of the RegionalSampler. You can control it using `step` instead of `denoise`.
+ > NOTE: The `sde` samplers and the `uni_pc` sampler introduce additional noise during each step of the sampling process. To mitigate this, when sampling each region, the `uni_pc` sampler additionally applies `dpmpp_fast`, and the `sde` samplers additionally apply `dpmpp_2m`.
+
+
+### Impact KSampler
+ * These samplers support basic_pipe and the AYS/OSS/GITS schedulers
+ * `KSampler (pipe)` - pipe version of KSampler
+ * `KSampler (Advanced/pipe)` - pipe version of KSamplerAdvanced
+ * When converting the scheduler widget to an input, refer to the `Impact Scheduler Adapter` node to resolve compatibility issues.
+ * `GITSScheduler Func Provider` - Provides a scheduler function for GITSScheduler
+
+
+### Batch/List Util
+ * `Image Batch to Image List` - Convert an Image batch to an Image List
+ - This allows images generated in a multi-image batch to be handled individually
+ * `Image List to Image Batch` - Convert an Image List to an Image Batch
+ * `Make Image List` - Convert multiple images into a single image list
+ * `Make Image Batch` - Convert multiple images into a single image batch
+ - The number of image inputs can be increased as needed
+ * `Masks to Mask List`, `Mask List to Masks`, `Make Mask List`, `Make Mask Batch` - These have the same functionality as the nodes above, but use masks as input instead of images.
+ * `Flatten Mask Batch` - Flattens a Mask Batch into a single Mask. Normal operation is not guaranteed for non-binary masks.
+ * `Make List (Any)` - Create a list with arbitrary values.
+ * `Select Nth Item (Any list)` - Selects the Nth item from a list. If the index is out of range, it returns the last item in the list.
+
+
+### Logics (experimental)
+ * These nodes are experimental nodes designed to implement the logic for loops and dynamic switching.
+ * `ImpactCompare`, `ImpactConditionalBranch`, `ImpactConditionalBranchSelMode`, `ImpactInt`, `ImpactBoolean`, `ImpactValueSender`, `ImpactValueReceiver`, `ImpactImageInfo`, `ImpactMinMax`, `ImpactNeg`, `ImpactConditionalStopIteration`
+ * `ImpactIsNotEmptySEGS` - This node returns `true` only if the input SEGS is not empty.
+ * `ImpactIfNone` - Returns `true` if any_input is None, and returns `false` if it is not None.
+ * `Queue Trigger` - When this node is executed, it adds a new queue to assist with repetitive tasks. It will only execute if the signal's status changes.
+ * `Queue Trigger (Countdown)` - Like the Queue Trigger, it adds a queue, but only while the count is greater than 1, decrementing the count by one each time it runs.
+ * `Sleep` - Waits for the specified time (in seconds).
+ * `Set Widget Value` - This node sets the value of one of its optional inputs into the specified node's widget. An error may occur if the types do not match.
+ * `Set Mute State` - This node changes the mute state of a specific node.
+ * `Control Bridge` - This node modifies the state of the connected control nodes based on the `mode` and `behavior`. If there are nodes that require a change, the current execution is paused, the mute status is updated, and a new prompt queue is inserted.
+ * When the `mode` is `active`, it makes the connected control nodes active regardless of the behavior.
+ * When the `mode` is `Bypass/Mute`, it changes the state of the connected nodes based on whether the behavior is `Bypass` or `Mute`.
+ * **Limitation**: Due to these characteristics, it does not function correctly when the batch count exceeds 1. Additionally, it does not guarantee proper operation when the seed is randomized or when the state of nodes is altered by actions such as `Queue Trigger`, `Set Widget Value`, or `Set Mute State` before the Control Bridge.
+ * When utilizing this node, please structure the workflow in such a way that `Queue Trigger`, `Set Widget Value`, `Set Mute State`, and similar actions are executed at the end of the workflow.
+ * If you want to change the value of the seed at each iteration, please ensure that Set Widget Value is executed at the end of the workflow instead of using randomization.
+ * It is not a problem if the seed changes due to randomization, as long as it occurs after the Control Bridge section.
+ * `Remote Boolean (on prompt)`, `Remote Int (on prompt)` - At the start of the prompt, this node forcibly sets the `widget_value` of `node_id`. It is disregarded if the target widget type is different.
+ * You can find the `node_id` by checking through [ComfyUI-Manager](https://github.com/ltdrdata/ComfyUI-Manager) using the format `Badge: #ID Nickname`.
+ * Experimental set of nodes for implementing loop functionality (tutorial to be prepared later / [example workflow](test/loop-test.json)).
+
+
+### Limitation
+* Many nodes in the `Impact Pack` use a wildcard type to allow arbitrary input/output connections. This approach will be replaced once ComfyUI officially supports **dynamic types**. Until then, while it functions without issues, type validation may still produce error messages.
+
+
+### HuggingFace nodes
+ * These nodes provide functionalities based on HuggingFace repository models.
+ * The path where the HuggingFace model cache is stored can be changed through the `HF_HOME` environment variable.
+ * `HF Transformers Classifier Provider` - This is a node that provides a classifier based on HuggingFace's transformers models.
+ * The 'repo id' parameter should contain HuggingFace's repo id. When `preset_repo_id` is set to `Manual repo id`, the manually entered repo id in `manual_repo_id` is used.
+ * e.g. 'rizvandwiki/gender-classification-2' is a repository that provides a model for gender classification.
+ * `SEGS Classify` - This node utilizes the `TRANSFORMERS_CLASSIFIER` loaded with 'HF Transformers Classifier Provider' to classify `SEGS` (a rough sketch follows this section).
+ * The 'expr' allows for forms like `label > number`, and in the case of `preset_expr` being `Manual expr`, it uses the expression entered in `manual_expr`.
+ * For example, in the case of `male <= 0.4`, if the score of the `male` label in the classification result is less than or equal to 0.4, it is categorized as `filtered_SEGS`; otherwise, it is categorized as `remained_SEGS`.
+ * For supported labels, please refer to the `config.json` of the respective HuggingFace repository.
+ * `#Female` and `#Male` are symbols that, for convenience, group multiple labels such as `Female, women, woman, ...`, rather than being single labels.
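+
+As a rough illustration of what the classifier does internally, the sketch below calls HuggingFace's `transformers` pipeline directly with the gender-classification repo mentioned above. The file name and threshold are assumptions for the example; the actual nodes handle model loading and SEGS iteration themselves.
+
+```python
+from PIL import Image
+from transformers import pipeline
+
+# Same repo id as the example above; downloads into the HF_HOME cache.
+classifier = pipeline("image-classification",
+                      model="rizvandwiki/gender-classification-2")
+
+image = Image.open("face_crop.png")  # hypothetical cropped SEG image
+scores = {r["label"]: r["score"] for r in classifier(image)}
+
+# An expr such as `male <= 0.4` then routes the SEG:
+if scores.get("male", 0.0) <= 0.4:
+    print("-> filtered_SEGS")
+else:
+    print("-> remained_SEGS")
+```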
+
+
+### Etc nodes
+ * `Impact Scheduler Adapter` - With the addition of AYS to the schedulers of the Impact Pack and Inspire Pack, an incompatibility arises when the existing scheduler widget is converted to an input. The Impact Scheduler Adapter enables an indirect connection in such cases.
+ * `StringListToString` - Convert a String List to a String
+ * `WildcardPromptFromString` - Create a labeled wildcard for the detailer from a string.
+ * This node works well when used with MakeTileSEGS. [[Link](https://github.com/ltdrdata/ComfyUI-Impact-Pack/pull/536#discussion_r1586060779)]
+
+ * `String Selector` - It selects and returns a portion of the string. When `multiline` mode is disabled, it simply returns the string of the line pointed to by the selector. When `multiline` mode is enabled, it divides the string based on lines that start with `#` and returns them. If the `select` value is larger than the number of items, it will start counting from the first line again and return accordingly.
+ * `Combine Conditionings` - It takes multiple conditionings as input and combines them into a single conditioning.
+ * `Concat Conditionings` - It takes multiple conditionings as input and concatenates them into a single conditioning.
+ * `Negative Cond Placeholder` - Models like FLUX.1 do not use Negative Conditioning. This is a placeholder node for them. You can use FLUX.1 by replacing the Negative Conditioning used in Impact KSampler, KSampler (Inspire), and Detailer with this node.
+ * `Execution Order Controller` - A helper node that can forcibly control the execution order of nodes.
+ * Connect the output of the node that should be executed first to the signal, and make the input of the node that should be executed later pass through this node.
+ * `List Bridge` - When passing a list output through this node, it collects and organizes the data before forwarding it, which ensures that the previous stage's sub-workflow has been completed.
+
+
+## Feature
+* `Interactive SAM Detector (Clipspace)` - When you right-click on a node that has 'MASK' and 'IMAGE' outputs, a context menu will open. From this menu, you can either open a dialog to create a SAM Mask using 'Open in SAM Detector', or copy the content (likely mask data) using 'Copy (Clipspace)' and generate a mask using 'Impact SAM Detector' from the clipspace menu, and then paste it using 'Paste (Clipspace)'.
+* Detects errors that occur when models and clips from checkpoints such as `SDXL Base`, `SDXL Refiner`, `SD1.x`, and `SD2.x` are mixed during sampling, and reports an appropriate error.
+
+
+## How To Install?
+
+### Install via ComfyUI-Manager (Recommended)
+* Search `ComfyUI Impact Pack` in ComfyUI-Manager and click the `Install` button.
+
+### Manual Install (Not Recommended)
+1. `cd custom_nodes`
+2. `git clone https://github.com/ltdrdata/ComfyUI-Impact-Pack`
+3. `cd ComfyUI-Impact-Pack`
+4. `pip install -r requirements.txt`
+ * **IMPORTANT**:
+ * You must install it within the Python environment where ComfyUI is running.
+ * For the portable version, use `\python_embeded\python.exe -m pip` instead of `pip`. For a `venv`, activate the `venv` first and then use `pip`.
+5. Restart ComfyUI
+
+* NOTE1: If an error occurs during the installation process, please refer to the [Troubleshooting Page](troubleshooting/TROUBLESHOOTING.md) for assistance.
+* NOTE2: You can use this [colab notebook](https://colab.research.google.com/github/ltdrdata/ComfyUI-Impact-Pack/blob/Main/notebook/comfyui_colab_impact_pack.ipynb) to launch ComfyUI. This notebook automatically downloads the impact pack to the custom_nodes directory, installs the tested dependencies, and runs it.
+* NOTE3: If you create an empty file named `skip_download_model` in the `ComfyUI/custom_nodes/` directory, it will skip the model download step during the installation of the impact pack.
+
+
+## Package Dependencies (if you need to set up manually)
+
+* pip install
+ * segment-anything
+ * scikit-image
+ * piexif
+ * opencv-python
+ * scipy
+ * numpy<2
+ * dill
+ * matplotlib
+ * (optional) onnxruntime
+ * (deprecated) openmim # for mim
+ * (deprecated) pycocotools # for mim
+
+* linux packages (ubuntu)
+ * libgl1-mesa-glx
+ * libglib2.0-0
+
+
+## Config example
+* Once you run the Impact Pack for the first time, an `impact-pack.ini` file will be automatically generated in the Impact Pack directory. You can modify this configuration file to customize the default behavior.
+ * `dependency_version` - don't touch this
+ * `sam_editor_cpu` - use the cpu for the `SAM editor` instead of the gpu
+ * `sam_editor_model` - Specify the SAM model for the SAM editor.
+ * You can download various SAM models using ComfyUI-Manager.
+ * Path to SAM model: `ComfyUI/models/sams`
+```
+[default]
+sam_editor_cpu = False
+sam_editor_model = sam_vit_b_01ec64.pth
+```
+
+
+## Other Materials (auto-download when installing)
+
+* ComfyUI/models/sams <= https://dl.fbaipublicfiles.com/segment_anything/sam_vit_b_01ec64.pth
+
+
+## Troubleshooting page
+* [Troubleshooting Page](troubleshooting/TROUBLESHOOTING.md)
+
+
+## How To Use (DDetailer feature)
+
+#### 1. Basic auto face detection and refine example.
+
+* A face that has been damaged due to low resolution is regenerated at high resolution and composited back, in order to restore its details.
+* The FaceDetailer node is a combination of a Detector node for face detection and a Detailer node for image enhancement. See the [Advanced Tutorial](https://github.com/ltdrdata/ComfyUI-extension-tutorials/raw/Main/ComfyUI-Impact-Pack/tutorial/advanced.md) for a more detailed explanation.
+* The MASK output of FaceDetailer provides a visualization of where the detected and enhanced areas are.
+
+
+* You can see that the face in the image on the left has increased detail, as in the image on the right.
+
+#### 2. 2Pass refine (restore a severely damaged face)
+
+* Although two FaceDetailers can be attached together for a 2-pass configuration, the various common inputs used in KSampler can be passed through DETAILER_PIPE, so FaceDetailerPipe can be used for an easy setup.
+* In the first pass, only rough outline recovery is needed, so restore at a reasonable resolution with low settings. Note that if you increase the dilation here, not only the face but also the surrounding parts are included in the recovery range, which is useful when you need to reshape areas beyond the face itself.
+
+
+* In the first stage, the severely damaged face is restored to some extent, and in the second stage, the details are restored.
+
+#### 3. Face Bbox(bounding box) + Person silhouette segmentation (prevents distortion of the background)
+
+
+* Facial synthesis that emphasizes details is delicately aligned with the contours of the face, and it can be observed that the image outside of the face is not affected.
+
+* The BBoxDetectorForEach node is used to detect faces, and the SAMDetectorCombined node is used to find the segment related to the detected face. By using the Segs & Mask node with the two masks obtained in this way, an accurate mask that intersects based on segs can be generated. If this generated mask is input to the DetailerForEach node, only the target area can be created in high resolution from the image and then composited.
+
+#### 4. Iterative Upscale
+
+
+* The IterativeUpscale node enlarges an image/latent by a scale_factor. In this process, the upscale is carried out progressively by dividing it into steps.
+* IterativeUpscale takes an Upscaler as an input, similar to a plugin, and uses it during each iteration. PixelKSampleUpscalerProvider is an Upscaler that converts the latent representation to pixel space and applies ksampling.
+ * The upscale_model_opt is an optional parameter that determines whether to use an upscale model when available. Using an upscale model can significantly reduce the number of iterative steps required. If an x2 upscaler is used, the image/latent is first upscaled by a factor of 2 and then downscaled to the target scale at each step before further processing is done.
+
+* The following shows an image of 304x512 pixels and the same image upscaled to three times its original size using IterativeUpscale.
+
+
+
+#### 5. Interactive SAM Detector (Clipspace)
+
+* When you right-click on the node that outputs 'MASK' and 'IMAGE', a menu called "Open in SAM Detector" appears, as shown in the following picture. Clicking on the menu opens a dialog based on SAM's functionality, allowing you to generate a segment mask.
+
+
+* Clicking the left mouse button on a coordinate enters a positive prompt in blue, indicating an area that should be included. Clicking the right mouse button on a coordinate enters a negative prompt in red, indicating an area that should be excluded.
+* You can remove the points that were added by using the "undo" button. After selecting the points, pressing the "detect" button generates the mask. Additionally, you can adjust the fidelity slider to determine the extent to which the mask belongs to the confidence region.
+
+
+* If you opened the dialog through "Open in SAM Detector" from the node, you can directly apply the changes by clicking the "Save to node" button. However, if you opened the dialog through the "clipspace" menu, you can save it to clipspace by clicking the "Save" button.
+
+
+* When you execute using the reflected mask in the node, you can observe that the image and mask are displayed separately.
+
+
+## Other Tutorials
+* [ComfyUI-extension-tutorials/ComfyUI-Impact-Pack](https://github.com/ltdrdata/ComfyUI-extension-tutorials/tree/Main/ComfyUI-Impact-Pack) - You can find various tutorials and workflows on this page.
+* [Advanced Tutorial](https://github.com/ltdrdata/ComfyUI-extension-tutorials/blob/Main/ComfyUI-Impact-Pack/tutorial/advanced.md)
+* [SAM Application](https://github.com/ltdrdata/ComfyUI-extension-tutorials/blob/Main/ComfyUI-Impact-Pack/tutorial/sam.md)
+* [PreviewBridge](https://github.com/ltdrdata/ComfyUI-extension-tutorials/blob/Main/ComfyUI-Impact-Pack/tutorial/previewbridge.md)
+* [Mask Pointer](https://github.com/ltdrdata/ComfyUI-extension-tutorials/blob/Main/ComfyUI-Impact-Pack/tutorial/maskpointer.md)
+* [ONNX Tutorial](https://github.com/ltdrdata/ComfyUI-extension-tutorials/blob/Main/ComfyUI-Impact-Pack/tutorial/ONNX.md)
+* [CLIPSeg Tutorial](https://github.com/ltdrdata/ComfyUI-extension-tutorials/blob/Main/ComfyUI-Impact-Pack/tutorial/clipseg.md)
+* [Extreme Highresolution Upscale](https://github.com/ltdrdata/ComfyUI-extension-tutorials/blob/Main/ComfyUI-Impact-Pack/tutorial/extreme-upscale.md)
+* [TwoSamplersForMask](https://github.com/ltdrdata/ComfyUI-extension-tutorials/blob/Main/ComfyUI-Impact-Pack/tutorial/TwoSamplers.md)
+* [TwoAdvancedSamplersForMask](https://github.com/ltdrdata/ComfyUI-extension-tutorials/blob/Main/ComfyUI-Impact-Pack/tutorial/TwoAdvancedSamplers.md)
+* [Advanced Iterative Upscale: PK_HOOK](https://github.com/ltdrdata/ComfyUI-extension-tutorials/blob/Main/ComfyUI-Impact-Pack/tutorial/pk_hook.md)
+* [Advanced Iterative Upscale: TwoSamplersForMask Upscale Provider](https://github.com/ltdrdata/ComfyUI-extension-tutorials/blob/Main/ComfyUI-Impact-Pack/tutorial/TwoSamplersUpscale.md)
+* [Interactive SAM + PreviewBridge](https://github.com/ltdrdata/ComfyUI-extension-tutorials/blob/Main/ComfyUI-Impact-Pack/tutorial/sam_with_preview_bridge.md)
+* [ImageSender/ImageReceiver/LatentSender/LatentReceiver](https://github.com/ltdrdata/ComfyUI-extension-tutorials/blob/Main/ComfyUI-Impact-Pack/tutorial/sender_receiver.md)
+* [ImpactWildcardProcessor](https://github.com/ltdrdata/ComfyUI-extension-tutorials/blob/Main/ComfyUI-Impact-Pack/tutorial/ImpactWildcardProcessor.md)
+
+
+## Credits
+
+ComfyUI/[ComfyUI](https://github.com/comfyanonymous/ComfyUI) - A powerful and modular stable diffusion GUI.
+
+dustysys/[ddetailer](https://github.com/dustysys/ddetailer) - DDetailer extension for Stable-Diffusion-WebUI.
+
+Bing-su/[dddetailer](https://github.com/Bing-su/dddetailer) - The anime-face-detector used in ddetailer has been updated to be compatible with mmdet 3.0.0, and a patch has also been applied to the pycocotools dependency for the Windows environment in ddetailer.
+
+facebook/[segment-anything](https://github.com/facebookresearch/segment-anything) - Segment Anything!
+
+hysts/[anime-face-detector](https://github.com/hysts/anime-face-detector) - Creator of `anime-face_yolov3`, which has impressive performance on a variety of art styles.
+
+open-mmlab/[mmdetection](https://github.com/open-mmlab/mmdetection) - Object detection toolset. `dd-person_mask2former` was trained via transfer learning using their [R-50 Mask2Former instance segmentation model](https://github.com/open-mmlab/mmdetection/tree/master/configs/mask2former#instance-segmentation) as a base.
+
+biegert/[ComfyUI-CLIPSeg](https://github.com/biegert/ComfyUI-CLIPSeg) - This is a custom node that enables the use of CLIPSeg technology, which can find segments through prompts, in ComfyUI.
+
+BlenderNeko/[ComfyUI_TiledKSampler](https://github.com/BlenderNeko/ComfyUI_TiledKSampler) - The tile sampler allows high-resolution sampling even in places with low GPU VRAM.
+
+BlenderNeko/[ComfyUI_Noise](https://github.com/BlenderNeko/ComfyUI_Noise) - The noise injection feature relies on this extension's noise functions and slerp code for noise variation.
+
+WASasquatch/[was-node-suite-comfyui](https://github.com/WASasquatch/was-node-suite-comfyui) - A powerful set of custom node extensions for ComfyUI.
+
+Trung0246/[ComfyUI-0246](https://github.com/Trung0246/ComfyUI-0246) - Nice bypass hack!
+
+Layer-norm/[comfyui-lama-remover](https://github.com/Layer-norm/comfyui-lama-remover) - Required for using `LamaRemoverDetailerHookProvider`.
diff --git a/zavodik/nodes/ComfyUI-Impact-Pack/__init__.py b/zavodik/nodes/ComfyUI-Impact-Pack/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..b98cbcfe9d16bcf76a2a102aed1478a9b108786c
--- /dev/null
+++ b/zavodik/nodes/ComfyUI-Impact-Pack/__init__.py
@@ -0,0 +1,456 @@
+"""
+@author: Dr.Lt.Data
+@title: Impact Pack
+@nickname: Impact Pack
+@description: This extension offers various detector nodes and detailer nodes that allow you to configure a workflow that automatically enhances facial details. It also provides an iterative upscaler.
+"""
+
+import folder_paths
+import os
+import sys
+import logging
+
+comfy_path = os.path.dirname(folder_paths.__file__)
+impact_path = os.path.join(os.path.dirname(__file__))
+modules_path = os.path.join(os.path.dirname(__file__), "modules")
+
+sys.path.append(modules_path)
+
+import impact.config
+logging.info(f"### Loading: ComfyUI-Impact-Pack ({impact.config.version})")
+
+# Core
+# recheck dependencies for colab
+try:
+    import folder_paths
+    import torch # noqa: F401
+    import cv2 # noqa: F401
+    from cv2 import setNumThreads # noqa: F401
+    import numpy as np # noqa: F401
+    import comfy.samplers
+    import comfy.sd # noqa: F401
+    from PIL import Image, ImageFilter # noqa: F401
+    from skimage.measure import label, regionprops # noqa: F401
+    from collections import namedtuple # noqa: F401
+    import piexif # noqa: F401
+    import nodes
+except Exception as e:
+    import logging
+    logging.error("[Impact Pack] Failed to import because some dependencies are missing!")
+    raise e
+
+
+import impact.impact_server # to load server api
+
+from .modules.impact.impact_pack import * # noqa: F403
+from .modules.impact.detectors import * # noqa: F403
+from .modules.impact.pipe import * # noqa: F403
+from .modules.impact.logics import * # noqa: F403
+from .modules.impact.util_nodes import * # noqa: F403
+from .modules.impact.segs_nodes import * # noqa: F403
+from .modules.impact.special_samplers import * # noqa: F403
+from .modules.impact.hf_nodes import * # noqa: F403
+from .modules.impact.bridge_nodes import * # noqa: F403
+from .modules.impact.hook_nodes import * # noqa: F403
+from .modules.impact.animatediff_nodes import * # noqa: F403
+from .modules.impact.segs_upscaler import * # noqa: F403
+
+import threading
+
+
+threading.Thread(target=impact.wildcards.wildcard_load).start()
+
+
+NODE_CLASS_MAPPINGS = {
+    "SAMLoader": SAMLoader, # noqa: F405
+    "CLIPSegDetectorProvider": CLIPSegDetectorProvider, # noqa: F405
+    "ONNXDetectorProvider": ONNXDetectorProvider, # noqa: F405
+
+    "BitwiseAndMaskForEach": BitwiseAndMaskForEach, # noqa: F405
+    "SubtractMaskForEach": SubtractMaskForEach, # noqa: F405
+
+    "DetailerForEach": DetailerForEach, # noqa: F405
+    "DetailerForEachAutoRetry": DetailerForEachAutoRetry, # noqa: F405
+    "DetailerForEachDebug": DetailerForEachTest, # noqa: F405
+    "DetailerForEachPipe": DetailerForEachPipe, # noqa: F405
+    "DetailerForEachDebugPipe": DetailerForEachTestPipe, # noqa: F405
+
"DetailerForEachPipeForAnimateDiff": DetailerForEachPipeForAnimateDiff, # noqa: F405 + + "SAMDetectorCombined": SAMDetectorCombined, # noqa: F405 + "SAMDetectorSegmented": SAMDetectorSegmented, # noqa: F405 + + "FaceDetailer": FaceDetailer, # noqa: F405 + "FaceDetailerPipe": FaceDetailerPipe, # noqa: F405 + "MaskDetailerPipe": MaskDetailerPipe, # noqa: F405 + + "ToDetailerPipe": ToDetailerPipe, # noqa: F405 + "ToDetailerPipeSDXL": ToDetailerPipeSDXL, # noqa: F405 + "FromDetailerPipe": FromDetailerPipe, # noqa: F405 + "FromDetailerPipe_v2": FromDetailerPipe_v2, # noqa: F405 + "FromDetailerPipeSDXL": FromDetailerPipe_SDXL, # noqa: F405 + "AnyPipeToBasic": AnyPipeToBasic, # noqa: F405 + "ToBasicPipe": ToBasicPipe, # noqa: F405 + "FromBasicPipe": FromBasicPipe, # noqa: F405 + "FromBasicPipe_v2": FromBasicPipe_v2, # noqa: F405 + "BasicPipeToDetailerPipe": BasicPipeToDetailerPipe, # noqa: F405 + "BasicPipeToDetailerPipeSDXL": BasicPipeToDetailerPipeSDXL, # noqa: F405 + "DetailerPipeToBasicPipe": DetailerPipeToBasicPipe, # noqa: F405 + "EditBasicPipe": EditBasicPipe, # noqa: F405 + "EditDetailerPipe": EditDetailerPipe, # noqa: F405 + "EditDetailerPipeSDXL": EditDetailerPipeSDXL, # noqa: F405 + + "LatentPixelScale": LatentPixelScale, # noqa: F405 + "PixelKSampleUpscalerProvider": PixelKSampleUpscalerProvider, # noqa: F405 + "PixelKSampleUpscalerProviderPipe": PixelKSampleUpscalerProviderPipe, # noqa: F405 + "IterativeLatentUpscale": IterativeLatentUpscale, # noqa: F405 + "IterativeImageUpscale": IterativeImageUpscale, # noqa: F405 + "PixelTiledKSampleUpscalerProvider": PixelTiledKSampleUpscalerProvider, # noqa: F405 + "PixelTiledKSampleUpscalerProviderPipe": PixelTiledKSampleUpscalerProviderPipe, # noqa: F405 + "TwoSamplersForMaskUpscalerProvider": TwoSamplersForMaskUpscalerProvider, # noqa: F405 + "TwoSamplersForMaskUpscalerProviderPipe": TwoSamplersForMaskUpscalerProviderPipe, # noqa: F405 + + "PixelKSampleHookCombine": PixelKSampleHookCombine, # noqa: F405 + "DenoiseScheduleHookProvider": DenoiseScheduleHookProvider, # noqa: F405 + "StepsScheduleHookProvider": StepsScheduleHookProvider, # noqa: F405 + "CfgScheduleHookProvider": CfgScheduleHookProvider, # noqa: F405 + "NoiseInjectionHookProvider": NoiseInjectionHookProvider, # noqa: F405 + "UnsamplerHookProvider": UnsamplerHookProvider, # noqa: F405 + "CoreMLDetailerHookProvider": CoreMLDetailerHookProvider, # noqa: F405 + "PreviewDetailerHookProvider": PreviewDetailerHookProvider, # noqa: F405 + "BlackPatchRetryHookProvider": BlackPatchRetryHookProvider, # noqa: F405 + "CustomSamplerDetailerHookProvider": CustomSamplerDetailerHookProvider, # noqa: F405 + "LamaRemoverDetailerHookProvider": LamaRemoverDetailerHookProvider, # noqa: F405 + + "DetailerHookCombine": DetailerHookCombine, # noqa: F405 + "NoiseInjectionDetailerHookProvider": NoiseInjectionDetailerHookProvider, # noqa: F405 + "UnsamplerDetailerHookProvider": UnsamplerDetailerHookProvider, # noqa: F405 + "DenoiseSchedulerDetailerHookProvider": DenoiseSchedulerDetailerHookProvider, # noqa: F405 + "SEGSOrderedFilterDetailerHookProvider": SEGSOrderedFilterDetailerHookProvider, # noqa: F405 + "SEGSRangeFilterDetailerHookProvider": SEGSRangeFilterDetailerHookProvider, # noqa: F405 + "SEGSLabelFilterDetailerHookProvider": SEGSLabelFilterDetailerHookProvider, # noqa: F405 + "VariationNoiseDetailerHookProvider": VariationNoiseDetailerHookProvider, # noqa: F405 + # "CustomNoiseDetailerHookProvider": CustomNoiseDetailerHookProvider, + + "BitwiseAndMask": BitwiseAndMask, # noqa: F405 + 
"SubtractMask": SubtractMask, # noqa: F405 + "AddMask": AddMask, # noqa: F405 + "MaskRectArea": MaskRectArea, # noqa: F405 + "MaskRectAreaAdvanced": MaskRectAreaAdvanced, # noqa: F405 + "ImpactSegsAndMask": SegsBitwiseAndMask, # noqa: F405 + "ImpactSegsAndMaskForEach": SegsBitwiseAndMaskForEach, # noqa: F405 + "EmptySegs": EmptySEGS, # noqa: F405 + "ImpactFlattenMask": FlattenMask, # noqa: F405 + + "MediaPipeFaceMeshToSEGS": MediaPipeFaceMeshToSEGS, # noqa: F405 + "MaskToSEGS": MaskToSEGS, # noqa: F405 + "MaskToSEGS_for_AnimateDiff": MaskToSEGS_for_AnimateDiff, # noqa: F405 + "ToBinaryMask": ToBinaryMask, # noqa: F405 + "MasksToMaskList": MasksToMaskList, # noqa: F405 + "MaskListToMaskBatch": MaskListToMaskBatch, # noqa: F405 + "ImageListToImageBatch": ImageListToImageBatch, # noqa: F405 + "SetDefaultImageForSEGS": DefaultImageForSEGS, # noqa: F405 + "RemoveImageFromSEGS": RemoveImageFromSEGS, # noqa: F405 + + "BboxDetectorSEGS": BboxDetectorForEach, # noqa: F405 + "SegmDetectorSEGS": SegmDetectorForEach, # noqa: F405 + "ONNXDetectorSEGS": BboxDetectorForEach, # noqa: F405 + "ImpactSimpleDetectorSEGS_for_AD": SimpleDetectorForAnimateDiff, # noqa: F405 + "ImpactSAM2VideoDetectorSEGS": SAM2VideoDetectorSEGS, # noqa: F405 + "ImpactSimpleDetectorSEGS": SimpleDetectorForEach, # noqa: F405 + "ImpactSimpleDetectorSEGSPipe": SimpleDetectorForEachPipe, # noqa: F405 + "ImpactControlNetApplySEGS": ControlNetApplySEGS, # noqa: F405 + "ImpactControlNetApplyAdvancedSEGS": ControlNetApplyAdvancedSEGS, # noqa: F405 + "ImpactControlNetClearSEGS": ControlNetClearSEGS, # noqa: F405 + "ImpactIPAdapterApplySEGS": IPAdapterApplySEGS, # noqa: F405 + + "ImpactDecomposeSEGS": DecomposeSEGS, # noqa: F405 + "ImpactAssembleSEGS": AssembleSEGS, # noqa: F405 + "ImpactFrom_SEG_ELT": From_SEG_ELT, # noqa: F405 + "ImpactEdit_SEG_ELT": Edit_SEG_ELT, # noqa: F405 + "ImpactDilate_Mask_SEG_ELT": Dilate_SEG_ELT, # noqa: F405 + "ImpactDilateMask": DilateMask, # noqa: F405 + "ImpactGaussianBlurMask": GaussianBlurMask, # noqa: F405 + "ImpactDilateMaskInSEGS": DilateMaskInSEGS, # noqa: F405 + "ImpactGaussianBlurMaskInSEGS": GaussianBlurMaskInSEGS, # noqa: F405 + "ImpactScaleBy_BBOX_SEG_ELT": SEG_ELT_BBOX_ScaleBy, # noqa: F405 + "ImpactFrom_SEG_ELT_bbox": From_SEG_ELT_bbox, # noqa: F405 + "ImpactFrom_SEG_ELT_crop_region": From_SEG_ELT_crop_region, # noqa: F405 + "ImpactCount_Elts_in_SEGS": Count_Elts_in_SEGS, # noqa: F405 + + "BboxDetectorCombined_v2": BboxDetectorCombined, # noqa: F405 + "SegmDetectorCombined_v2": SegmDetectorCombined, # noqa: F405 + "SegsToCombinedMask": SegsToCombinedMask, # noqa: F405 + + "KSamplerProvider": KSamplerProvider, # noqa: F405 + "TwoSamplersForMask": TwoSamplersForMask, # noqa: F405 + "TiledKSamplerProvider": TiledKSamplerProvider, # noqa: F405 + + "KSamplerAdvancedProvider": KSamplerAdvancedProvider, # noqa: F405 + "TwoAdvancedSamplersForMask": TwoAdvancedSamplersForMask, # noqa: F405 + + "ImpactNegativeConditioningPlaceholder": NegativeConditioningPlaceholder, # noqa: F405 + + "PreviewBridge": PreviewBridge, # noqa: F405 + "PreviewBridgeLatent": PreviewBridgeLatent, # noqa: F405 + "ImageSender": ImageSender, # noqa: F405 + "ImageReceiver": ImageReceiver, # noqa: F405 + "LatentSender": LatentSender, # noqa: F405 + "LatentReceiver": LatentReceiver, # noqa: F405 + "ImageMaskSwitch": ImageMaskSwitch, # noqa: F405 + "LatentSwitch": GeneralSwitch, # noqa: F405 + "SEGSSwitch": GeneralSwitch, # noqa: F405 + "ImpactSwitch": GeneralSwitch, # noqa: F405 + "ImpactInversedSwitch": GeneralInversedSwitch, # 
noqa: F405 + + "ImpactWildcardProcessor": ImpactWildcardProcessor, # noqa: F405 + "ImpactWildcardEncode": ImpactWildcardEncode, # noqa: F405 + + "SEGSUpscaler": SEGSUpscaler, # noqa: F405 + "SEGSUpscalerPipe": SEGSUpscalerPipe, # noqa: F405 + "SEGSDetailer": SEGSDetailer, # noqa: F405 + "SEGSPaste": SEGSPaste, # noqa: F405 + "SEGSPreview": SEGSPreview, # noqa: F405 + "SEGSPreviewCNet": SEGSPreviewCNet, # noqa: F405 + "SEGSToImageList": SEGSToImageList, # noqa: F405 + "ImpactSEGSToMaskList": SEGSToMaskList, # noqa: F405 + "ImpactSEGSToMaskBatch": SEGSToMaskBatch, # noqa: F405 + "ImpactSEGSConcat": SEGSConcat, # noqa: F405 + "ImpactSEGSPicker": SEGSPicker, # noqa: F405 + "ImpactMakeTileSEGS": MakeTileSEGS, # noqa: F405 + "ImpactSEGSMerge": SEGSMerge, # noqa: F405 + + "SEGSDetailerForAnimateDiff": SEGSDetailerForAnimateDiff, # noqa: F405 + + "ImpactKSamplerBasicPipe": KSamplerBasicPipe, # noqa: F405 + "ImpactKSamplerAdvancedBasicPipe": KSamplerAdvancedBasicPipe, # noqa: F405 + + "ReencodeLatent": ReencodeLatent, # noqa: F405 + "ReencodeLatentPipe": ReencodeLatentPipe, # noqa: F405 + + "ImpactImageBatchToImageList": ImageBatchToImageList, # noqa: F405 + "ImpactMakeImageList": MakeImageList, # noqa: F405 + "ImpactMakeImageBatch": MakeImageBatch, # noqa: F405 + "ImpactMakeAnyList": MakeAnyList, # noqa: F405 + "ImpactMakeMaskList": MakeMaskList, # noqa: F405 + "ImpactMakeMaskBatch": MakeMaskBatch, # noqa: F405 + "ImpactSelectNthItemOfAnyList": NthItemOfAnyList, # noqa: F405 + + "RegionalSampler": RegionalSampler, # noqa: F405 + "RegionalSamplerAdvanced": RegionalSamplerAdvanced, # noqa: F405 + "CombineRegionalPrompts": CombineRegionalPrompts, # noqa: F405 + "RegionalPrompt": RegionalPrompt, # noqa: F405 + + "ImpactCombineConditionings": CombineConditionings, # noqa: F405 + "ImpactConcatConditionings": ConcatConditionings, # noqa: F405 + + "ImpactSEGSLabelAssign": SEGSLabelAssign, # noqa: F405 + "ImpactSEGSLabelFilter": SEGSLabelFilter, # noqa: F405 + "ImpactSEGSRangeFilter": SEGSRangeFilter, # noqa: F405 + "ImpactSEGSOrderedFilter": SEGSOrderedFilter, # noqa: F405 + "ImpactSEGSIntersectionFilter": SEGSIntersectionFilter, # noqa: F405 + "ImpactSEGSNMSFilter": SEGSNMSFilter, # noqa: F405 + + "ImpactCompare": ImpactCompare, # noqa: F405 + "ImpactConditionalBranch": ImpactConditionalBranch, # noqa: F405 + "ImpactConditionalBranchSelMode": ImpactConditionalBranchSelMode, # noqa: F405 + "ImpactIfNone": ImpactIfNone, # noqa: F405 + "ImpactConvertDataType": ImpactConvertDataType, # noqa: F405 + "ImpactLogicalOperators": ImpactLogicalOperators, # noqa: F405 + "ImpactInt": ImpactInt, # noqa: F405 + "ImpactFloat": ImpactFloat, # noqa: F405 + "ImpactBoolean": ImpactBoolean, # noqa: F405 + "ImpactValueSender": ImpactValueSender, # noqa: F405 + "ImpactValueReceiver": ImpactValueReceiver, # noqa: F405 + "ImpactImageInfo": ImpactImageInfo, # noqa: F405 + "ImpactLatentInfo": ImpactLatentInfo, # noqa: F405 + "ImpactMinMax": ImpactMinMax, # noqa: F405 + "ImpactNeg": ImpactNeg, # noqa: F405 + "ImpactConditionalStopIteration": ImpactConditionalStopIteration, # noqa: F405 + "ImpactStringSelector": StringSelector, # noqa: F405 + "StringListToString": StringListToString, # noqa: F405 + "WildcardPromptFromString": WildcardPromptFromString, # noqa: F405 + "ImpactExecutionOrderController": ImpactExecutionOrderController, # noqa: F405 + "ImpactListBridge": ImpactListBridge, # noqa: F405 + + "RemoveNoiseMask": RemoveNoiseMask, # noqa: F405 + + "ImpactLogger": ImpactLogger, # noqa: F405 + "ImpactDummyInput": ImpactDummyInput, 
# noqa: F405 + + "ImpactQueueTrigger": ImpactQueueTrigger, # noqa: F405 + "ImpactQueueTriggerCountdown": ImpactQueueTriggerCountdown, # noqa: F405 + "ImpactSetWidgetValue": ImpactSetWidgetValue, # noqa: F405 + "ImpactNodeSetMuteState": ImpactNodeSetMuteState, # noqa: F405 + "ImpactControlBridge": ImpactControlBridge, # noqa: F405 + "ImpactIsNotEmptySEGS": ImpactNotEmptySEGS, # noqa: F405 + "ImpactSleep": ImpactSleep, # noqa: F405 + "ImpactRemoteBoolean": ImpactRemoteBoolean, # noqa: F405 + "ImpactRemoteInt": ImpactRemoteInt, # noqa: F405 + + "ImpactHFTransformersClassifierProvider": HF_TransformersClassifierProvider, # noqa: F405 + "ImpactSEGSClassify": SEGS_Classify, # noqa: F405 + + "ImpactSchedulerAdapter": ImpactSchedulerAdapter, # noqa: F405 + "GITSSchedulerFuncProvider": GITSSchedulerFuncProvider # noqa: F405 +} + + +NODE_DISPLAY_NAME_MAPPINGS = { + "SAMLoader": "SAMLoader (Impact)", + + "BboxDetectorSEGS": "BBOX Detector (SEGS)", + "SegmDetectorSEGS": "SEGM Detector (SEGS)", + "ONNXDetectorSEGS": "ONNX Detector (SEGS/legacy) - use BBOXDetector", + "ImpactSimpleDetectorSEGS_for_AD": "Simple Detector for Video (SEGS)", + "ImpactSAM2VideoDetectorSEGS": "SAM2 Video Detector (SEGS)", + "ImpactSimpleDetectorSEGS": "Simple Detector (SEGS)", + "ImpactSimpleDetectorSEGSPipe": "Simple Detector (SEGS/pipe)", + "ImpactControlNetApplySEGS": "ControlNetApply (SEGS) - DEPRECATED", + "ImpactControlNetApplyAdvancedSEGS": "ControlNetApply (SEGS)", + "ImpactIPAdapterApplySEGS": "IPAdapterApply (SEGS)", + + "BboxDetectorCombined_v2": "BBOX Detector (combined)", + "SegmDetectorCombined_v2": "SEGM Detector (combined)", + "SegsToCombinedMask": "SEGS to MASK (combined)", + "MediaPipeFaceMeshToSEGS": "MediaPipe FaceMesh to SEGS", + "MaskToSEGS": "MASK to SEGS", + "MaskToSEGS_for_AnimateDiff": "MASK to SEGS for Video", + "BitwiseAndMaskForEach": "Pixelwise(SEGS & SEGS)", + "SubtractMaskForEach": "Pixelwise(SEGS - SEGS)", + "ImpactSegsAndMask": "Pixelwise(SEGS & MASK)", + "ImpactSegsAndMaskForEach": "Pixelwise(SEGS & MASKS ForEach)", + "BitwiseAndMask": "Pixelwise(MASK & MASK)", + "SubtractMask": "Pixelwise(MASK - MASK)", + "AddMask": "Pixelwise(MASK + MASK)", + "MaskRectArea": "Mask Rect Area", + "MaskRectAreaAdvanced": "Mask Rect Area (Advanced)", + "ImpactFlattenMask": "Flatten Mask Batch", + "DetailerForEach": "Detailer (SEGS)", + "DetailerForEachAutoRetry": "Detailer (SEGS) with auto retry", + "DetailerForEachPipe": "Detailer (SEGS/pipe)", + "DetailerForEachDebug": "DetailerDebug (SEGS)", + "DetailerForEachDebugPipe": "DetailerDebug (SEGS/pipe)", + "SEGSDetailerForAnimateDiff": "SEGSDetailer For Video (SEGS/pipe)", + "DetailerForEachPipeForAnimateDiff": "Detailer For Video (SEGS/pipe)", + "SEGSUpscaler": "Upscaler (SEGS)", + "SEGSUpscalerPipe": "Upscaler (SEGS/pipe)", + + "SAMDetectorCombined": "SAMDetector (combined)", + "SAMDetectorSegmented": "SAMDetector (segmented)", + "FaceDetailerPipe": "FaceDetailer (pipe)", + "MaskDetailerPipe": "MaskDetailer (pipe)", + + "FromDetailerPipeSDXL": "FromDetailer (SDXL/pipe)", + "BasicPipeToDetailerPipeSDXL": "BasicPipe -> DetailerPipe (SDXL)", + "EditDetailerPipeSDXL": "Edit DetailerPipe (SDXL)", + + "BasicPipeToDetailerPipe": "BasicPipe -> DetailerPipe", + "DetailerPipeToBasicPipe": "DetailerPipe -> BasicPipe", + "EditBasicPipe": "Edit BasicPipe", + "EditDetailerPipe": "Edit DetailerPipe", + "AnyPipeToBasic": "Any PIPE -> BasicPipe", + + "LatentPixelScale": "Latent Scale (on Pixel Space)", + "IterativeLatentUpscale": "Iterative Upscale (Latent/on Pixel Space)", + 
"IterativeImageUpscale": "Iterative Upscale (Image)", + + "TwoSamplersForMaskUpscalerProvider": "TwoSamplersForMask Upscaler Provider", + "TwoSamplersForMaskUpscalerProviderPipe": "TwoSamplersForMask Upscaler Provider (pipe)", + + "ReencodeLatent": "Reencode Latent", + "ReencodeLatentPipe": "Reencode Latent (pipe)", + + "ImpactKSamplerBasicPipe": "KSampler (pipe)", + "ImpactKSamplerAdvancedBasicPipe": "KSampler (Advanced/pipe)", + "ImpactSEGSLabelAssign": "SEGS Assign (label)", + "ImpactSEGSLabelFilter": "SEGS Filter (label)", + "ImpactSEGSRangeFilter": "SEGS Filter (range)", + "ImpactSEGSOrderedFilter": "SEGS Filter (ordered)", + "ImpactSEGSIntersectionFilter": "SEGS Filter (intersection)", + "ImpactSEGSNMSFilter": "SEGS Filter (non max suppression)", + "ImpactSEGSConcat": "SEGS Concat", + "ImpactSEGSToMaskList": "SEGS to Mask List", + "ImpactSEGSToMaskBatch": "SEGS to Mask Batch", + "ImpactSEGSPicker": "Picker (SEGS)", + "ImpactMakeTileSEGS": "Make Tile SEGS", + "ImpactSEGSMerge": "SEGS Merge", + + "ImpactDecomposeSEGS": "Decompose (SEGS)", + "ImpactAssembleSEGS": "Assemble (SEGS)", + "ImpactFrom_SEG_ELT": "From SEG_ELT", + "ImpactEdit_SEG_ELT": "Edit SEG_ELT", + "ImpactFrom_SEG_ELT_bbox": "From SEG_ELT bbox", + "ImpactFrom_SEG_ELT_crop_region": "From SEG_ELT crop_region", + "ImpactDilate_Mask_SEG_ELT": "Dilate Mask (SEG_ELT)", + "ImpactScaleBy_BBOX_SEG_ELT": "ScaleBy BBOX (SEG_ELT)", + "ImpactCount_Elts_in_SEGS": "Count Elts in SEGS", + "ImpactDilateMask": "Dilate Mask", + "ImpactGaussianBlurMask": "Gaussian Blur Mask", + "ImpactDilateMaskInSEGS": "Dilate Mask (SEGS)", + "ImpactGaussianBlurMaskInSEGS": "Gaussian Blur Mask (SEGS)", + + "PreviewBridge": "Preview Bridge (Image)", + "PreviewBridgeLatent": "Preview Bridge (Latent)", + "ImageSender": "Image Sender", + "ImageReceiver": "Image Receiver", + "ImageMaskSwitch": "Switch (images, mask)", + "ImpactSwitch": "Switch (Any)", + "ImpactInversedSwitch": "Inversed Switch (Any)", + "ImpactExecutionOrderController": "Execution Order Controller", + "ImpactListBridge": "List Bridge", + + "MasksToMaskList": "Mask Batch to Mask List", + "MaskListToMaskBatch": "Mask List to Mask Batch", + "ImpactImageBatchToImageList": "Image Batch to Image List", + "ImageListToImageBatch": "Image List to Image Batch", + + "ImpactMakeImageList": "Make Image List", + "ImpactMakeImageBatch": "Make Image Batch", + "ImpactMakeMaskList": "Make Mask List", + "ImpactMakeMaskBatch": "Make Mask Batch", + "ImpactMakeAnyList": "Make List (Any)", + "ImpactSelectNthItemOfAnyList": "Select Nth Item (Any list)", + + "ImpactStringSelector": "String Selector", + "StringListToString": "String List to String", + "WildcardPromptFromString": "Wildcard Prompt from String", + "ImpactIsNotEmptySEGS": "SEGS isn't Empty", + "SetDefaultImageForSEGS": "Set Default Image for SEGS", + "RemoveImageFromSEGS": "Remove Image from SEGS", + + "RemoveNoiseMask": "Remove Noise Mask", + + "ImpactCombineConditionings": "Combine Conditionings", + "ImpactConcatConditionings": "Concat Conditionings", + + "ImpactQueueTrigger": "Queue Trigger", + "ImpactQueueTriggerCountdown": "Queue Trigger (Countdown)", + "ImpactSetWidgetValue": "Set Widget Value", + "ImpactNodeSetMuteState": "Set Mute State", + "ImpactControlBridge": "Control Bridge", + "ImpactSleep": "Sleep", + "ImpactRemoteBoolean": "Remote Boolean (on prompt)", + "ImpactRemoteInt": "Remote Int (on prompt)", + + "ImpactHFTransformersClassifierProvider": "HF Transformers Classifier Provider", + "ImpactSEGSClassify": "SEGS Classify", + + "LatentSwitch": 
"Switch (latent/legacy)", + "SEGSSwitch": "Switch (SEGS/legacy)", + + "SEGSPreviewCNet": "SEGSPreview (CNET Image)", + + "ImpactSchedulerAdapter": "Impact Scheduler Adapter", + "GITSSchedulerFuncProvider": "GITSScheduler Func Provider", + "ImpactNegativeConditioningPlaceholder": "Negative Cond Placeholder" +} + + +# NOTE: Inject directly into EXTENSION_WEB_DIRS instead of WEB_DIRECTORY +# Provide the js path fixed as ComfyUI-Impact-Pack instead of the path name, making it available for external use + +# WEB_DIRECTORY = "js" -- deprecated method +nodes.EXTENSION_WEB_DIRS["ComfyUI-Impact-Pack"] = os.path.join(os.path.dirname(os.path.realpath(__file__)), 'js') + + +__all__ = ['NODE_CLASS_MAPPINGS', 'NODE_DISPLAY_NAME_MAPPINGS'] diff --git a/zavodik/nodes/ComfyUI-Impact-Pack/__pycache__/__init__.cpython-313.pyc b/zavodik/nodes/ComfyUI-Impact-Pack/__pycache__/__init__.cpython-313.pyc new file mode 100644 index 0000000000000000000000000000000000000000..c405c6e9f7b3e46e05688f0f131d27913134ede8 Binary files /dev/null and b/zavodik/nodes/ComfyUI-Impact-Pack/__pycache__/__init__.cpython-313.pyc differ diff --git a/zavodik/nodes/ComfyUI-Impact-Pack/custom_wildcards/put_wildcards_here b/zavodik/nodes/ComfyUI-Impact-Pack/custom_wildcards/put_wildcards_here new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/zavodik/nodes/ComfyUI-Impact-Pack/docs/wildcards/README.md b/zavodik/nodes/ComfyUI-Impact-Pack/docs/wildcards/README.md new file mode 100644 index 0000000000000000000000000000000000000000..7fb23361aeb59d7c953c33e01946316dd4520c71 --- /dev/null +++ b/zavodik/nodes/ComfyUI-Impact-Pack/docs/wildcards/README.md @@ -0,0 +1,39 @@ +# Wildcard System Documentation + +Progressive on-demand wildcard loading system for ComfyUI Impact Pack. + +## Documentation Structure + +- **[WILDCARD_SYSTEM_PRD.md](WILDCARD_SYSTEM_PRD.md)** - Product requirements and specifications +- **[WILDCARD_SYSTEM_DESIGN.md](WILDCARD_SYSTEM_DESIGN.md)** - Technical architecture and implementation +- **[WILDCARD_TESTING_GUIDE.md](WILDCARD_TESTING_GUIDE.md)** - Testing procedures and validation + +## Quick Links + +- Test Suite: `../../tests/` +- Test Samples: `../../tests/wildcards/samples/` +- Implementation: `../../modules/impact/wildcards.py` +- Server API: `../../modules/impact/impact_server.py` + +## Test Execution + +```bash +cd tests/ + +# Run all test suites +bash test_encoding.sh # UTF-8 multi-language (15 tests) +bash test_error_handling.sh # Error handling (10 tests) +bash test_edge_cases.sh # Edge cases (20 tests) +bash test_deep_nesting.sh # 7-level nesting (15 tests) +bash test_ondemand_loading.sh # On-demand loading (8 tests) +bash test_config_quotes.sh # Config quotes (5 tests) +``` + +## Status + +✅ **Production Ready** +- 73 tests, 100% pass rate (6 test suites) +- Complete PRD coverage +- Zero implementation bugs +- UTF-8 encoding verified +- Error handling validated diff --git a/zavodik/nodes/ComfyUI-Impact-Pack/docs/wildcards/SUMMARY.md b/zavodik/nodes/ComfyUI-Impact-Pack/docs/wildcards/SUMMARY.md new file mode 100644 index 0000000000000000000000000000000000000000..dbcb93a416123402d21541ab634cbcfa2d5998f1 --- /dev/null +++ b/zavodik/nodes/ComfyUI-Impact-Pack/docs/wildcards/SUMMARY.md @@ -0,0 +1,151 @@ +# Wildcard System - Project Summary + +## Overview + +Progressive on-demand wildcard loading system for ComfyUI Impact Pack with dynamic prompt support, UTF-8 encoding, and comprehensive testing. 
+ +**Status**: ✅ Production Ready +**Test Coverage**: 86 tests, 100% pass rate +**Documentation**: Complete PRD, design docs, and testing guide + +--- + +## Core Features + +- **Wildcard Expansion**: `__wildcard__` syntax with transitive multi-level expansion +- **Dynamic Prompts**: + - Basic selection: `{option1|option2|option3}` + - Weighted selection: `{10::common|1::rare}` (weight comes first) + - Multi-select: `{2$$, $$red|blue|green}` with custom separators +- **UTF-8 Support**: Korean, Chinese, Arabic, emoji, special characters +- **Pattern Matching**: Depth-agnostic `__*/name__` syntax +- **On-Demand Loading**: Progressive lazy loading with configurable cache limits +- **Error Handling**: Circular reference detection, graceful fallbacks + +--- + +## Architecture + +### Implementation +- `modules/impact/wildcards.py` - Core LazyWildcardLoader and expansion engine +- `modules/impact/impact_server.py` - Server API endpoint (/impact/wildcards) +- `modules/impact/config.py` - Configuration with quoted path support + +### Key Design Decisions +- **Lazy Loading**: Memory-efficient progressive loading strategy +- **Transitive Expansion**: Multi-level wildcard references through directory hierarchy +- **Case-Insensitive Matching**: Fuzzy matching for user convenience +- **Circular Reference Detection**: Max 100 iterations with clear error messages + +--- + +## Testing + +### Test Suites (86 tests) +1. **UTF-8 Encoding** (15 tests) - Multi-language support validation +2. **Error Handling** (10 tests) - Graceful error recovery +3. **Edge Cases** (20 tests) - Boundary conditions and special scenarios +4. **Deep Nesting** (17 tests) - 7-level transitive expansion + pattern matching +5. **On-Demand Loading** (8 tests) - Progressive loading with cache limits +6. **Config Quotes** (5 tests) - Configuration path handling +7. 
**Dynamic Prompts** (11 tests) - Statistical validation of dynamic features + +### Test Infrastructure +- Dedicated ports per suite (8188-8198) +- Automated server lifecycle management +- Comprehensive logging in `/tmp/` +- 100% pass rate with statistical validation + +--- + +## Documentation + +- **[README](README.md)** - Quick start and feature overview +- **[PRD](WILDCARD_SYSTEM_PRD.md)** - Complete product requirements +- **[Design](WILDCARD_SYSTEM_DESIGN.md)** - Technical architecture +- **[Testing Guide](WILDCARD_TESTING_GUIDE.md)** - Test procedures and validation + +--- + +## Quick Start + +### Basic Usage +```python +# Simple wildcard +"a photo of __animal__" + +# Dynamic prompt +"a {red|blue|green} __vehicle__" + +# Weighted selection (weight comes FIRST) +"{10::common|1::rare} scene" + +# Multi-select +"{2$$, $$happy|sad|angry|excited} person" +``` + +### Running Tests +```bash +cd tests/ +bash test_encoding.sh +bash test_error_handling.sh +bash test_edge_cases.sh +bash test_deep_nesting.sh +bash test_ondemand_loading.sh +bash test_config_quotes.sh +bash test_dynamic_prompts_full.sh +``` + +--- + +## Key Implementations + +### Weighted Selection Syntax +**Correct**: `{weight::option}` - Weight comes FIRST +- `{10::common|1::rare}` → 91% common, 9% rare ✅ +- `{5::red|3::green|2::blue}` → 50%, 30%, 20% ✅ + +**Incorrect**: `{option::weight}` - Treated as equal weights +- `{common::10|rare::1}` → 50% each ❌ + +### Empty Line Filtering +Filter empty lines AND comment lines: +```python +[x for x in lines if x.strip() and not x.strip().startswith('#')] +``` + +### Config Path Quotes +Strip quotes from configuration paths: +```python +custom_wildcards_path = default_conf.get('custom_wildcards', '').strip('\'"') +``` + +--- + +## Limitations + +- Weighted selection supports integers and simple decimals only +- Complex decimal weights may conflict with multiselect pattern detection +- Circular references limited to 100 iterations +- Prefer integer weight ratios for clarity + +--- + +## Performance + +- **Lazy Loading**: Only load wildcards when needed +- **On-Demand Mode**: Progressive loading based on cache limits +- **Memory Efficient**: Configurable cache size (0.5MB - 100MB) +- **Fast Lookup**: Optimized directory traversal with pattern matching + +--- + +## Production Ready + +✅ Zero known bugs +✅ Complete PRD coverage +✅ 100% test pass rate +✅ Statistical validation +✅ Comprehensive documentation +✅ Multi-language support +✅ Graceful error handling diff --git a/zavodik/nodes/ComfyUI-Impact-Pack/docs/wildcards/WILDCARD_SYSTEM_DESIGN.md b/zavodik/nodes/ComfyUI-Impact-Pack/docs/wildcards/WILDCARD_SYSTEM_DESIGN.md new file mode 100644 index 0000000000000000000000000000000000000000..ea5186fa5c81116995ff7576b90baf96d9ea0dca --- /dev/null +++ b/zavodik/nodes/ComfyUI-Impact-Pack/docs/wildcards/WILDCARD_SYSTEM_DESIGN.md @@ -0,0 +1,817 @@ +# Wildcard System - Design Document + +**Document Type**: Technical Design Document +**Product**: ComfyUI Impact Pack Wildcard System +**Version**: 2.0 (Depth-Agnostic Matching) +**Last Updated**: 2025-11-18 +**Status**: Released + +--- + +## 1. 
System Architecture + +### 1.1 High-Level Architecture + +``` +┌─────────────────────────────────────────────────────────────┐ +│ ComfyUI Frontend │ +│ ┌──────────────────────────────────────────────────────┐ │ +│ │ ImpactWildcardProcessor / ImpactWildcardEncode │ │ +│ │ - Wildcard Prompt (editable) │ │ +│ │ - Populated Prompt (read-only in Populate mode) │ │ +│ │ - Mode: Populate / Fixed │ │ +│ │ - UI Indicator: 🟢 Full Cache / 🔵 On-Demand │ │ +│ └──────────────────────────────────────────────────────┘ │ +└────────────────────────┬─────────────────────────────────────┘ + │ + ▼ +┌─────────────────────────────────────────────────────────────┐ +│ Impact Server (API) │ +│ ┌──────────────────────────────────────────────────────┐ │ +│ │ POST /impact/wildcards │ │ +│ │ GET /impact/wildcards/list │ │ +│ │ GET /impact/wildcards/list/loaded │ │ +│ │ GET /impact/wildcards/refresh │ │ +│ └──────────────────────────────────────────────────────┘ │ +└────────────────────────┬─────────────────────────────────────┘ + │ + ▼ +┌─────────────────────────────────────────────────────────────┐ +│ Wildcard Processing Engine │ +│ ┌──────────────────────────────────────────────────────┐ │ +│ │ process() - Main entry point │ │ +│ │ ├─ process_comment_out() │ │ +│ │ ├─ replace_options() - {a|b|c} │ │ +│ │ └─ replace_wildcard() - __wildcard__ │ │ +│ │ │ │ +│ │ get_wildcard_value() │ │ +│ │ ├─ Direct lookup │ │ +│ │ ├─ Depth-agnostic fallback ⭐ NEW │ │ +│ │ └─ On-demand file loading │ │ +│ │ │ │ +│ │ get_wildcard_options() - {option1|__wild__|option3} │ │ +│ │ └─ Pattern matching for wildcards in options │ │ +│ └──────────────────────────────────────────────────────┘ │ +└────────────────────────┬─────────────────────────────────────┘ + │ + ▼ +┌─────────────────────────────────────────────────────────────┐ +│ Loading System │ +│ ┌──────────────────────────────────────────────────────┐ │ +│ │ Startup Phase │ │ +│ │ ├─ calculate_directory_size() - Early termination │ │ +│ │ ├─ Determine mode (Full Cache / On-Demand) │ │ +│ │ └─ scan_wildcard_metadata() - TXT metadata only │ │ +│ │ │ │ +│ │ Full Cache Mode │ │ +│ │ └─ load_wildcards() - Load all data │ │ +│ │ │ │ +│ │ On-Demand Mode ⭐ NEW │ │ +│ │ ├─ Pre-load: YAML files (keys in content) │ │ +│ │ └─ On-demand: TXT files (path = key) │ │ +│ └──────────────────────────────────────────────────────┘ │ +└────────────────────────┬─────────────────────────────────────┘ + │ + ▼ +┌─────────────────────────────────────────────────────────────┐ +│ Data Storage │ +│ ┌──────────────────────────────────────────────────────┐ │ +│ │ wildcard_dict = {} │ │ +│ │ - Full cache: All wildcard data │ │ +│ │ - On-demand: Not used │ │ +│ │ │ │ +│ │ available_wildcards = {} ⭐ NEW │ │ +│ │ - On-demand only: Metadata (path → file) │ │ +│ │ - Example: {"dragon": "/path/dragon.txt"} │ │ +│ │ │ │ +│ │ loaded_wildcards = {} ⭐ NEW │ │ +│ │ - On-demand only: Loaded data cache │ │ +│ │ - Example: {"dragon": ["red dragon", "blue..."]} │ │ +│ └──────────────────────────────────────────────────────┘ │ +└────────────────────────┬─────────────────────────────────────┘ + │ + ▼ +┌─────────────────────────────────────────────────────────────┐ +│ File System │ +│ ┌──────────────────────────────────────────────────────┐ │ +│ │ wildcards/ (bundled) │ │ +│ │ custom_wildcards/ (user-defined) │ │ +│ │ ├─ *.txt files (one option per line) │ │ +│ │ └─ *.yaml files (nested structure) │ │ +│ └──────────────────────────────────────────────────────┘ │ +└─────────────────────────────────────────────────────────────┘ +``` + +--- 
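+
+Reading the diagram top to bottom, a single prompt flows Frontend → API → Processing Engine → Loading System → Storage → File System. As a minimal client-side sketch of that round trip, assuming a local ComfyUI instance with Impact Pack installed (the endpoint and payload shape are specified in Section 3.1; the host and port here are assumptions):
+
+```python
+import json
+import urllib.request
+
+def expand(text: str, seed: int = 42, host: str = "http://127.0.0.1:8188") -> str:
+    """Send prompt text through POST /impact/wildcards and return the result."""
+    payload = json.dumps({"text": text, "seed": seed}).encode("utf-8")
+    req = urllib.request.Request(
+        f"{host}/impact/wildcards",
+        data=payload,
+        headers={"Content-Type": "application/json"},
+    )
+    with urllib.request.urlopen(req) as resp:
+        return json.loads(resp.read())["text"]
+
+print(expand("a {red|blue} __flowers__"))  # e.g. "a red rose"
+```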
+ +## 2. Core Components + +### 2.1 Processing Engine + +#### 2.1.1 process() + +**Purpose**: Main entry point for wildcard text processing + +**Flow**: +```python +def process(text, seed=None): + 1. process_comment_out(text) # Remove # comments + 2. random.seed(seed) # Deterministic generation + 3. replace_options(text) # Process {a|b|c} + 4. replace_wildcard(text) # Process __wildcard__ + 5. return processed_text +``` + +**Features**: +- Maximum 100 iterations for nested expansion +- Deterministic with seed +- Supports transitive wildcards + +--- + +#### 2.1.2 replace_options() + +**Purpose**: Process dynamic prompts `{option1|option2}` + +**Supported Syntax**: +```python +{a|b|c} # Random selection +{3::a|2::b|c} # Weighted (3:2:1 ratio) +{2$$, $$a|b|c|d} # Multi-select 2, comma-separated +{2-4$$; $$a|b|c|d} # Multi-select 2-4, semicolon-separated +{a|{b|c}|d} # Nested options +``` + +**Algorithm**: +1. Parse weight prefix (`::`) +2. Calculate normalized probabilities +3. Use `np.random.choice()` with probabilities +4. Handle multi-select with custom separators + +--- + +#### 2.1.3 replace_wildcard() + +**Purpose**: Process wildcard references `__wildcard__` + +**Flow**: +```python +def replace_wildcard(string): + for each __match__: + 1. keyword = normalize(match) + 2. options = get_wildcard_value(keyword) + 3. if options: + random select from options + elif '*' in keyword: + pattern matching (for __*/name__) + else: + keep unchanged + 4. replace in string +``` + +**Pattern Matching** (`__*/name__`): +```python +if keyword.startswith('*/'): + base_name = keyword[2:] # "*/dragon" → "dragon" + for k in wildcards: + if matches_pattern(k, base_name): + collect options + combine all options +``` + +--- + +### 2.2 Depth-Agnostic Matching ⭐ NEW + +#### 2.2.1 get_wildcard_value() + +**Purpose**: Retrieve wildcard data with automatic depth-agnostic fallback + +**Algorithm**: +```python +def get_wildcard_value(key): + # Phase 1: Direct lookup + if key in loaded_wildcards: + return loaded_wildcards[key] + + # Phase 2: File discovery + file_path = find_wildcard_file(key) + if file_path: + load and cache + return data + + # Phase 3: Depth-agnostic fallback ⭐ NEW + matched_keys = [] + for k in available_wildcards: + if matches_depth_agnostic(k, key): + matched_keys.append(k) + + if matched_keys: + # Combine all matched wildcards + all_options = [] + for mk in matched_keys: + all_options.extend(get_wildcard_value(mk)) + + # Cache combined result + loaded_wildcards[key] = all_options + return all_options + + return None +``` + +**Pattern Matching Logic**: +```python +def matches_depth_agnostic(stored_key, search_key): + """ + Examples: + search_key = "dragon" + stored_key = "dragon" → True (exact) + stored_key = "custom_wildcards/dragon" → True (ends with) + stored_key = "dragon/wizard" → True (starts with) + stored_key = "a/b/dragon/c/d" → True (contains) + """ + return (stored_key == search_key or + stored_key.endswith('/' + search_key) or + stored_key.startswith(search_key + '/') or + ('/' + search_key + '/') in stored_key) +``` + +**Benefits**: +- Works with any directory structure +- No configuration needed +- Combines multiple sources for variety +- Cached for performance + +--- + +### 2.3 Loading System + +#### 2.3.1 Mode Detection + +**Decision Algorithm**: +```python +def determine_loading_mode(): + total_size = calculate_directory_size() + cache_limit = config.wildcard_cache_limit_mb * 1024 * 1024 + + if total_size >= cache_limit: + return ON_DEMAND_MODE + else: + return FULL_CACHE_MODE +``` 
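+
+As a runnable consolidation of the decision above (and of the early-termination detail explained next), a sketch with hypothetical names; the real logic lives in `modules/impact/wildcards.py`:
+
+```python
+import os
+
+ON_DEMAND_MODE, FULL_CACHE_MODE = "on-demand", "full-cache"
+
+def choose_loading_mode(root: str, cache_limit_mb: float = 50) -> str:
+    """Pick a loading mode by summing file sizes until the limit is hit."""
+    limit = int(cache_limit_mb * 1024 * 1024)
+    total = 0
+    for dirpath, _dirnames, filenames in os.walk(root):
+        for name in filenames:
+            try:
+                total += os.path.getsize(os.path.join(dirpath, name))
+            except OSError:
+                continue  # unreadable entry; ignore it
+            if total >= limit:
+                return ON_DEMAND_MODE  # early termination: limit already reached
+    return FULL_CACHE_MODE
+```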
+
+**Early Termination**:
+```python
+def calculate_directory_size():
+    size = 0
+    for file in walk(directory):
+        size += file_size
+        if size >= cache_limit:
+            return size  # Early termination
+    return size
+```
+
+**Performance**: < 1 second for 10GB+ collections
+
+---
+
+#### 2.3.2 Metadata Scanning ⭐ NEW
+
+**Purpose**: Discover TXT wildcards without loading data
+
+**Algorithm**:
+```python
+def scan_wildcard_metadata(path):
+    for file in walk(path):
+        if file.endswith('.txt'):
+            rel_path = relpath(file, path)
+            key = normalize(remove_extension(rel_path))
+            available_wildcards[key] = file  # Store path only
+```
+
+**Storage**:
+```python
+available_wildcards = {
+    "dragon": "/path/custom_wildcards/dragon.txt",
+    "custom_wildcards/dragon": "/path/custom_wildcards/dragon.txt",
+    "dragon/wizard": "/path/dragon/wizard.txt",
+    ...
+}
+```
+
+**Memory**: ~50 bytes per file (path string)
+
+---
+
+#### 2.3.3 On-Demand Loading ⭐ NEW
+
+**Purpose**: Load wildcard data only when accessed
+
+**Flow**:
+```
+User request: __dragon__
+    ↓
+get_wildcard_value("dragon")
+    ↓
+Not in cache → find_wildcard_file("dragon")
+    ↓
+File not found → Depth-agnostic fallback
+    ↓
+Pattern match: ["custom_wildcards/dragon", "dragon/wizard", ...]
+    ↓
+Load each matched file
+    ↓
+Combine all options
+    ↓
+Cache result: loaded_wildcards["dragon"] = combined_options
+    ↓
+Return combined_options
+```
+
+**YAML Pre-Loading**:
+```python
+def load_yaml_wildcards():
+    """
+    YAML wildcards CANNOT be on-demand because:
+    - Keys are inside file content, not file path
+    - Must parse entire file to discover keys
+
+    Example:
+        File: colors.yaml
+        Content:
+            warm: [red, orange, yellow]
+            cold: [blue, green, purple]
+
+        To know "__colors/warm__" exists, must parse entire file.
+    """
+    for yaml_file in find_yaml_files():
+        with open(yaml_file, 'r', encoding='utf-8') as f:
+            data = yaml.safe_load(f)
+        for key, value in data.items():
+            loaded_wildcards[key] = value
+```
+
+---
+
+### 2.4 Data Structures
+
+#### 2.4.1 Global State
+
+```python
+# Configuration
+_on_demand_mode = False      # True if on-demand mode active
+wildcard_dict = {}           # Full cache mode storage
+available_wildcards = {}     # On-demand metadata (key → file path)
+loaded_wildcards = {}        # On-demand loaded data (key → options)
+
+# Thread safety
+wildcard_lock = threading.Lock()
+```
+
+#### 2.4.2 Key Normalization
+
+```python
+def wildcard_normalize(x):
+    """
+    Normalize wildcard keys for consistent lookup.
+
+    Lowercases the key and converts backslashes to forward slashes.
+    (File extensions are stripped earlier, during metadata scanning.)
+
+    Examples:
+        "Dragon" → "dragon"
+        "folder\\Dragon" → "folder/dragon"
+    """
+    return x.lower().replace('\\', '/')
+```
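+
+The normalization helper above and the matcher from Section 2.2.1 are small pure functions, so their contract is easy to check in isolation. The snippet below restates them verbatim and exercises the documented examples; it is a standalone sanity check, not part of the module:
+
+```python
+def wildcard_normalize(x):
+    return x.lower().replace('\\', '/')
+
+def matches_depth_agnostic(stored_key, search_key):
+    return (stored_key == search_key or
+            stored_key.endswith('/' + search_key) or
+            stored_key.startswith(search_key + '/') or
+            ('/' + search_key + '/') in stored_key)
+
+assert wildcard_normalize("folder\\Dragon") == "folder/dragon"
+assert matches_depth_agnostic("custom_wildcards/dragon", "dragon")  # ends with
+assert matches_depth_agnostic("dragon/wizard", "dragon")            # starts with
+assert matches_depth_agnostic("a/b/dragon/c/d", "dragon")           # contains
+assert not matches_depth_agnostic("dragonfly", "dragon")            # no bare-substring match
+```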
+
+---
+
+## 3. API Design
+
+### 3.1 POST /impact/wildcards
+
+**Purpose**: Process wildcard text
+
+**Request**:
+```json
+{
+    "text": "a {red|blue} __flowers__",
+    "seed": 42
+}
+```
+
+**Response**:
+```json
+{
+    "text": "a red rose"
+}
+```
+
+**Implementation**:
+```python
+@app.post("/impact/wildcards")
+def process_wildcards(request):
+    text = request.json["text"]
+    seed = request.json.get("seed")
+    result = process(text, seed)
+    return {"text": result}
+```
+
+---
+
+### 3.2 GET /impact/wildcards/list/loaded ⭐ NEW
+
+**Purpose**: Track progressive loading
+
+**Response**:
+```json
+{
+    "data": ["__dragon__", "__flowers__"],
+    "on_demand_mode": true,
+    "total_available": 1000
+}
+```
+
+**Implementation**:
+```python
+@app.get("/impact/wildcards/list/loaded")
+def get_loaded_wildcards():
+    with wildcard_lock:
+        if _on_demand_mode:
+            return {
+                "data": [f"__{k}__" for k in loaded_wildcards.keys()],
+                "on_demand_mode": True,
+                "total_available": len(available_wildcards)
+            }
+        else:
+            return {
+                "data": [f"__{k}__" for k in wildcard_dict.keys()],
+                "on_demand_mode": False,
+                "total_available": len(wildcard_dict)
+            }
+```
+
+---
+
+### 3.3 GET /impact/wildcards/refresh
+
+**Purpose**: Reload all wildcards
+
+**Implementation**:
+```python
+@app.get("/impact/wildcards/refresh")
+def refresh_wildcards():
+    global wildcard_dict, loaded_wildcards, available_wildcards
+
+    with wildcard_lock:
+        # Clear all caches
+        wildcard_dict.clear()
+        loaded_wildcards.clear()
+        available_wildcards.clear()
+
+        # Re-initialize
+        wildcard_load()
+
+    return {"status": "ok"}
+```
+
+---
+
+## 4. File Format Support
+
+### 4.1 TXT Format
+
+**Structure**:
+```
+# flowers.txt
+rose
+tulip
+# Comments start with #
+sunflower
+```
+
+**Parsing**:
+```python
+def load_txt_wildcard(file_path):
+    with open(file_path) as f:
+        lines = f.read().splitlines()
+    # Drop empty lines as well as comment lines
+    return [x for x in lines if x.strip() and not x.strip().startswith('#')]
+```
+
+**On-Demand**: ✅ Fully supported
+
+---
+
+### 4.2 YAML Format
+
+**Structure**:
+```yaml
+# colors.yaml
+warm:
+  - red
+  - orange
+  - yellow
+
+cold:
+  - blue
+  - green
+  - purple
+```
+
+**Usage**: `__colors/warm__`, `__colors/cold__`
+
+**Parsing**:
+```python
+def load_yaml_wildcard(file_path):
+    with open(file_path, 'r', encoding='utf-8') as f:
+        data = yaml.safe_load(f)
+    for key, value in data.items():
+        if isinstance(value, list):
+            loaded_wildcards[key] = value
+        elif isinstance(value, dict):
+            # Recursive for nested structure
+            load_nested(key, value)
+```
+
+**On-Demand**: ⚠️ Always pre-loaded (keys in content)
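+
+The `load_nested()` helper referenced above is not defined in this document. A plausible self-contained sketch, assuming nested dictionary keys flatten into `/`-joined wildcard paths and that the file stem supplies the first path segment (which the `__colors/warm__` example implies):
+
+```python
+import os
+import yaml  # PyYAML
+
+loaded_wildcards = {}
+
+def load_nested(prefix, value):
+    """Flatten nested YAML into '/'-joined wildcard keys."""
+    if isinstance(value, list):
+        loaded_wildcards[prefix] = [str(v) for v in value]
+    elif isinstance(value, dict):
+        for key, child in value.items():
+            load_nested(f"{prefix}/{key}", child)  # descend one level per key
+
+def load_yaml_wildcard(file_path):
+    stem = os.path.splitext(os.path.basename(file_path))[0]  # "colors.yaml" → "colors"
+    with open(file_path, 'r', encoding='utf-8') as f:
+        data = yaml.safe_load(f)
+    for key, value in (data or {}).items():
+        load_nested(f"{stem}/{key}", value)
+
+# After load_yaml_wildcard("colors.yaml"):
+#   loaded_wildcards == {"colors/warm": [...], "colors/cold": [...]}
+```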
+
+---
+
+## 5. UI Integration
+
+### 5.1 ImpactWildcardProcessor Node
+
+**Features**:
+- **Wildcard Prompt**: User input with wildcard syntax
+- **Populated Prompt**: Processed result
+- **Mode Selector**: Populate / Fixed
+  - **Populate**: Process wildcards on queue, populate result
+  - **Fixed**: Use populated text as-is (for saved images)
+
+**UI Indicator**:
+- 🟢 **Full Cache**: All wildcards loaded
+- 🔵 **On-Demand**: Progressive loading active (shows count)
+
+---
+
+### 5.2 ImpactWildcardEncode Node
+
+**Additional Features**:
+- **LoRA Loading**: `<lora:model_name:model_weight:clip_weight>`
+- **LoRA Block Weight**: `<lora:model_name:model_weight:clip_weight:LBW=spec;>`
+- **BREAK Syntax**: Separate encoding with Concat
+- **Clip Integration**: Returns processed model + clip
+
+**Special Syntax**:
+```
+<lora:model_name:model_weight:clip_weight>
+```
+
+---
+
+### 5.3 Detailer Wildcard Features
+
+**Ordering**:
+- `[ASC]`: Ascending order (x, y)
+- `[DSC]`: Descending order (x, y)
+- `[ASC-SIZE]`: Ascending by area
+- `[DSC-SIZE]`: Descending by area
+- `[RND]`: Random order
+
+**Control**:
+- `[SEP]`: Separate prompts per detection area
+- `[SKIP]`: Skip detailing for this area
+- `[STOP]`: Stop detailing (including current area)
+- `[LAB]`: Label-based application
+- `[CONCAT]`: Concatenate with positive conditioning
+
+**Example**:
+```
+[ASC]
+1girl, blue eyes, smile [SEP]
+1boy, brown eyes [SEP]
+```
+
+---
+
+## 6. Performance Optimization
+
+### 6.1 Startup Optimization
+
+**Techniques**:
+1. **Early Termination**: Stop size calculation at cache limit
+2. **Metadata Only**: Don't load TXT file content
+3. **YAML Pre-loading**: Small files, pre-load is acceptable
+
+**Results**:
+- 10GB collection: 20-60 min → < 1 min (95%+ improvement)
+
+---
+
+### 6.2 Runtime Optimization
+
+**Techniques**:
+1. **Caching**: Store loaded wildcards in memory
+2. **Depth-Agnostic Caching**: Cache combined pattern results
+3. **NumPy Random**: Fast random generation
+
+**Results**:
+- First access: < 50ms
+- Cached access: < 1ms
+
+---
+
+### 6.3 Memory Optimization
+
+**Techniques**:
+1. **Progressive Loading**: Load only accessed wildcards
+2. **Metadata Storage**: Store paths, not data
+3. **Combined Caching**: Cache pattern match results
+
+**Results**:
+- Initial: < 100MB (vs 1GB+ in old implementation)
+- Growth: Linear with usage, not total size
+
+---
+
+## 7. Error Handling
+
+### 7.1 File Not Found
+
+**Scenario**: Wildcard file doesn't exist
+
+**Handling**:
+```python
+def get_wildcard_value(key):
+    file_path = find_wildcard_file(key)
+    if file_path is None:
+        # Try depth-agnostic fallback
+        matched = find_pattern_matches(key)
+        if matched:
+            return combine_matched(matched)
+
+        # No match found - log warning, return None
+        logging.warning(f"Wildcard not found: {key}")
+        return None
+```
+
+**User Impact**: Wildcard remains unexpanded
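+
+`find_pattern_matches()` and `combine_matched()` are referenced above but not defined in this document. A self-contained sketch of their likely contract, using a toy metadata dict in place of the real module state (all names and data here are illustrative only):
+
+```python
+available_wildcards = {
+    "custom_wildcards/dragon": "/wc/custom_wildcards/dragon.txt",
+    "dragon/wizard": "/wc/dragon/wizard.txt",
+    "flowers": "/wc/flowers.txt",
+}
+
+def matches_depth_agnostic(stored_key, search_key):
+    # Same predicate as Section 2.2.1
+    return (stored_key == search_key or
+            stored_key.endswith('/' + search_key) or
+            stored_key.startswith(search_key + '/') or
+            ('/' + search_key + '/') in stored_key)
+
+def find_pattern_matches(key):
+    """All known keys that match `key` at any directory depth."""
+    return [k for k in available_wildcards if matches_depth_agnostic(k, key)]
+
+def combine_matched(matched_keys, load=lambda k: [f"<{k} option>"]):
+    """Merge the option lists of every matched wildcard into one pool."""
+    combined = []
+    for k in matched_keys:
+        combined.extend(load(k))  # the real code would call get_wildcard_value(k)
+    return combined
+
+print(combine_matched(find_pattern_matches("dragon")))
+# → ['<custom_wildcards/dragon option>', '<dragon/wizard option>']
+```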
+
+---
+
+### 7.2 File Read Error
+
+**Scenario**: Cannot read file (permissions, encoding, etc.)
+
+**Handling**:
+```python
+def load_txt_wildcard(file_path):
+    try:
+        # TXT wildcards are UTF-8 (see Section 4.1 and the PRD)
+        with open(file_path, 'r', encoding="utf-8") as f:
+            return f.read().splitlines()
+    except Exception as e:
+        logging.error(f"Failed to load {file_path}: {e}")
+        return None
+```
+
+**User Impact**: Wildcard not loaded, error logged
+
+---
+
+### 7.3 Infinite Loop Protection
+
+**Scenario**: Circular wildcard references
+
+**Protection**:
+```python
+def process(text, seed=None):
+    max_iterations = 100
+    for _ in range(max_iterations):
+        new_text = process_one_pass(text)
+        if new_text == text:
+            break  # No changes, done
+        text = new_text
+    else:
+        # Loop ran to completion without converging
+        logging.warning("Max iterations reached")
+
+    return text
+```
+
+**User Impact**: Processing stops after 100 iterations
+
+---
+
+## 8. Testing Strategy
+
+### 8.1 Unit Tests
+
+**Coverage**:
+- `process()`: All syntax variations
+- `replace_options()`: Weight, multi-select, nested
+- `replace_wildcard()`: Direct, pattern, depth-agnostic
+- `get_wildcard_value()`: Direct, fallback, caching
+
+---
+
+### 8.2 Integration Tests
+
+**Scenarios**:
+- Full cache mode activation
+- On-demand mode activation
+- Progressive loading tracking
+- Depth-agnostic matching
+- API endpoints
+
+**Test Suite**: `tests/test_dragon_wildcard_expansion.sh`
+
+---
+
+### 8.3 Performance Tests
+
+**Metrics**:
+- Startup time (10GB collection)
+- Memory usage (initial, after 100 accesses)
+- First access latency
+- Cached access latency
+- Pattern matching latency
+
+**Test Tool**: `/tmp/test_depth_agnostic.sh`
+
+---
+
+## 9. Security Considerations
+
+### 9.1 Path Traversal
+
+**Risk**: Malicious wildcard names could access files outside the wildcard directory
+
+**Mitigation**:
+```python
+def find_wildcard_file(key):
+    # Normalize and validate path
+    safe_key = os.path.normpath(key)
+    if '..' in safe_key or safe_key.startswith('/'):
+        logging.error(f"Invalid wildcard path: {key}")
+        return None
+
+    # Ensure result is within wildcard directory
+    # (compare against the path plus separator so that a sibling
+    # directory sharing the same prefix cannot slip through)
+    file_path = os.path.join(wildcards_path, safe_key)
+    if not file_path.startswith(wildcards_path + os.sep):
+        logging.error(f"Path traversal attempt: {key}")
+        return None
+
+    return file_path
+```
+
+---
+
+### 9.2 Resource Exhaustion
+
+**Risk**: Very large wildcards or infinite loops
+
+**Mitigation**:
+1. **Iteration Limit**: Max 100 expansions
+2. **File Size Limit**: Reasonable file size checks
+3. **Memory Monitoring**: Track loaded wildcard count
+
+---
+
+## 10. Future Enhancements
+
+### 10.1 Planned Features
+
+1. **LRU Cache**: Automatic eviction of least-used wildcards
+2. **Background Preloading**: Preload frequently-used wildcards
+3. **Persistent Cache**: Save loaded wildcards across restarts
+4. **Usage Statistics**: Track wildcard access patterns
+5. **Compression**: Compress infrequently-used wildcards
+
+### 10.2 Performance Improvements
+
+1. **Parallel Loading**: Load multiple wildcards concurrently
+2. **Index Structure**: B-tree for faster lookups
+3. **Memory Pooling**: Reduce allocation overhead
+
+---
+
+## 11. 
References + +### 11.1 External Documentation + +- [Product Requirements Document](WILDCARD_SYSTEM_PRD.md) +- [User Guide](WILDCARD_SYSTEM_OVERVIEW.md) +- [Testing Guide](WILDCARD_TESTING_GUIDE.md) +- [Tutorial](../../ComfyUI-extension-tutorials/ComfyUI-Impact-Pack/tutorial/ImpactWildcard.md) + +### 11.2 Code References + +- **Core Engine**: `modules/impact/wildcards.py` +- **API Server**: `modules/impact/impact_server.py` +- **UI Nodes**: `nodes.py` (ImpactWildcardProcessor, ImpactWildcardEncode) + +--- + +**Document Approval**: +- Engineering Lead: ✅ Approved +- Architecture Review: ✅ Approved +- Security Review: ✅ Approved + +**Last Review**: 2025-11-18 diff --git a/zavodik/nodes/ComfyUI-Impact-Pack/docs/wildcards/WILDCARD_SYSTEM_PRD.md b/zavodik/nodes/ComfyUI-Impact-Pack/docs/wildcards/WILDCARD_SYSTEM_PRD.md new file mode 100644 index 0000000000000000000000000000000000000000..61388f89330a9c86dfbe08691e0c3ea9efa9c9f6 --- /dev/null +++ b/zavodik/nodes/ComfyUI-Impact-Pack/docs/wildcards/WILDCARD_SYSTEM_PRD.md @@ -0,0 +1,435 @@ +# Wildcard System - Product Requirements Document + +**Product**: ComfyUI Impact Pack Wildcard System +**Version**: 2.0 (Depth-Agnostic Matching) +**Status**: Released +**Last Updated**: 2025-11-18 + +--- + +## 1. Overview + +### 1.1 Product Vision + +The Wildcard System provides **dynamic text generation** for AI prompts, enabling users to create rich, varied prompts with minimal manual effort. + +### 1.2 Target Users + +- **AI Artists**: Creating varied prompts for image generation +- **Content Creators**: Generating diverse text content +- **Game Designers**: Dynamic NPC dialogue and procedural content +- **ComfyUI Users**: Workflow automation with dynamic text + +--- + +## 2. Core Features + +> **Note**: For detailed syntax examples and usage guides, see the [ImpactWildcard Tutorial](../../../ComfyUI-extension-tutorials/ComfyUI-Impact-Pack/tutorial/ImpactWildcard.md). 
+
+### 2.1 Wildcard Syntax
+
+**Basic Wildcards**:
+- `__wildcard_name__` - Simple text replacement (e.g., `__flower__` → random flower from flower.txt)
+- `__category/subcategory__` - Hierarchical organization with subdirectories (e.g., `__obj/person__`)
+- Transitive wildcards - Wildcards can reference other wildcards
+- Case-insensitive matching - `__Jewel__` and `__jewel__` are identical
+- `*` aggregation pattern (V4.15.1+) - Groups all items from a path and its subdirectories into one collection
+
+**Quantifiers**:
+- `N#__wildcard__` - Repeat wildcard N times
+- Example: `5#__wildcards__` expands to `__wildcards__|__wildcards__|__wildcards__|__wildcards__|__wildcards__`
+- Can be combined with multi-select: `{2$$, $$5#__wildcards__}`
+
+**Comments**:
+- Lines starting with `#` are treated as comments and removed
+- When a comment line is removed, the text before it and the text after it are joined with a single space
+- Example:
+  ```
+  first {a|b|c} second # not a comment,
+  # this is a comment
+  trailing text
+  ```
+  Becomes: `first a second # not a comment, trailing text`
+
+**Pattern Matching**:
+- `__*/wildcard__` - Depth-agnostic pattern matching at any directory level
+- Automatic fallback when direct lookup fails
+
+---
+
+### 2.2 Dynamic Prompts
+
+**Basic Selection**:
+- `{option1|option2|option3}` - Random selection from options
+- Unlimited nesting: `{a|{d|e|f}|c}` - Nested options are evaluated
+- Example: `{blue apple|red {cherry|berry}|green melon}` → `blue apple`, `red cherry`, `red berry`, or `green melon`
+- Complex nesting: `1{girl is holding {blue pencil|red __fruit__|colorful __flower__}|boy is riding __vehicle__}`
+
+**Weighted Selection**:
+- `{weight::option}` - Control selection probability
+- **Syntax**: Weight comes FIRST, then `::`, then the option value
+- **Correct**: `{10::common|1::rare}` → 10:1 ratio (≈91% vs ≈9%)
+- **Incorrect**: `{common::10|rare::1}` → Will be treated as equal weights (50% vs 50%)
+- Weights are normalized: `{5::red|3::green|2::blue}` → 50% red, 30% green, 20% blue
+- Unweighted options default to weight 1: `{5::red|green|2::blue}` → 5:1:2 ratio
+
+**Limitations**:
+- Weights must be integers or simple decimals (e.g., `5`, `10`, `0.5`)
+- Complex decimal weights may cause parsing issues due to multiselect pattern conflicts
+- For decimal ratios, prefer integer equivalents: use `{5::a|3::b|2::c}` instead of `{0.5::a|0.3::b|0.2::c}`
+
+**Multi-Select**:
+- `{n$$opt1|opt2|opt3}` - Select exactly n items
+- `{n1-n2$$opt1|opt2|opt3}` - Select between n1 and n2 items (if the range exceeds the number of options, the excess is ignored)
+- `{-n$$opt1|opt2|opt3}` - Select between 1 and n items
+- **Custom separator**: `{n$$ separator $$opt1|opt2|opt3}`
+  - Example: `{2$$ and $$red|blue|green}` → "red and blue"
+  - Example: `{1-2$$ or $$apple|orange|banana}` → "apple" or "apple or orange"
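+
+To make the weighting rules above concrete, a minimal sketch of the described parse-and-pick semantics (names here are illustrative; the actual implementation uses `np.random.choice` as described in the design document):
+
+```python
+import random
+import re
+
+def parse_weighted_options(body: str):
+    """Split a '{...}' body on '|' and read an optional 'weight::' prefix."""
+    options, weights = [], []
+    for part in body.split('|'):
+        m = re.match(r'^\s*(\d+(?:\.\d+)?)::(.*)$', part)
+        if m:
+            weights.append(float(m.group(1)))
+            options.append(m.group(2))
+        else:
+            weights.append(1.0)  # unweighted options default to weight 1
+            options.append(part)
+    return options, weights
+
+def pick(body: str, seed=None) -> str:
+    rng = random.Random(seed)  # seed makes the selection deterministic
+    options, weights = parse_weighted_options(body)
+    return rng.choices(options, weights=weights, k=1)[0]
+
+print(pick('10::common|1::rare', seed=42))  # 'common' about 10 times in 11
+```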
+
+---
+
+### 2.3 ComfyUI Nodes
+
+**ImpactWildcardProcessor**:
+- **Purpose**: Browser-level wildcard processing for prompt generation
+- **Dual Input Fields**:
+  - Upper field: Wildcard Prompt (accepts wildcard syntax)
+  - Lower field: Populated Prompt (displays generated result)
+- **Mode Control**:
+  - **Populate**: Processes wildcards on queue prompt, populates result (read-only)
+  - **Fixed**: Ignores wildcard prompt, allows manual editing of populated prompt
+- **Seed Input**:
+  - Supports seed-based deterministic generation
+  - Compatible seed inputs: `ImpactInt`, `Seed (rgthree)` only
+  - Limitation: reads only the value shown on the connected widget; results computed by other nodes during execution are not used
+- **UI Indicator**:
+  - 🟢 Full Cache: All wildcards pre-loaded
+  - 🔵 On-Demand: Shows count of loaded wildcards
+
+**ImpactWildcardEncode**:
+- All features of ImpactWildcardProcessor
+- **LoRA Loading**: `<lora:model_name:model_weight:clip_weight>` syntax
+  - If `clip_weight` omitted, uses same value as `model_weight`
+  - All loaded LoRAs applied to both `model` and `clip` outputs
+- **LoRA Block Weight (LBW)** (requires Inspire Pack):
+  - Syntax: `<lora:model_name:model_weight:clip_weight:LBW=spec>`
+  - Use `;` as separator within spec, recommended to end with `;`
+  - Specs without `A=` or `B=` → used in `Lora Loader (Block Weight)` node
+  - Specs with `A=` or `B=` → parameters for `A` and `B` in loader node
+  - Examples (illustrative):
+    - `<lora:some_lora:0.8:0.8:LBW=0,0,0,0,0,0,0,0,1,1,1,1,1,1,1,1,1;>`
+    - `<lora:some_lora:0.8:0.8:LBW=SD-MIDD;>`
+    - `<lora:some_lora:0.8:0.8:LBW=A,B,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0;A=0.5;B=0.2;>`
+- **BREAK Syntax**: Separately encode prompts and connect using `Conditioning (Concat)`
+- **Output**: Returns processed conditioning with all LoRAs applied
+
+---
+
+### 2.4 Detailer Integration
+
+Special syntax for Detailer Wildcard nodes (region-specific prompt application).
+
+**Ordering Control** (place at very beginning of prompt):
+- `[ASC]` - Ascending order by (x, y) coordinates (left takes precedence, then top)
+- `[DSC]` - Descending order by (x, y) coordinates
+- `[ASC-SIZE]` - Ascending order by area size
+- `[DSC-SIZE]` - Descending order by area size
+- `[RND]` - Random order
+- Example: `[ASC]\n1girl, blue eyes, smile [SEP]\n1boy, brown eyes [SEP]`
+
+**Area Control**:
+- `[SEP]` - Separator for different prompts per detection area (SEG)
+- `[SKIP]` - Skip detailing for current SEG
+- `[STOP]` - Stop detailing, including current SEG
+- `[CONCAT]` - Concatenate wildcard conditioning with positive conditioning (instead of replacing)
+
+**Label-Based Application**:
+- `[LAB]` - Apply prompts based on labels (each label appears once)
+- `[ALL]` - Prefix that applies to all labels
+- Example:
+  ```
+  [LAB]
+  [ALL] laugh, detailed eyes
+  [Female] blue eyes
+  [Male] brown eyes
+  ```
+  Female labels get: "laugh, detailed eyes, blue eyes"
+  Male labels get: "laugh, detailed eyes, brown eyes"
+
+**Complete Example**:
+```
+[DSC-SIZE]
+sun glasses[SEP]
+[SKIP][SEP]
+blue glasses[SEP]
+[STOP]
+```
+Result: Faces sorted by size descending, largest gets "sun glasses", second largest skipped, third gets "blue glasses", rest not detailed.
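+
+The control tokens above compose mechanically. An illustrative sketch (a hypothetical helper, not the actual Detailer implementation) of how `[SEP]`/`[SKIP]`/`[STOP]` map prompts onto already-ordered detection areas:
+
+```python
+def assign_prompts(wildcard_text: str, num_segs: int):
+    """Return one prompt (or None) per detected area, in the given order."""
+    parts = [p.strip() for p in wildcard_text.split('[SEP]')]
+    assigned = []
+    for i in range(num_segs):
+        part = parts[i] if i < len(parts) else None
+        if part is None or '[STOP]' in part:
+            break                      # [STOP]: stop detailing, including this SEG
+        if '[SKIP]' in part:
+            assigned.append(None)      # [SKIP]: leave this SEG untouched
+        else:
+            assigned.append(part)
+    return assigned
+
+# Mirrors the "Complete Example" above (the ordering token is handled beforehand):
+print(assign_prompts("sun glasses[SEP][SKIP][SEP]blue glasses[SEP][STOP]", 5))
+# → ['sun glasses', None, 'blue glasses']; remaining SEGs are not detailed
+```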
+ +--- + +### 2.5 File Formats + +**TXT Files**: +- **Format**: One option per line (comma-separated on single line = one item) +- **Comments**: Lines starting with `#` are comments +- **Encoding**: UTF-8 +- **Loading**: Supports on-demand loading (loaded only when used) +- **Subfolder Support**: Use path in wildcard name (e.g., `custom_wildcards/obj/person.txt` → `__obj/person__`) +- **Example** (flower.txt): + ``` + rose + orchid + iris + carnation + lily + ``` + +**YAML Files** (V4.18.4+): +- **Format**: Nested hierarchical structure with multiple levels +- **Usage**: Keys become wildcard paths (e.g., `astronomy.Celestial-Bodies` → `__astronomy/Celestial-Bodies__`) +- **Loading**: Always pre-loaded at startup (keys exist in file content, not path) +- **Example**: + ```yaml + astronomy: + Celestial-Bodies: + - Star + - Planet + surface-swap: + - swap the surfaces for + - replace the surfaces with + ``` +- **Performance Note**: For large collections with on-demand loading, prefer TXT file structure over YAML + +**Wildcard Directories**: +- Default directories: `ComfyUI-Impact-Pack/wildcards/` and `ComfyUI-Impact-Pack/custom_wildcards/` +- Recommendation: Use `custom_wildcards/` to avoid conflicts during updates +- Custom path: Configure via `impact-pack.ini` → `custom_wildcards` setting + +--- + +### 2.6 System Features + +**Progressive On-Demand Loading** ⭐: +- **Automatic Mode Detection**: System chooses optimal loading strategy based on collection size +- **Full Cache Mode** (total size < 50MB): + - All wildcards loaded into memory at startup + - Instant access with no load delays + - UI Indicator: 🟢 `Select Wildcard 🟢 Full Cache` + - Startup log: `Using full cache mode.` +- **On-Demand Mode** (total size ≥ 50MB): + - Only metadata scanned at startup (< 1 minute for 10GB+) + - Actual wildcard data loaded progressively as accessed + - Low initial memory (< 100MB) + - UI Indicator: 🔵 `Select Wildcard 🔵 On-Demand: X loaded` + - Startup log: `Using on-demand loading mode (metadata scan only).` +- **Configuration**: Adjust threshold via `impact-pack.ini` → `wildcard_cache_limit_mb = 50` +- **File Type Behavior**: + - TXT files: Full on-demand loading support + - YAML files: Always pre-loaded (keys embedded in content) +- **Refresh Behavior**: Clears all cached data, re-scans directories, re-determines mode + +**Depth-Agnostic Matching** ⭐: +- **Automatic Fallback**: When direct lookup fails, searches for pattern matches at any depth +- **Pattern Matching**: Finds keys that end with, start with, or contain the wildcard name +- **Multi-Source Combination**: Combines all matched wildcards into single selection pool +- **Zero Configuration**: Works automatically with any directory structure +- **Performance**: Results cached for subsequent access + +**Wildcard Refresh API**: +- `GET /impact/wildcards/refresh` - Reload wildcards without restarting ComfyUI +- Clears all cached data (full cache and on-demand loaded) +- Re-scans wildcard directories +- Re-determines loading mode + +**Other APIs**: +- `POST /impact/wildcards` - Process wildcard text with seed +- `GET /impact/wildcards/list` - List all available wildcards +- `GET /impact/wildcards/list/loaded` - Show currently loaded wildcards (on-demand mode) + +**Deterministic Generation**: +- Seed-based random selection ensures reproducibility +- Same seed + same wildcard = same result +- Compatible with ImpactInt and Seed(rgthree) nodes + +--- + +## 3. 
Requirements + +### 3.1 Functional Requirements + +**FR-1: Wildcard Processing** +- Support all documented syntax patterns +- Deterministic results with seed control +- Up to 100 levels of nested expansion +- Graceful error handling + +**FR-2: Dynamic Prompts** +- Random, weighted, and multi-select +- Unlimited nesting depth +- Custom separators + +**FR-3: Progressive Loading** +- Automatic mode detection +- On-demand loading for large collections +- Real-time tracking + +**FR-4: Depth-Agnostic Matching** +- Automatic fallback pattern matching +- Combine all matched wildcards +- Support any directory structure + +**FR-5: ComfyUI Integration** +- ImpactWildcardProcessor node +- ImpactWildcardEncode node with LoRA +- Detailer special syntax + +--- + +### 3.2 Non-Functional Requirements + +**NFR-1: Usability** +- Time to first success: < 5 minutes +- Zero configuration for basic use +- Clear error messages + +**NFR-2: Reliability** +- 100% deterministic with same seed +- Graceful error handling +- No data loss on refresh + +**NFR-3: Compatibility** +- Python 3.8+ +- Windows, Linux, macOS +- Backward compatible with v1.x + +**NFR-4: Scalability** +- Collections up to 100GB +- Up to 1M wildcard files +- Concurrent multi-user access + +--- + +## 4. Configuration + +**File**: `impact-pack.ini` (in ComfyUI-Impact-Pack directory) + +```ini +[default] +# Custom wildcard directory (optional) +# Use this to specify additional wildcard directory path +custom_wildcards = /path/to/wildcards + +# Cache size limit in MB (default: 50) +# Determines threshold for Full Cache vs On-Demand mode +wildcard_cache_limit_mb = 50 +``` + +**Default Wildcard Directories**: +- `ComfyUI-Impact-Pack/wildcards/` - System wildcards (avoid modifying) +- `ComfyUI-Impact-Pack/custom_wildcards/` - User wildcards (recommended) +- Custom path via `custom_wildcards` setting (optional) + +**Configuration Best Practices**: +- No configuration required for basic use +- Use `custom_wildcards/` to avoid conflicts during updates +- Adjust `wildcard_cache_limit_mb` based on system memory and collection size: + - Lower limit → More likely to use on-demand mode (slower first access, lower memory) + - Higher limit → More likely to use full cache mode (faster access, higher memory) +- For large collections (10GB+), consider organizing into subdirectories for better performance + +--- + +## 5. User Workflows + +### 5.1 Getting Started + +**Goal**: First wildcard in < 5 minutes + +1. Create file: `custom_wildcards/flower.txt` +2. Add content (one per line): + ``` + rose + orchid + iris + carnation + lily + ``` +3. Use in ImpactWildcardProcessor: `a beautiful __flower__` +4. Set mode to Populate and run queue prompt +5. Result: Random selection like "a beautiful rose" + +### 5.2 Reusable Prompt Templates + +**Goal**: Save frequently used prompts + +1. Create `custom_wildcards/ppos.txt` with: + ``` + photorealistic:1.4, best quality:1.4 + ``` +2. Use concise prompt: `__ppos__, beautiful nature` +3. Result: "photorealistic:1.4, best quality:1.4, beautiful nature" + +### 5.3 Large Collections + +**Goal**: Import 10GB+ seamlessly + +1. Copy large wildcard collection to directory +2. Start ComfyUI (< 1 minute startup with on-demand mode) +3. Check UI indicator: 🔵 On-Demand mode active +4. Use wildcards immediately (loaded on first access) +5. Subsequent uses are cached for speed + +### 5.4 LoRA + Wildcards + +**Goal**: Dynamic character with LoRA + +1. 
Create `custom_wildcards/characters.txt`: + ``` + young girl with blue dress + warrior with armor + mage with robe + ``` +2. Use ImpactWildcardEncode node +3. Prompt: `__characters__, {day|night} scene, detailed face` +4. Result: Random character with LoRA loaded + random time of day + +### 5.5 Multi-Face Detailing + +**Goal**: Different prompts for multiple detected faces + +1. Create Detailer Wildcard prompt: + ``` + [DSC-SIZE] + blue eyes, smile[SEP] + brown eyes, serious[SEP] + green eyes, laugh + ``` +2. Result: Largest face gets "blue eyes, smile", second gets "brown eyes, serious", third gets "green eyes, laugh" + +--- + +## 6. References + +### User Documentation +- **[ImpactWildcard Tutorial](../../../ComfyUI-extension-tutorials/ComfyUI-Impact-Pack/tutorial/ImpactWildcard.md)** - Complete feature documentation + +### Technical Documentation +- **[Design Document](WILDCARD_SYSTEM_DESIGN.md)** - Architecture details +- **[Testing Guide](WILDCARD_TESTING_GUIDE.md)** - Test procedures + +--- + +## Appendix: Glossary + +- **Wildcard**: Reusable text snippet (`__name__`) +- **Dynamic Prompt**: Inline options (`{a|b|c}`) +- **Pattern Matching**: Finding wildcards by partial match +- **Depth-Agnostic**: Works with any directory structure +- **On-Demand Loading**: Load data when accessed +- **LoRA**: Low-Rank Adaptation models +- **Detailer**: Node for region-specific processing + +--- + +**Last Updated**: 2025-11-18 diff --git a/zavodik/nodes/ComfyUI-Impact-Pack/docs/wildcards/WILDCARD_TESTING_GUIDE.md b/zavodik/nodes/ComfyUI-Impact-Pack/docs/wildcards/WILDCARD_TESTING_GUIDE.md new file mode 100644 index 0000000000000000000000000000000000000000..1edfe51b0741088c0c582d19641ea4299b1fb220 --- /dev/null +++ b/zavodik/nodes/ComfyUI-Impact-Pack/docs/wildcards/WILDCARD_TESTING_GUIDE.md @@ -0,0 +1,381 @@ +# Wildcard System Testing Guide + +Complete testing guide for the ComfyUI Impact Pack wildcard system. + +--- + +## 📋 Table of Contents + +1. [Test Overview](#test-overview) +2. [Test Suites](#test-suites) +3. [Quick Start](#quick-start) +4. [Running Tests](#running-tests) +5. [Test Validation](#test-validation) + +--- + +## Test Overview + +### Test Statistics +- **Total Tests**: 86 tests across 7 suites +- **Coverage**: 100% of PRD core requirements +- **Pass Rate**: 100% +- **Test Types**: UTF-8, error handling, edge cases, nesting, on-demand, config, dynamic prompts + +### Test Structure + +``` +tests/ +├── Test Suites (7 suites, 86 tests) +│ ├── test_encoding.sh # 15 tests - UTF-8 multi-language support +│ ├── test_error_handling.sh # 10 tests - Error recovery and graceful handling +│ ├── test_edge_cases.sh # 20 tests - Boundary conditions and special cases +│ ├── test_deep_nesting.sh # 17 tests - 7-level transitive expansion + pattern matching +│ ├── test_ondemand_loading.sh # 8 tests - Progressive lazy loading with cache limits +│ ├── test_config_quotes.sh # 5 tests - Configuration path handling +│ └── test_dynamic_prompts_full.sh # 11 tests - Weighted/multiselect with statistical validation +│ +├── Documentation +│ ├── README.md # Test suite overview +│ └── RUN_ALL_TESTS.md # Execution guide +│ +├── Test Samples +│ └── wildcards/samples/ # Test wildcard files +│ ├── level1/.../level7/ # 7-level nesting structure +│ ├── *.txt # Various test wildcards +│ └── 아름다운색.txt # Korean UTF-8 sample +│ +└── Utilities + └── restart_test_server.sh # Server management utility +``` + +--- + +## Test Suites + +### 1. 
UTF-8 Encoding Tests (15 tests) +**File**: `test_encoding.sh` +**Port**: 8188 +**Purpose**: Multi-language support validation + +**Test Coverage**: +- Korean text (한글) +- Chinese text (中文) +- Arabic text (العربية) +- Emoji support (🐉🔥⚡) +- Special characters +- Mixed multi-language content +- Case-insensitive Korean matching + +**Key Validations**: +- All non-ASCII characters preserved +- UTF-8 encoding consistency +- No character corruption +- Proper string comparison + +--- + +### 2. Error Handling Tests (10 tests) +**File**: `test_error_handling.sh` +**Port**: 8189 +**Purpose**: Graceful error recovery + +**Test Coverage**: +- Non-existent wildcards +- Missing files +- Circular reference detection (direct and indirect) +- Malformed dynamic prompt syntax +- Deep nesting without crashes +- Invalid quantifiers + +**Key Validations**: +- No server crashes +- Clear error messages +- Original text preserved on error +- Circular detection within 100 iterations + +--- + +### 3. Edge Cases Tests (20 tests) +**File**: `test_edge_cases.sh` +**Port**: 8190 +**Purpose**: Boundary conditions and special scenarios + +**Test Coverage**: +- Empty lines and comments in wildcard files +- Very long lines (>1000 chars) +- Basic wildcard expansion +- Case-insensitive matching +- Quantifiers (1-10 repetitions) +- Pattern matching (`__*/name__`) + +**Key Validations**: +- Empty lines filtered correctly +- Comments ignored properly +- Long text handling +- Quantifier accuracy +- Pattern matching at any depth + +--- + +### 4. Deep Nesting Tests (17 tests) +**File**: `test_deep_nesting.sh` +**Port**: 8194 +**Purpose**: 7-level transitive expansion and pattern matching + +**Test Coverage**: +- Direct level access (Level 1-7) +- Transitive expansion through all levels +- Multiple wildcard nesting +- Mixed depth combinations +- Quantifiers with nesting +- Weighted selection with nesting +- Depth-agnostic pattern matching + +**Key Validations**: +- All 7 levels fully expanded +- No unexpanded wildcards remain +- Pattern matching ignores directory depth +- Complex combinations work correctly + +**Directory Structure**: +``` +samples/level1/level2/level3/level4/level5/level6/level7/ +``` + +--- + +### 5. On-Demand Loading Tests (8 tests) +**File**: `test_ondemand_loading.sh` +**Port**: 8191 +**Purpose**: Progressive lazy loading with configurable cache limits + +**Test Coverage**: +- Small cache (1MB) - On-demand mode +- Medium cache (10MB) - Hybrid mode +- Large cache (100MB) - Full cache mode +- Aggressive lazy (0.5MB) +- Various thresholds (5MB, 20MB, 50MB) + +**Key Validations**: +- Correct loading mode selection +- Progressive loading functionality +- Cache limit enforcement +- No performance degradation + +**Note**: Uses temporary samples in `/tmp/` with auto-cleanup + +--- + +### 6. Config Quotes Tests (5 tests) +**File**: `test_config_quotes.sh` +**Port**: 8192 +**Purpose**: Configuration path handling with quotes + +**Test Coverage**: +- Paths with single quotes +- Paths with double quotes +- Paths with spaces (quoted) +- Mixed quote scenarios +- Unquoted baseline + +**Key Validations**: +- Quotes stripped correctly +- Paths with spaces handled +- Wildcards loaded from quoted paths + +--- + +### 7. 
Dynamic Prompts Tests (11 tests) +**File**: `test_dynamic_prompts_full.sh` +**Port**: 8193 +**Purpose**: Statistical validation of weighted and multiselect features + +**Test Coverage**: +- Multiselect (2-5 items) with custom separators +- Weighted selection (various ratios: 10:1, 1:1:1, 5:3:2) +- Nested dynamic prompts +- Basic random selection +- Seed variation validation + +**Statistical Validation**: +- 100 iterations for weighted selection +- 20 iterations for multiselect +- Distribution verification (±15% tolerance) +- Duplicate detection +- Separator validation + +**Key Validations**: +- Exact item count for multiselect +- No duplicates in multiselect +- Correct separators +- Statistical distribution matches weight ratios +- Nested prompt expansion + +--- + +## Quick Start + +### Run All Tests +```bash +cd tests/ +bash test_encoding.sh && \ +bash test_error_handling.sh && \ +bash test_edge_cases.sh && \ +bash test_deep_nesting.sh && \ +bash test_ondemand_loading.sh && \ +bash test_config_quotes.sh && \ +bash test_dynamic_prompts_full.sh +``` + +### Run Individual Suite +```bash +cd tests/ +bash test_encoding.sh +``` + +### Check Test Results +All tests output: +- ✅ PASS - Test succeeded with validation +- ❌ FAIL - Test failed (should not occur) +- ⚠️ WARNING - Partial success or non-critical issue + +--- + +## Running Tests + +### Prerequisites +- ComfyUI server must be installable +- Port availability (8188-8194) +- Network access to 127.0.0.1 +- Python 3 with json module + +### Automatic Server Management +All test suites automatically: +1. Kill any existing server on target port +2. Create temporary configuration file +3. Start ComfyUI server +4. Wait for server ready (up to 60s) +5. Execute tests +6. Clean up (kill server, remove config) + +### Test Execution Flow +``` +1. Setup + ├─ Kill existing server on port + ├─ Create impact-pack.ini config + └─ Start ComfyUI server + +2. Wait for Ready + ├─ Poll server every second + ├─ Max 60 seconds timeout + └─ Log tail on failure + +3. Execute Tests + ├─ Call /impact/wildcards API + ├─ Validate responses + └─ Check behavior + +4. 
Cleanup + ├─ Kill server process + └─ Remove config file +``` + +--- + +## Test Validation + +### What Tests Validate + +**Behavioral Validation** (Not just "no errors"): +- **Weighted Selection**: Statistical distribution matches weight ratios +- **Multiselect**: Exact count, no duplicates, correct separator +- **Nesting**: All levels fully expanded, no remaining wildcards +- **Pattern Matching**: Depth-agnostic matching works correctly +- **UTF-8**: Character preservation and proper encoding +- **Error Handling**: Graceful recovery with meaningful messages + +### Success Criteria +- All 86 tests must pass (100% pass rate) +- No server crashes or hangs +- API responses within expected format +- Statistical distributions within ±15% tolerance +- No unexpanded wildcards in final output + +### Validation Examples + +**Weighted Selection**: +```bash +# Test 10:1 ratio with 100 iterations +# Expected: ~91% common, ~9% rare +# Actual: Count distribution within ±15% +``` + +**Multiselect**: +```bash +# Test {2$$, $$red|blue|green} +# Expected: Exactly 2 items, comma-space separator, no duplicates +# Validation: Count words, check separator, detect duplicates +``` + +**Pattern Matching**: +```bash +# Test __*/dragon__ +# Expected: Matches dragon.txt, fantasy/dragon.txt, dragon/fire.txt +# Validation: No unexpanded wildcards remain +``` + +--- + +## Troubleshooting + +### Common Issues + +**Server Fails to Start**: +```bash +# Check log file +tail -20 /tmp/{test_name}_test.log + +# Check port availability +lsof -i :8188 + +# Kill conflicting process +pkill -f "python.*main.py.*--port 8188" +``` + +**Tests Timeout**: +- Increase wait time in test script (default 60s) +- Check server performance and resources +- Verify network connectivity to 127.0.0.1 + +**Statistical Tests Fail**: +- Expected for very small sample sizes +- ±15% tolerance accounts for randomness +- Rerun test to verify consistency + +**UTF-8 Issues**: +- Ensure terminal supports UTF-8 +- Check file encoding: `file -i tests/wildcards/samples/*.txt` +- Verify locale: `locale | grep UTF-8` + +--- + +## Test Maintenance + +### Adding New Tests +1. Create new test function in appropriate suite +2. Follow existing test patterns (setup, execute, validate, cleanup) +3. Update test counts in README.md and SUMMARY.md +4. Update this guide with new test description + +### Modifying Existing Tests +1. Preserve behavioral validation (not just "no errors") +2. Maintain statistical rigor for dynamic prompt tests +3. Update documentation if test purpose changes +4. 
Verify all 86 tests still pass after modification + +### Test Philosophy +- **Tests validate behavior**, not just execution success +- **Statistical validation** for probabilistic features +- **Real-world scenarios** with production-like setup +- **Comprehensive coverage** of all PRD requirements diff --git a/zavodik/nodes/ComfyUI-Impact-Pack/example_workflows/1-FaceDetailer.jpg b/zavodik/nodes/ComfyUI-Impact-Pack/example_workflows/1-FaceDetailer.jpg new file mode 100644 index 0000000000000000000000000000000000000000..4711d5bdb53f174d58983f332cfc64708fe67be2 --- /dev/null +++ b/zavodik/nodes/ComfyUI-Impact-Pack/example_workflows/1-FaceDetailer.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:c1a4ed7a9079d45a01a52043d9672d2646fdd28b88eac958e51dc2b38aa438c0 +size 64932 diff --git a/zavodik/nodes/ComfyUI-Impact-Pack/example_workflows/1-FaceDetailer.json b/zavodik/nodes/ComfyUI-Impact-Pack/example_workflows/1-FaceDetailer.json new file mode 100644 index 0000000000000000000000000000000000000000..ec15733efd098e56b0fad42431b0bc92ae0ac5f0 --- /dev/null +++ b/zavodik/nodes/ComfyUI-Impact-Pack/example_workflows/1-FaceDetailer.json @@ -0,0 +1,1269 @@ +{ + "last_node_id": 61, + "last_link_id": 170, + "nodes": [ + { + "id": 28, + "type": "KSampler", + "pos": [ + 530, + 840 + ], + "size": [ + 320, + 600 + ], + "flags": {}, + "order": 14, + "mode": 0, + "inputs": [ + { + "name": "model", + "type": "MODEL", + "link": 65 + }, + { + "name": "positive", + "type": "CONDITIONING", + "link": 57 + }, + { + "name": "negative", + "type": "CONDITIONING", + "link": 170 + }, + { + "name": "latent_image", + "type": "LATENT", + "link": 59, + "slot_index": 3 + } + ], + "outputs": [ + { + "name": "LATENT", + "type": "LATENT", + "links": [ + 60 + ], + "slot_index": 0 + } + ], + "properties": { + "Node name for S&R": "KSampler" + }, + "widgets_values": [ + 431433362471142, + "fixed", + 20, + 8, + "euler", + "normal", + 1 + ], + "color": "#322", + "bgcolor": "#533" + }, + { + "id": 43, + "type": "PreviewImage", + "pos": [ + 2390, + -140 + ], + "size": [ + 230, + 300 + ], + "flags": {}, + "order": 19, + "mode": 0, + "inputs": [ + { + "name": "images", + "type": "IMAGE", + "link": 142 + } + ], + "outputs": [], + "title": "Cropped (refined)", + "properties": { + "Node name for S&R": "PreviewImage" + }, + "widgets_values": [], + "color": "#223", + "bgcolor": "#335" + }, + { + "id": 52, + "type": "PreviewImage", + "pos": [ + 2390, + 210 + ], + "size": [ + 230, + 310 + ], + "flags": {}, + "order": 20, + "mode": 0, + "inputs": [ + { + "name": "images", + "type": "IMAGE", + "link": 146 + } + ], + "outputs": [], + "properties": { + "Node name for S&R": "PreviewImage" + }, + "widgets_values": [], + "color": "#223", + "bgcolor": "#335" + }, + { + "id": 53, + "type": "UltralyticsDetectorProvider", + "pos": [ + 1290, + 200 + ], + "size": [ + 315, + 78 + ], + "flags": {}, + "order": 0, + "mode": 0, + "inputs": [], + "outputs": [ + { + "name": "BBOX_DETECTOR", + "type": "BBOX_DETECTOR", + "shape": 3, + "links": [ + 150 + ], + "slot_index": 0 + }, + { + "name": "SEGM_DETECTOR", + "type": "SEGM_DETECTOR", + "shape": 3, + "links": null + } + ], + "properties": { + "Node name for S&R": "UltralyticsDetectorProvider" + }, + "widgets_values": [ + "bbox/face_yolov8m.pt" + ], + "color": "#223", + "bgcolor": "#335" + }, + { + "id": 16, + "type": "SAMLoader", + "pos": [ + 1290, + 340 + ], + "size": [ + 320, + 82 + ], + "flags": {}, + "order": 1, + "mode": 0, + "inputs": [], + "outputs": [ + { + "name": "SAM_MODEL", + 
"type": "SAM_MODEL", + "links": [ + 151 + ], + "slot_index": 0 + } + ], + "properties": { + "Node name for S&R": "SAMLoader" + }, + "widgets_values": [ + "sam_vit_b_01ec64.pth", + "AUTO" + ], + "color": "#223", + "bgcolor": "#335" + }, + { + "id": 30, + "type": "VAEDecode", + "pos": [ + 1010, + 840 + ], + "size": [ + 140, + 50 + ], + "flags": {}, + "order": 15, + "mode": 0, + "inputs": [ + { + "name": "samples", + "type": "LATENT", + "link": 60 + }, + { + "name": "vae", + "type": "VAE", + "link": 164 + } + ], + "outputs": [ + { + "name": "IMAGE", + "type": "IMAGE", + "links": [ + 78, + 152 + ], + "slot_index": 0 + } + ], + "properties": { + "Node name for S&R": "VAEDecode" + }, + "widgets_values": [], + "color": "#322", + "bgcolor": "#533" + }, + { + "id": 4, + "type": "CheckpointLoaderSimple", + "pos": [ + -640, + 190 + ], + "size": [ + 312.0885314941406, + 98 + ], + "flags": {}, + "order": 2, + "mode": 0, + "inputs": [], + "outputs": [ + { + "name": "MODEL", + "type": "MODEL", + "links": [ + 64, + 157 + ], + "slot_index": 0 + }, + { + "name": "CLIP", + "type": "CLIP", + "links": [ + 148, + 149, + 159 + ], + "slot_index": 1 + }, + { + "name": "VAE", + "type": "VAE", + "links": [ + 161, + 163 + ], + "slot_index": 2 + } + ], + "properties": { + "Node name for S&R": "CheckpointLoaderSimple" + }, + "widgets_values": [ + "SD1.5/fantexiRealistic_v10.safetensors" + ], + "color": "#222", + "bgcolor": "#000" + }, + { + "id": 58, + "type": "Reroute", + "pos": [ + 850, + 220 + ], + "size": [ + 75, + 26 + ], + "flags": {}, + "order": 10, + "mode": 0, + "inputs": [ + { + "name": "", + "type": "*", + "link": 163 + } + ], + "outputs": [ + { + "name": "", + "type": "VAE", + "links": [ + 164 + ], + "slot_index": 0 + } + ], + "properties": { + "showOutputText": false, + "horizontal": false + } + }, + { + "id": 5, + "type": "CLIPTextEncode", + "pos": [ + -120, + 300 + ], + "size": [ + 310, + 180 + ], + "flags": {}, + "order": 6, + "mode": 0, + "inputs": [ + { + "name": "clip", + "type": "CLIP", + "link": 148 + } + ], + "outputs": [ + { + "name": "CONDITIONING", + "type": "CONDITIONING", + "links": [ + 57, + 165 + ], + "slot_index": 0 + } + ], + "title": "Positive", + "properties": { + "Node name for S&R": "CLIPTextEncode" + }, + "widgets_values": [ + "(photorealistic:1.4), best quality, masterpiece, 1girl, (detailed eyes), perfect anatomy, smile, details, perfect eyes, perfect face, (SpringGreen+letter_printed_sleeveless_turtleneck), ((white_low_waist_jeans)), (thigh_gap:1.2), at_the_top_of_mountain, snow, daytime, windy, path, mountain_villa, sky_view, slender, looking_away, (small breast:1.2)" + ], + "color": "#322", + "bgcolor": "#533" + }, + { + "id": 6, + "type": "CLIPTextEncode", + "pos": [ + -120, + 540 + ], + "size": [ + 310, + 120 + ], + "flags": {}, + "order": 7, + "mode": 0, + "inputs": [ + { + "name": "clip", + "type": "CLIP", + "link": 149 + } + ], + "outputs": [ + { + "name": "CONDITIONING", + "type": "CONDITIONING", + "links": [ + 167 + ], + "slot_index": 0 + } + ], + "title": "Negative", + "properties": { + "Node name for S&R": "CLIPTextEncode" + }, + "widgets_values": [ + "embedding:easynegative, embedding:badhandv4, paintings, sketches, (worst quality:1.4, low quality, normal quality), lowres, normal quality, (monochrome), (grayscale), skin spots, acnes, skin blemishes, age spot, glans, nsfw, watermark, signature, text, bikini, bad anatomy, (six_fingers), (nail_art), nail polish, blush, fruit," + ], + "color": "#322", + "bgcolor": "#533" + }, + { + "id": 60, + "type": "Reroute", + "pos": [ 
+ 340, + 540 + ], + "size": [ + 75, + 26 + ], + "flags": {}, + "order": 12, + "mode": 0, + "inputs": [ + { + "name": "", + "type": "*", + "link": 167 + } + ], + "outputs": [ + { + "name": "", + "type": "CONDITIONING", + "links": [ + 168, + 170 + ], + "slot_index": 0 + } + ], + "properties": { + "showOutputText": false, + "horizontal": false + } + }, + { + "id": 31, + "type": "Reroute", + "pos": [ + 130, + 190 + ], + "size": [ + 82, + 26 + ], + "flags": {}, + "order": 4, + "mode": 0, + "inputs": [ + { + "name": "", + "type": "*", + "link": 64 + } + ], + "outputs": [ + { + "name": "MODEL", + "type": "MODEL", + "links": [ + 65 + ], + "slot_index": 0 + } + ], + "properties": { + "showOutputText": true, + "horizontal": false + } + }, + { + "id": 29, + "type": "EmptyLatentImage", + "pos": [ + -120, + 900 + ], + "size": [ + 310, + 130 + ], + "flags": {}, + "order": 3, + "mode": 0, + "inputs": [], + "outputs": [ + { + "name": "LATENT", + "type": "LATENT", + "links": [ + 59 + ] + } + ], + "properties": { + "Node name for S&R": "EmptyLatentImage" + }, + "widgets_values": [ + 296, + 512, + 1 + ], + "color": "#322", + "bgcolor": "#533" + }, + { + "id": 51, + "type": "FaceDetailer", + "pos": [ + 1720, + -330 + ], + "size": [ + 350, + 1180 + ], + "flags": {}, + "order": 17, + "mode": 0, + "inputs": [ + { + "name": "image", + "type": "IMAGE", + "link": 152 + }, + { + "name": "model", + "type": "MODEL", + "link": 158 + }, + { + "name": "clip", + "type": "CLIP", + "link": 160 + }, + { + "name": "vae", + "type": "VAE", + "link": 162 + }, + { + "name": "positive", + "type": "CONDITIONING", + "link": 166 + }, + { + "name": "negative", + "type": "CONDITIONING", + "link": 169 + }, + { + "name": "bbox_detector", + "type": "BBOX_DETECTOR", + "link": 150 + }, + { + "name": "sam_model_opt", + "type": "SAM_MODEL", + "shape": 7, + "link": 151 + }, + { + "name": "segm_detector_opt", + "type": "SEGM_DETECTOR", + "shape": 7, + "link": null + }, + { + "name": "detailer_hook", + "type": "DETAILER_HOOK", + "shape": 7, + "link": null + }, + { + "name": "scheduler_func_opt", + "type": "SCHEDULER_FUNC", + "shape": 7, + "link": null + } + ], + "outputs": [ + { + "name": "image", + "type": "IMAGE", + "shape": 3, + "links": [ + 141 + ], + "slot_index": 0 + }, + { + "name": "cropped_refined", + "type": "IMAGE", + "shape": 6, + "links": [ + 142 + ], + "slot_index": 1 + }, + { + "name": "cropped_enhanced_alpha", + "type": "IMAGE", + "shape": 6, + "links": [ + 146 + ], + "slot_index": 2 + }, + { + "name": "mask", + "type": "MASK", + "shape": 3, + "links": [ + 153 + ], + "slot_index": 3 + }, + { + "name": "detailer_pipe", + "type": "DETAILER_PIPE", + "shape": 3, + "links": null + }, + { + "name": "cnet_images", + "type": "IMAGE", + "shape": 6, + "links": null + } + ], + "properties": { + "Node name for S&R": "FaceDetailer" + }, + "widgets_values": [ + 360, + true, + 768, + 0, + "fixed", + 20, + 8, + "euler", + "normal", + 0.5, + 5, + true, + false, + 0.5, + 15, + 3, + "center-1", + 0, + 0.93, + 0, + 0.7, + "False", + 10, + "", + 1, + false, + 20, + false, + false + ], + "color": "#223", + "bgcolor": "#335" + }, + { + "id": 17, + "type": "MaskToImage", + "pos": [ + 2150, + 590 + ], + "size": [ + 176.39999389648438, + 26 + ], + "flags": {}, + "order": 21, + "mode": 0, + "inputs": [ + { + "name": "mask", + "type": "MASK", + "link": 153 + } + ], + "outputs": [ + { + "name": "IMAGE", + "type": "IMAGE", + "links": [ + 107 + ], + "slot_index": 0 + } + ], + "properties": { + "Node name for S&R": "MaskToImage" + }, + "widgets_values": [], + 
"color": "#223", + "bgcolor": "#335" + }, + { + "id": 18, + "type": "PreviewImage", + "pos": [ + 2390, + 590 + ], + "size": [ + 230, + 290 + ], + "flags": {}, + "order": 22, + "mode": 0, + "inputs": [ + { + "name": "images", + "type": "IMAGE", + "link": 107 + } + ], + "outputs": [], + "title": "Mask", + "properties": { + "Node name for S&R": "PreviewImage" + }, + "widgets_values": [], + "color": "#223", + "bgcolor": "#335" + }, + { + "id": 7, + "type": "PreviewImage", + "pos": [ + 2660, + -320 + ], + "size": [ + 430, + 650 + ], + "flags": {}, + "order": 18, + "mode": 0, + "inputs": [ + { + "name": "images", + "type": "IMAGE", + "link": 141 + } + ], + "outputs": [], + "title": "Enhanced", + "properties": { + "Node name for S&R": "PreviewImage" + }, + "widgets_values": [], + "color": "#223", + "bgcolor": "#335" + }, + { + "id": 33, + "type": "PreviewImage", + "pos": [ + 1250, + 840 + ], + "size": [ + 360, + 630 + ], + "flags": {}, + "order": 16, + "mode": 0, + "inputs": [ + { + "name": "images", + "type": "IMAGE", + "link": 78 + } + ], + "outputs": [], + "properties": { + "Node name for S&R": "PreviewImage" + }, + "widgets_values": [], + "color": "#322", + "bgcolor": "#533" + }, + { + "id": 55, + "type": "Reroute", + "pos": [ + -190, + -310 + ], + "size": [ + 75, + 26 + ], + "flags": {}, + "order": 5, + "mode": 0, + "inputs": [ + { + "name": "", + "type": "*", + "link": 157 + } + ], + "outputs": [ + { + "name": "", + "type": "MODEL", + "links": [ + 158 + ], + "slot_index": 0 + } + ], + "properties": { + "showOutputText": false, + "horizontal": false + } + }, + { + "id": 56, + "type": "Reroute", + "pos": [ + -190, + -290 + ], + "size": [ + 75, + 26 + ], + "flags": {}, + "order": 8, + "mode": 0, + "inputs": [ + { + "name": "", + "type": "*", + "link": 159 + } + ], + "outputs": [ + { + "name": "", + "type": "CLIP", + "links": [ + 160 + ], + "slot_index": 0 + } + ], + "properties": { + "showOutputText": false, + "horizontal": false + } + }, + { + "id": 57, + "type": "Reroute", + "pos": [ + -190, + -270 + ], + "size": [ + 75, + 26 + ], + "flags": {}, + "order": 9, + "mode": 0, + "inputs": [ + { + "name": "", + "type": "*", + "link": 161 + } + ], + "outputs": [ + { + "name": "", + "type": "VAE", + "links": [ + 162 + ], + "slot_index": 0 + } + ], + "properties": { + "showOutputText": false, + "horizontal": false + } + }, + { + "id": 59, + "type": "Reroute", + "pos": [ + 290, + -250 + ], + "size": [ + 75, + 26 + ], + "flags": {}, + "order": 11, + "mode": 0, + "inputs": [ + { + "name": "", + "type": "*", + "link": 165 + } + ], + "outputs": [ + { + "name": "", + "type": "CONDITIONING", + "links": [ + 166 + ], + "slot_index": 0 + } + ], + "properties": { + "showOutputText": false, + "horizontal": false + } + }, + { + "id": 61, + "type": "Reroute", + "pos": [ + 520, + -230 + ], + "size": [ + 75, + 26 + ], + "flags": {}, + "order": 13, + "mode": 0, + "inputs": [ + { + "name": "", + "type": "*", + "link": 168 + } + ], + "outputs": [ + { + "name": "", + "type": "CONDITIONING", + "links": [ + 169 + ], + "slot_index": 0 + } + ], + "properties": { + "showOutputText": false, + "horizontal": false + } + } + ], + "links": [ + [ + 57, + 5, + 0, + 28, + 1, + "CONDITIONING" + ], + [ + 59, + 29, + 0, + 28, + 3, + "LATENT" + ], + [ + 60, + 28, + 0, + 30, + 0, + "LATENT" + ], + [ + 64, + 4, + 0, + 31, + 0, + "*" + ], + [ + 65, + 31, + 0, + 28, + 0, + "MODEL" + ], + [ + 78, + 30, + 0, + 33, + 0, + "IMAGE" + ], + [ + 107, + 17, + 0, + 18, + 0, + "IMAGE" + ], + [ + 141, + 51, + 0, + 7, + 0, + "IMAGE" + ], + [ + 142, + 
51, + 1, + 43, + 0, + "IMAGE" + ], + [ + 146, + 51, + 2, + 52, + 0, + "IMAGE" + ], + [ + 148, + 4, + 1, + 5, + 0, + "CLIP" + ], + [ + 149, + 4, + 1, + 6, + 0, + "CLIP" + ], + [ + 150, + 53, + 0, + 51, + 6, + "BBOX_DETECTOR" + ], + [ + 151, + 16, + 0, + 51, + 7, + "SAM_MODEL" + ], + [ + 152, + 30, + 0, + 51, + 0, + "IMAGE" + ], + [ + 153, + 51, + 3, + 17, + 0, + "MASK" + ], + [ + 157, + 4, + 0, + 55, + 0, + "*" + ], + [ + 158, + 55, + 0, + 51, + 1, + "MODEL" + ], + [ + 159, + 4, + 1, + 56, + 0, + "*" + ], + [ + 160, + 56, + 0, + 51, + 2, + "CLIP" + ], + [ + 161, + 4, + 2, + 57, + 0, + "*" + ], + [ + 162, + 57, + 0, + 51, + 3, + "VAE" + ], + [ + 163, + 4, + 2, + 58, + 0, + "*" + ], + [ + 164, + 58, + 0, + 30, + 1, + "VAE" + ], + [ + 165, + 5, + 0, + 59, + 0, + "*" + ], + [ + 166, + 59, + 0, + 51, + 4, + "CONDITIONING" + ], + [ + 167, + 6, + 0, + 60, + 0, + "*" + ], + [ + 168, + 60, + 0, + 61, + 0, + "*" + ], + [ + 169, + 61, + 0, + 51, + 5, + "CONDITIONING" + ], + [ + 170, + 60, + 0, + 28, + 2, + "CONDITIONING" + ] + ], + "groups": [], + "config": {}, + "extra": { + "ds": { + "scale": 1, + "offset": [ + 740, + 430 + ] + }, + "groupNodes": {}, + "controller_panel": { + "controllers": {}, + "hidden": true, + "highlight": true, + "version": 2, + "default_order": [] + }, + "node_versions": { + "comfy-core": "0.3.14", + "comfyui-impact-subpack": "74db20c95eca152a6d686c914edc0ef4e4762cb8", + "comfyui-impact-pack": "1ae7cae2df8cca06027edfa3a24512671239d6c4" + }, + "ue_links": [], + "VHS_latentpreview": false, + "VHS_latentpreviewrate": 0, + "VHS_MetadataImage": true, + "VHS_KeepIntermediate": true + }, + "version": 0.4 +} \ No newline at end of file diff --git a/zavodik/nodes/ComfyUI-Impact-Pack/example_workflows/2-MaskDetailer.jpg b/zavodik/nodes/ComfyUI-Impact-Pack/example_workflows/2-MaskDetailer.jpg new file mode 100644 index 0000000000000000000000000000000000000000..198296605d5d51654e9997a87b1536a8fa632ff3 --- /dev/null +++ b/zavodik/nodes/ComfyUI-Impact-Pack/example_workflows/2-MaskDetailer.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:1abd1823bed26d5e8c92707c0369d41b4f7c6a0629e70c3fa92fcc914585dc8a +size 114261 diff --git a/zavodik/nodes/ComfyUI-Impact-Pack/example_workflows/2-MaskDetailer.json b/zavodik/nodes/ComfyUI-Impact-Pack/example_workflows/2-MaskDetailer.json new file mode 100644 index 0000000000000000000000000000000000000000..014f5008c03d17661b5c243dd7e15eb29c4033ae --- /dev/null +++ b/zavodik/nodes/ComfyUI-Impact-Pack/example_workflows/2-MaskDetailer.json @@ -0,0 +1,596 @@ +{ + "last_node_id": 5, + "last_link_id": 5, + "nodes": [ + { + "id": 1, + "type": "LoadImage", + "pos": [ + 30, + 210 + ], + "size": [ + 390, + 320 + ], + "flags": {}, + "order": 0, + "mode": 0, + "inputs": [], + "outputs": [ + { + "name": "IMAGE", + "type": "IMAGE", + "shape": 3, + "links": [ + 1 + ] + }, + { + "name": "MASK", + "type": "MASK", + "shape": 3, + "links": [ + 2 + ] + } + ], + "properties": { + "Node name for S&R": "LoadImage" + }, + "widgets_values": [ + "clipspace/clipspace-mask-609196.2000000011.png [input]", + "image" + ] + }, + { + "id": 5, + "type": "PreviewImage", + "pos": [ + 1230, + 210 + ], + "size": [ + 210, + 246 + ], + "flags": {}, + "order": 3, + "mode": 0, + "inputs": [ + { + "name": "images", + "type": "IMAGE", + "link": 5 + } + ], + "outputs": [], + "properties": { + "Node name for S&R": "PreviewImage" + }, + "widgets_values": [] + }, + { + "id": 3, + "type": "workflow>Impact::MAKE_BASIC_PIPE", + "pos": [ + 20, + 620 + ], + "size": [ + 400, + 200 + ], 
+ "flags": {}, + "order": 1, + "mode": 0, + "inputs": [], + "outputs": [ + { + "name": "basic_pipe", + "type": "BASIC_PIPE", + "shape": 3, + "links": [ + 3 + ] + } + ], + "properties": { + "Node name for S&R": "workflow/Impact::MAKE_BASIC_PIPE" + }, + "widgets_values": [ + "SD1.5/realcartoon3d_v13.safetensors", + "(best quality:1.4), fox girl", + "(worst quality:1.4), nsfw" + ] + }, + { + "id": 2, + "type": "MaskDetailerPipe", + "pos": [ + 530, + 210 + ], + "size": [ + 569.4000244140625, + 850 + ], + "flags": {}, + "order": 2, + "mode": 0, + "inputs": [ + { + "name": "image", + "type": "IMAGE", + "link": 1 + }, + { + "name": "mask", + "type": "MASK", + "link": 2 + }, + { + "name": "basic_pipe", + "type": "BASIC_PIPE", + "link": 3, + "slot_index": 2 + }, + { + "name": "refiner_basic_pipe_opt", + "type": "BASIC_PIPE", + "shape": 7, + "link": null + }, + { + "name": "detailer_hook", + "type": "DETAILER_HOOK", + "shape": 7, + "link": null + }, + { + "name": "scheduler_func_opt", + "type": "SCHEDULER_FUNC", + "shape": 7, + "link": null + } + ], + "outputs": [ + { + "name": "image", + "type": "IMAGE", + "shape": 3, + "links": [ + 5 + ], + "slot_index": 0 + }, + { + "name": "cropped_refined", + "type": "IMAGE", + "shape": 6, + "links": null + }, + { + "name": "cropped_enhanced_alpha", + "type": "IMAGE", + "shape": 6, + "links": [ + 4 + ], + "slot_index": 2 + }, + { + "name": "basic_pipe", + "type": "BASIC_PIPE", + "shape": 3, + "links": null + }, + { + "name": "refiner_basic_pipe_opt", + "type": "BASIC_PIPE", + "shape": 3, + "links": null + } + ], + "properties": { + "Node name for S&R": "MaskDetailerPipe" + }, + "widgets_values": [ + 512, + true, + 1024, + true, + 1003, + "fixed", + 20, + 8, + "euler", + "normal", + 0.75, + 5, + 3, + 10, + 0.2, + 1, + 1, + false, + 20, + false, + false + ], + "color": "#322", + "bgcolor": "#533" + }, + { + "id": 4, + "type": "PreviewImage", + "pos": [ + 1230, + 560 + ], + "size": [ + 210, + 246 + ], + "flags": {}, + "order": 4, + "mode": 0, + "inputs": [ + { + "name": "images", + "type": "IMAGE", + "link": 4 + } + ], + "outputs": [], + "properties": { + "Node name for S&R": "PreviewImage" + }, + "widgets_values": [] + } + ], + "links": [ + [ + 1, + 1, + 0, + 2, + 0, + "IMAGE" + ], + [ + 2, + 1, + 1, + 2, + 1, + "MASK" + ], + [ + 3, + 3, + 0, + 2, + 2, + "BASIC_PIPE" + ], + [ + 4, + 2, + 2, + 4, + 0, + "IMAGE" + ], + [ + 5, + 2, + 0, + 5, + 0, + "IMAGE" + ] + ], + "groups": [], + "config": {}, + "extra": { + "ds": { + "scale": 1, + "offset": [ + 80, + -110 + ] + }, + "groupNodes": { + "Impact::MAKE_BASIC_PIPE": { + "author": "Dr.Lt.Data", + "category": "", + "config": { + "1": { + "input": { + "text": { + "name": "Positive prompt" + } + } + }, + "2": { + "input": { + "text": { + "name": "Negative prompt" + } + } + } + }, + "datetime": 1708272471445, + "external": [], + "links": [ + [ + 0, + 1, + 1, + 0, + 1, + "CLIP" + ], + [ + 0, + 1, + 2, + 0, + 1, + "CLIP" + ], + [ + 0, + 0, + 3, + 0, + 1, + "MODEL" + ], + [ + 0, + 1, + 3, + 1, + 1, + "CLIP" + ], + [ + 0, + 2, + 3, + 2, + 1, + "VAE" + ], + [ + 1, + 0, + 3, + 3, + 3, + "CONDITIONING" + ], + [ + 2, + 0, + 3, + 4, + 4, + "CONDITIONING" + ] + ], + "nodes": [ + { + "flags": {}, + "index": 0, + "mode": 0, + "order": 0, + "outputs": [ + { + "links": [], + "name": "MODEL", + "shape": 3, + "slot_index": 0, + "type": "MODEL", + "localized_name": "MODEL" + }, + { + "links": [], + "name": "CLIP", + "shape": 3, + "slot_index": 1, + "type": "CLIP", + "localized_name": "CLIP" + }, + { + "links": [], + "name": "VAE", + 
"shape": 3, + "slot_index": 2, + "type": "VAE", + "localized_name": "VAE" + } + ], + "pos": [ + 550, + 360 + ], + "properties": { + "Node name for S&R": "CheckpointLoaderSimple" + }, + "size": { + "0": 315, + "1": 98 + }, + "type": "CheckpointLoaderSimple", + "widgets_values": [ + "SDXL/sd_xl_base_1.0_0.9vae.safetensors" + ], + "inputs": [] + }, + { + "flags": {}, + "index": 1, + "inputs": [ + { + "link": null, + "name": "clip", + "type": "CLIP", + "localized_name": "clip" + } + ], + "mode": 0, + "order": 1, + "outputs": [ + { + "links": [], + "name": "CONDITIONING", + "shape": 3, + "slot_index": 0, + "type": "CONDITIONING", + "localized_name": "CONDITIONING" + } + ], + "pos": [ + 940, + 480 + ], + "properties": { + "Node name for S&R": "CLIPTextEncode" + }, + "size": { + "0": 263, + "1": 99 + }, + "title": "Positive", + "type": "CLIPTextEncode", + "widgets_values": [ + "" + ] + }, + { + "flags": {}, + "index": 2, + "inputs": [ + { + "link": null, + "name": "clip", + "type": "CLIP", + "localized_name": "clip" + } + ], + "mode": 0, + "order": 2, + "outputs": [ + { + "links": [], + "name": "CONDITIONING", + "shape": 3, + "slot_index": 0, + "type": "CONDITIONING", + "localized_name": "CONDITIONING" + } + ], + "pos": [ + 940, + 640 + ], + "properties": { + "Node name for S&R": "CLIPTextEncode" + }, + "size": { + "0": 263, + "1": 99 + }, + "title": "Negative", + "type": "CLIPTextEncode", + "widgets_values": [ + "" + ] + }, + { + "flags": {}, + "index": 3, + "inputs": [ + { + "link": null, + "name": "model", + "type": "MODEL", + "localized_name": "model" + }, + { + "link": null, + "name": "clip", + "type": "CLIP", + "localized_name": "clip" + }, + { + "link": null, + "name": "vae", + "type": "VAE", + "localized_name": "vae" + }, + { + "link": null, + "name": "positive", + "type": "CONDITIONING", + "localized_name": "positive" + }, + { + "link": null, + "name": "negative", + "type": "CONDITIONING", + "localized_name": "negative" + } + ], + "mode": 0, + "order": 3, + "outputs": [ + { + "links": null, + "name": "basic_pipe", + "shape": 3, + "slot_index": 0, + "type": "BASIC_PIPE", + "localized_name": "basic_pipe" + } + ], + "pos": [ + 1320, + 360 + ], + "properties": { + "Node name for S&R": "ToBasicPipe" + }, + "size": { + "0": 241.79998779296875, + "1": 106 + }, + "type": "ToBasicPipe" + } + ], + "packname": "Impact", + "version": "1.0" + } + }, + "controller_panel": { + "controllers": {}, + "hidden": true, + "highlight": true, + "version": 2, + "default_order": [] + }, + "node_versions": { + "comfy-core": "0.3.14", + "comfyui-impact-pack": "1ae7cae2df8cca06027edfa3a24512671239d6c4" + }, + "ue_links": [], + "VHS_latentpreview": false, + "VHS_latentpreviewrate": 0, + "VHS_MetadataImage": true, + "VHS_KeepIntermediate": true + }, + "version": 0.4 +} \ No newline at end of file diff --git a/zavodik/nodes/ComfyUI-Impact-Pack/example_workflows/3-SEGSDetailer.jpg b/zavodik/nodes/ComfyUI-Impact-Pack/example_workflows/3-SEGSDetailer.jpg new file mode 100644 index 0000000000000000000000000000000000000000..8c45308643b46f8cf836032b7522fe70c6eff38b --- /dev/null +++ b/zavodik/nodes/ComfyUI-Impact-Pack/example_workflows/3-SEGSDetailer.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:9f4aed9e96b46408fba0ed644ce1a86ffa7ca3cfd373c2849a2cc1fc5f243175 +size 42824 diff --git a/zavodik/nodes/ComfyUI-Impact-Pack/example_workflows/3-SEGSDetailer.json b/zavodik/nodes/ComfyUI-Impact-Pack/example_workflows/3-SEGSDetailer.json new file mode 100644 index 
0000000000000000000000000000000000000000..5f9f950b7c4ead12c3e03be2f5774536880e5d75 --- /dev/null +++ b/zavodik/nodes/ComfyUI-Impact-Pack/example_workflows/3-SEGSDetailer.json @@ -0,0 +1,1056 @@ +{ + "last_node_id": 19, + "last_link_id": 30, + "nodes": [ + { + "id": 8, + "type": "SAMLoader", + "pos": [ + 60, + 530 + ], + "size": [ + 315, + 82 + ], + "flags": {}, + "order": 0, + "mode": 0, + "inputs": [], + "outputs": [ + { + "name": "SAM_MODEL", + "type": "SAM_MODEL", + "shape": 3, + "links": [ + 7 + ] + } + ], + "properties": { + "Node name for S&R": "SAMLoader" + }, + "widgets_values": [ + "sam_vit_b_01ec64.pth", + "AUTO" + ], + "color": "#322", + "bgcolor": "#533" + }, + { + "id": 7, + "type": "UltralyticsDetectorProvider", + "pos": [ + 60, + 390 + ], + "size": [ + 315, + 78 + ], + "flags": {}, + "order": 1, + "mode": 0, + "inputs": [], + "outputs": [ + { + "name": "BBOX_DETECTOR", + "type": "BBOX_DETECTOR", + "shape": 3, + "links": [ + 6 + ] + }, + { + "name": "SEGM_DETECTOR", + "type": "SEGM_DETECTOR", + "shape": 3, + "links": null + } + ], + "properties": { + "Node name for S&R": "UltralyticsDetectorProvider" + }, + "widgets_values": [ + "bbox/face_yolov8m.pt" + ], + "color": "#322", + "bgcolor": "#533" + }, + { + "id": 14, + "type": "Reroute", + "pos": [ + 570, + 330 + ], + "size": [ + 75, + 26 + ], + "flags": {}, + "order": 5, + "mode": 0, + "inputs": [ + { + "name": "", + "type": "*", + "link": 18 + } + ], + "outputs": [ + { + "name": "", + "type": "IMAGE", + "links": [ + 19 + ], + "slot_index": 0 + } + ], + "properties": { + "showOutputText": false, + "horizontal": false + }, + "color": "#322", + "bgcolor": "#533" + }, + { + "id": 15, + "type": "Reroute", + "pos": [ + 1240, + 330 + ], + "size": [ + 75, + 26 + ], + "flags": {}, + "order": 7, + "mode": 0, + "inputs": [ + { + "name": "", + "type": "*", + "link": 19 + } + ], + "outputs": [ + { + "name": "", + "type": "IMAGE", + "links": [ + 24, + 26 + ], + "slot_index": 0 + } + ], + "properties": { + "showOutputText": false, + "horizontal": false + }, + "color": "#322", + "bgcolor": "#533" + }, + { + "id": 16, + "type": "Reroute", + "pos": [ + 1740, + 330 + ], + "size": [ + 75, + 26 + ], + "flags": {}, + "order": 8, + "mode": 0, + "inputs": [ + { + "name": "", + "type": "*", + "link": 24 + } + ], + "outputs": [ + { + "name": "", + "type": "IMAGE", + "links": [ + 25 + ], + "slot_index": 0 + } + ], + "properties": { + "showOutputText": false, + "horizontal": false + }, + "color": "#322", + "bgcolor": "#533" + }, + { + "id": 17, + "type": "Reroute", + "pos": [ + 1390, + 390 + ], + "size": [ + 75, + 26 + ], + "flags": {}, + "order": 9, + "mode": 0, + "inputs": [ + { + "name": "", + "type": "*", + "pos": [ + 37.5, + 0 + ], + "link": 26 + } + ], + "outputs": [ + { + "name": "", + "type": "IMAGE", + "links": [ + 27, + 28 + ], + "slot_index": 0 + } + ], + "properties": { + "showOutputText": false, + "horizontal": true + }, + "color": "#322", + "bgcolor": "#533" + }, + { + "id": 13, + "type": "SEGSPaste", + "pos": [ + 1860, + 510 + ], + "size": [ + 570, + 122 + ], + "flags": {}, + "order": 10, + "mode": 0, + "inputs": [ + { + "name": "image", + "type": "IMAGE", + "link": 25 + }, + { + "name": "segs", + "type": "SEGS", + "link": 22 + }, + { + "name": "ref_image_opt", + "type": "IMAGE", + "shape": 7, + "link": null + } + ], + "outputs": [ + { + "name": "IMAGE", + "type": "IMAGE", + "shape": 3, + "links": [ + 29 + ], + "slot_index": 0 + } + ], + "properties": { + "Node name for S&R": "SEGSPaste" + }, + "widgets_values": [ + 5, + 255 + ], + 
"color": "#322", + "bgcolor": "#533" + }, + { + "id": 1, + "type": "LoadImage", + "pos": [ + 60, + 680 + ], + "size": [ + 315, + 314 + ], + "flags": {}, + "order": 2, + "mode": 0, + "inputs": [], + "outputs": [ + { + "name": "IMAGE", + "type": "IMAGE", + "shape": 3, + "links": [ + 1, + 8, + 18 + ], + "slot_index": 0 + }, + { + "name": "MASK", + "type": "MASK", + "shape": 3, + "links": null + } + ], + "properties": { + "Node name for S&R": "LoadImage" + }, + "widgets_values": [ + "ComfyUI_temp_xltgv_00001_.png", + "image" + ], + "color": "#322", + "bgcolor": "#533" + }, + { + "id": 19, + "type": "workflow>MAKE_BASIC_PIPE", + "pos": [ + 60, + 70 + ], + "size": [ + 400, + 200 + ], + "flags": {}, + "order": 3, + "mode": 0, + "inputs": [], + "outputs": [ + { + "name": "basic_pipe", + "type": "BASIC_PIPE", + "shape": 3, + "links": [ + 30 + ] + } + ], + "properties": { + "Node name for S&R": "workflow/MAKE_BASIC_PIPE" + }, + "widgets_values": [ + "SD1.5/V07_v07.safetensors", + "best quality:1.4, detailed, (goth:0.8)", + "low quality:1.4, worst quality:1.4" + ], + "color": "#222", + "bgcolor": "#000" + }, + { + "id": 6, + "type": "SEGSPreview", + "pos": [ + 1460, + 600 + ], + "size": [ + 320, + 314 + ], + "flags": {}, + "order": 11, + "mode": 0, + "inputs": [ + { + "name": "segs", + "type": "SEGS", + "link": 5 + }, + { + "name": "fallback_image_opt", + "type": "IMAGE", + "shape": 7, + "link": 27 + } + ], + "outputs": [ + { + "name": "IMAGE", + "type": "IMAGE", + "shape": 6, + "links": null + } + ], + "properties": { + "Node name for S&R": "SEGSPreview" + }, + "widgets_values": [ + true, + 0.2 + ], + "color": "#322", + "bgcolor": "#533" + }, + { + "id": 4, + "type": "SEGSDetailer", + "pos": [ + 960, + 530 + ], + "size": [ + 440, + 734 + ], + "flags": {}, + "order": 6, + "mode": 0, + "inputs": [ + { + "name": "image", + "type": "IMAGE", + "link": 8 + }, + { + "name": "segs", + "type": "SEGS", + "link": 3 + }, + { + "name": "basic_pipe", + "type": "BASIC_PIPE", + "link": 30, + "slot_index": 2 + }, + { + "name": "refiner_basic_pipe_opt", + "type": "BASIC_PIPE", + "shape": 7, + "link": null + }, + { + "name": "scheduler_func_opt", + "type": "SCHEDULER_FUNC", + "shape": 7, + "link": null + } + ], + "outputs": [ + { + "name": "segs", + "type": "SEGS", + "shape": 3, + "links": [ + 5, + 22 + ], + "slot_index": 0 + }, + { + "name": "cnet_images", + "type": "IMAGE", + "shape": 6, + "links": [], + "slot_index": 1 + } + ], + "properties": { + "Node name for S&R": "SEGSDetailer" + }, + "widgets_values": [ + 256, + true, + 768, + 1021210429641780, + "fixed", + 20, + 8, + "euler", + "normal", + 0.3, + true, + false, + 0.2, + 1, + 1, + false, + 20 + ], + "color": "#322", + "bgcolor": "#533" + }, + { + "id": 5, + "type": "PreviewImage", + "pos": [ + 1460, + 940 + ], + "size": [ + 320, + 310 + ], + "flags": {}, + "order": 12, + "mode": 0, + "inputs": [ + { + "name": "images", + "type": "IMAGE", + "link": 28 + } + ], + "outputs": [], + "properties": { + "Node name for S&R": "PreviewImage" + }, + "widgets_values": [], + "color": "#322", + "bgcolor": "#533" + }, + { + "id": 18, + "type": "PreviewImage", + "pos": [ + 1860, + 690 + ], + "size": [ + 570, + 560 + ], + "flags": {}, + "order": 13, + "mode": 0, + "inputs": [ + { + "name": "images", + "type": "IMAGE", + "link": 29 + } + ], + "outputs": [], + "properties": { + "Node name for S&R": "PreviewImage" + }, + "widgets_values": [], + "color": "#322", + "bgcolor": "#533" + }, + { + "id": 2, + "type": "ImpactSimpleDetectorSEGS", + "pos": [ + 570, + 530 + ], + "size": [ + 
315, + 310 + ], + "flags": {}, + "order": 4, + "mode": 0, + "inputs": [ + { + "name": "bbox_detector", + "type": "BBOX_DETECTOR", + "link": 6, + "slot_index": 0 + }, + { + "name": "image", + "type": "IMAGE", + "link": 1 + }, + { + "name": "sam_model_opt", + "type": "SAM_MODEL", + "shape": 7, + "link": 7, + "slot_index": 2 + }, + { + "name": "segm_detector_opt", + "type": "SEGM_DETECTOR", + "shape": 7, + "link": null + } + ], + "outputs": [ + { + "name": "SEGS", + "type": "SEGS", + "shape": 3, + "links": [ + 3 + ], + "slot_index": 0 + } + ], + "properties": { + "Node name for S&R": "ImpactSimpleDetectorSEGS" + }, + "widgets_values": [ + 0.5, + 0, + 3, + 10, + 0.5, + 0, + 0, + 0.7, + 0 + ], + "color": "#322", + "bgcolor": "#533" + } + ], + "links": [ + [ + 1, + 1, + 0, + 2, + 1, + "IMAGE" + ], + [ + 3, + 2, + 0, + 4, + 1, + "SEGS" + ], + [ + 5, + 4, + 0, + 6, + 0, + "SEGS" + ], + [ + 6, + 7, + 0, + 2, + 0, + "BBOX_DETECTOR" + ], + [ + 7, + 8, + 0, + 2, + 2, + "SAM_MODEL" + ], + [ + 8, + 1, + 0, + 4, + 0, + "IMAGE" + ], + [ + 18, + 1, + 0, + 14, + 0, + "*" + ], + [ + 19, + 14, + 0, + 15, + 0, + "*" + ], + [ + 22, + 4, + 0, + 13, + 1, + "SEGS" + ], + [ + 24, + 15, + 0, + 16, + 0, + "*" + ], + [ + 25, + 16, + 0, + 13, + 0, + "IMAGE" + ], + [ + 26, + 15, + 0, + 17, + 0, + "*" + ], + [ + 27, + 17, + 0, + 6, + 1, + "IMAGE" + ], + [ + 28, + 17, + 0, + 5, + 0, + "IMAGE" + ], + [ + 29, + 13, + 0, + 18, + 0, + "IMAGE" + ], + [ + 30, + 19, + 0, + 4, + 2, + "BASIC_PIPE" + ] + ], + "groups": [], + "config": {}, + "extra": { + "groupNodes": { + "MAKE_BASIC_PIPE": { + "nodes": [ + { + "type": "CheckpointLoaderSimple", + "pos": [ + 140, + 150 + ], + "size": { + "0": 421.5882568359375, + "1": 98 + }, + "flags": {}, + "order": 3, + "mode": 0, + "outputs": [ + { + "name": "MODEL", + "type": "MODEL", + "links": [], + "shape": 3, + "slot_index": 0, + "localized_name": "MODEL" + }, + { + "name": "CLIP", + "type": "CLIP", + "links": [], + "shape": 3, + "slot_index": 1, + "localized_name": "CLIP" + }, + { + "name": "VAE", + "type": "VAE", + "links": [], + "shape": 3, + "localized_name": "VAE" + } + ], + "properties": { + "Node name for S&R": "CheckpointLoaderSimple" + }, + "widgets_values": [ + "SD1.5/V07_v07.safetensors" + ], + "color": "#222", + "bgcolor": "#000", + "index": 0, + "inputs": [] + }, + { + "type": "CLIPTextEncode", + "pos": [ + 740, + 60 + ], + "size": { + "0": 256.9515686035156, + "1": 76.1346435546875 + }, + "flags": {}, + "order": 6, + "mode": 0, + "inputs": [ + { + "name": "clip", + "type": "CLIP", + "link": null, + "localized_name": "clip" + } + ], + "outputs": [ + { + "name": "CONDITIONING", + "type": "CONDITIONING", + "links": [], + "shape": 3, + "slot_index": 0, + "localized_name": "CONDITIONING" + } + ], + "properties": { + "Node name for S&R": "CLIPTextEncode" + }, + "widgets_values": [ + "best quality:1.4, detailed, (goth:0.8)" + ], + "color": "#222", + "bgcolor": "#000", + "index": 1 + }, + { + "type": "CLIPTextEncode", + "pos": [ + 740, + 270 + ], + "size": { + "0": 258.04248046875, + "1": 79.95282745361328 + }, + "flags": {}, + "order": 7, + "mode": 0, + "inputs": [ + { + "name": "clip", + "type": "CLIP", + "link": null, + "slot_index": 0, + "localized_name": "clip" + } + ], + "outputs": [ + { + "name": "CONDITIONING", + "type": "CONDITIONING", + "links": [], + "shape": 3, + "slot_index": 0, + "localized_name": "CONDITIONING" + } + ], + "properties": { + "Node name for S&R": "CLIPTextEncode" + }, + "widgets_values": [ + "low quality:1.4, worst quality:1.4" + ], + "color": "#222", + 
"bgcolor": "#000", + "index": 2 + }, + { + "type": "ToBasicPipe", + "pos": [ + 1240, + 150 + ], + "size": { + "0": 241.79998779296875, + "1": 106 + }, + "flags": {}, + "order": 9, + "mode": 0, + "inputs": [ + { + "name": "model", + "type": "MODEL", + "link": null, + "localized_name": "model" + }, + { + "name": "clip", + "type": "CLIP", + "link": null, + "localized_name": "clip" + }, + { + "name": "vae", + "type": "VAE", + "link": null, + "slot_index": 2, + "localized_name": "vae" + }, + { + "name": "positive", + "type": "CONDITIONING", + "link": null, + "localized_name": "positive" + }, + { + "name": "negative", + "type": "CONDITIONING", + "link": null, + "localized_name": "negative" + } + ], + "outputs": [ + { + "name": "basic_pipe", + "type": "BASIC_PIPE", + "links": [], + "shape": 3, + "slot_index": 0, + "localized_name": "basic_pipe" + } + ], + "properties": { + "Node name for S&R": "ToBasicPipe" + }, + "color": "#222", + "bgcolor": "#000", + "index": 3 + } + ], + "links": [ + [ + 0, + 1, + 1, + 0, + 9, + "CLIP" + ], + [ + 0, + 1, + 2, + 0, + 9, + "CLIP" + ], + [ + 0, + 0, + 3, + 0, + 9, + "MODEL" + ], + [ + 0, + 1, + 3, + 1, + 9, + "CLIP" + ], + [ + 0, + 2, + 3, + 2, + 9, + "VAE" + ], + [ + 1, + 0, + 3, + 3, + 10, + "CONDITIONING" + ], + [ + 2, + 0, + 3, + 4, + 11, + "CONDITIONING" + ] + ], + "external": [ + [ + 3, + 0, + "BASIC_PIPE" + ] + ] + } + }, + "controller_panel": { + "controllers": {}, + "hidden": true, + "highlight": true, + "version": 2, + "default_order": [] + }, + "ds": { + "scale": 0.7513148009015777, + "offset": [ + 158.41700000000017, + 158.82600000000025 + ] + }, + "node_versions": { + "comfyui-impact-pack": "1ae7cae2df8cca06027edfa3a24512671239d6c4", + "comfyui-impact-subpack": "74db20c95eca152a6d686c914edc0ef4e4762cb8", + "comfy-core": "0.3.14" + }, + "ue_links": [], + "VHS_latentpreview": false, + "VHS_latentpreviewrate": 0, + "VHS_MetadataImage": true, + "VHS_KeepIntermediate": true + }, + "version": 0.4 +} \ No newline at end of file diff --git a/zavodik/nodes/ComfyUI-Impact-Pack/example_workflows/4-MakeTileSEGS-Upscale.jpg b/zavodik/nodes/ComfyUI-Impact-Pack/example_workflows/4-MakeTileSEGS-Upscale.jpg new file mode 100644 index 0000000000000000000000000000000000000000..307f1959556bbbfe8e117b3f31e9b9867eaff30d --- /dev/null +++ b/zavodik/nodes/ComfyUI-Impact-Pack/example_workflows/4-MakeTileSEGS-Upscale.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:b1d3d9a3563821ba82a6277610e83ec2fe20473679ebb25c70c811a252e06ecd +size 108834 diff --git a/zavodik/nodes/ComfyUI-Impact-Pack/example_workflows/4-MakeTileSEGS-Upscale.json b/zavodik/nodes/ComfyUI-Impact-Pack/example_workflows/4-MakeTileSEGS-Upscale.json new file mode 100644 index 0000000000000000000000000000000000000000..34260d4e6b5af1b3dc8ec836679771699811018e --- /dev/null +++ b/zavodik/nodes/ComfyUI-Impact-Pack/example_workflows/4-MakeTileSEGS-Upscale.json @@ -0,0 +1,1627 @@ +{ + "last_node_id": 67, + "last_link_id": 115, + "nodes": [ + { + "id": 31, + "type": "Reroute", + "pos": [ + 1170, + 730 + ], + "size": [ + 75, + 26 + ], + "flags": {}, + "order": 15, + "mode": 0, + "inputs": [ + { + "name": "", + "type": "*", + "link": 61 + } + ], + "outputs": [ + { + "name": "", + "type": "IMAGE", + "links": [ + 59, + 60 + ], + "slot_index": 0 + } + ], + "properties": { + "showOutputText": false, + "horizontal": false + } + }, + { + "id": 32, + "type": "SAMLoader", + "pos": [ + -160, + 840 + ], + "size": [ + 315, + 82 + ], + "flags": {}, + "order": 0, + "mode": 0, + "inputs": [], + "outputs": [ 
+ { + "name": "SAM_MODEL", + "type": "SAM_MODEL", + "shape": 3, + "links": [ + 62 + ] + } + ], + "properties": { + "Node name for S&R": "SAMLoader" + }, + "widgets_values": [ + "sam_vit_b_01ec64.pth", + "AUTO" + ] + }, + { + "id": 24, + "type": "UltralyticsDetectorProvider", + "pos": [ + -160, + 700 + ], + "size": [ + 315, + 78 + ], + "flags": {}, + "order": 1, + "mode": 0, + "inputs": [], + "outputs": [ + { + "name": "BBOX_DETECTOR", + "type": "BBOX_DETECTOR", + "shape": 3, + "links": [ + 35 + ] + }, + { + "name": "SEGM_DETECTOR", + "type": "SEGM_DETECTOR", + "shape": 3, + "links": [], + "slot_index": 1 + } + ], + "properties": { + "Node name for S&R": "UltralyticsDetectorProvider" + }, + "widgets_values": [ + "segm/person_yolov8m-seg.pt" + ] + }, + { + "id": 9, + "type": "ImageScaleBy", + "pos": [ + 280, + 290 + ], + "size": [ + 315, + 82 + ], + "flags": {}, + "order": 12, + "mode": 0, + "inputs": [ + { + "name": "image", + "type": "IMAGE", + "link": 8, + "slot_index": 0 + } + ], + "outputs": [ + { + "name": "IMAGE", + "type": "IMAGE", + "shape": 3, + "links": [ + 10, + 28 + ], + "slot_index": 0 + } + ], + "properties": { + "Node name for S&R": "ImageScaleBy" + }, + "widgets_values": [ + "lanczos", + 2 + ] + }, + { + "id": 52, + "type": "Reroute", + "pos": [ + 1816.5716552734375, + 473.7144470214844 + ], + "size": [ + 75, + 26 + ], + "flags": {}, + "order": 21, + "mode": 0, + "inputs": [ + { + "name": "", + "type": "*", + "link": 106 + } + ], + "outputs": [ + { + "name": "", + "type": "SEGS", + "links": [ + 98 + ], + "slot_index": 0 + } + ], + "properties": { + "showOutputText": false, + "horizontal": false + } + }, + { + "id": 53, + "type": "Reroute", + "pos": [ + 1180, + 1540 + ], + "size": [ + 75, + 26 + ], + "flags": {}, + "order": 19, + "mode": 0, + "inputs": [ + { + "name": "", + "type": "*", + "link": 110 + } + ], + "outputs": [ + { + "name": "", + "type": "SEGS", + "links": [ + 100 + ], + "slot_index": 0 + } + ], + "properties": { + "showOutputText": false, + "horizontal": false + } + }, + { + "id": 19, + "type": "workflow>MAKE_BASIC_PIPE", + "pos": [ + 1440, + 850 + ], + "size": [ + 420, + 170 + ], + "flags": {}, + "order": 2, + "mode": 0, + "inputs": [], + "outputs": [ + { + "name": "basic_pipe", + "type": "BASIC_PIPE", + "shape": 3, + "links": [ + 76 + ], + "slot_index": 0 + } + ], + "properties": { + "Node name for S&R": "workflow/MAKE_BASIC_PIPE" + }, + "widgets_values": [ + "SDXL/MOHAWK_v20BackedVAE.safetensors", + "photograph of a girl, metalic robotic body, sun rising, snow field, hdr, cropped,", + "deformed, blurry, leather, fabric\n" + ] + }, + { + "id": 16, + "type": "PreviewImage", + "pos": [ + 2990, + 730 + ], + "size": [ + 610.069580078125, + 774.6857299804688 + ], + "flags": {}, + "order": 25, + "mode": 0, + "inputs": [ + { + "name": "images", + "type": "IMAGE", + "link": 96 + } + ], + "outputs": [], + "properties": { + "Node name for S&R": "PreviewImage" + }, + "widgets_values": [] + }, + { + "id": 54, + "type": "Reroute", + "pos": [ + 2390, + 1540 + ], + "size": [ + 75, + 26 + ], + "flags": {}, + "order": 22, + "mode": 0, + "inputs": [ + { + "name": "", + "type": "*", + "link": 100 + } + ], + "outputs": [ + { + "name": "", + "type": "SEGS", + "links": [ + 101 + ], + "slot_index": 0 + } + ], + "properties": { + "showOutputText": false, + "horizontal": false + } + }, + { + "id": 51, + "type": "DetailerForEachDebugPipe", + "pos": [ + 2510, + 730 + ], + "size": [ + 410, + 996 + ], + "flags": {}, + "order": 24, + "mode": 0, + "inputs": [ + { + "name": "image", + 
"type": "IMAGE", + "link": 95, + "slot_index": 0 + }, + { + "name": "segs", + "type": "SEGS", + "link": 101 + }, + { + "name": "basic_pipe", + "type": "BASIC_PIPE", + "link": 94 + }, + { + "name": "detailer_hook", + "type": "DETAILER_HOOK", + "shape": 7, + "link": null + }, + { + "name": "refiner_basic_pipe_opt", + "type": "BASIC_PIPE", + "shape": 7, + "link": null + }, + { + "name": "scheduler_func_opt", + "type": "SCHEDULER_FUNC", + "shape": 7, + "link": null + } + ], + "outputs": [ + { + "name": "image", + "type": "IMAGE", + "shape": 3, + "links": [ + 96 + ], + "slot_index": 0 + }, + { + "name": "segs", + "type": "SEGS", + "shape": 3, + "links": null + }, + { + "name": "basic_pipe", + "type": "BASIC_PIPE", + "shape": 3, + "links": null + }, + { + "name": "cropped", + "type": "IMAGE", + "shape": 6, + "links": null + }, + { + "name": "cropped_refined", + "type": "IMAGE", + "shape": 6, + "links": [], + "slot_index": 4 + }, + { + "name": "cropped_refined_alpha", + "type": "IMAGE", + "shape": 6, + "links": [], + "slot_index": 5 + }, + { + "name": "cnet_images", + "type": "IMAGE", + "shape": 6, + "links": [], + "slot_index": 6 + } + ], + "properties": { + "Node name for S&R": "DetailerForEachDebugPipe" + }, + "widgets_values": [ + 64, + true, + 1024, + 522790177337692, + "fixed", + 20, + 8, + "dpmpp_3m_sde_gpu", + "karras", + 0.4, + 10, + true, + true, + "[CONCAT] red double bun, metalic arm, zoey", + 0.2, + 1, + false, + 50, + false, + false + ] + }, + { + "id": 20, + "type": "Reroute", + "pos": [ + 660, + 730 + ], + "size": [ + 75, + 26 + ], + "flags": {}, + "order": 14, + "mode": 0, + "inputs": [ + { + "name": "", + "type": "*", + "link": 28 + } + ], + "outputs": [ + { + "name": "", + "type": "IMAGE", + "links": [ + 61, + 107, + 111 + ], + "slot_index": 0 + } + ], + "properties": { + "showOutputText": false, + "horizontal": false + } + }, + { + "id": 28, + "type": "SEGSPreview", + "pos": [ + 1279, + 1610 + ], + "size": [ + 315, + 314 + ], + "flags": {}, + "order": 18, + "mode": 0, + "inputs": [ + { + "name": "segs", + "type": "SEGS", + "link": 109, + "slot_index": 0 + }, + { + "name": "fallback_image_opt", + "type": "IMAGE", + "shape": 7, + "link": 59, + "slot_index": 1 + } + ], + "outputs": [ + { + "name": "IMAGE", + "type": "IMAGE", + "shape": 6, + "links": [], + "slot_index": 0 + } + ], + "properties": { + "Node name for S&R": "SEGSPreview" + }, + "widgets_values": [ + true, + 0.1 + ] + }, + { + "id": 56, + "type": "ImpactMakeTileSEGS", + "pos": [ + 780, + 470 + ], + "size": [ + 315, + 218 + ], + "flags": {}, + "order": 17, + "mode": 0, + "inputs": [ + { + "name": "images", + "type": "IMAGE", + "link": 111 + }, + { + "name": "filter_in_segs_opt", + "type": "SEGS", + "shape": 7, + "link": null + }, + { + "name": "filter_out_segs_opt", + "type": "SEGS", + "shape": 7, + "link": 114 + } + ], + "outputs": [ + { + "name": "SEGS", + "type": "SEGS", + "shape": 3, + "links": [ + 105, + 106 + ] + } + ], + "properties": { + "Node name for S&R": "ImpactMakeTileSEGS" + }, + "widgets_values": [ + 768, + 1.5, + 200, + 30, + 0.7000000000000001, + "Reuse fast" + ], + "color": "#322", + "bgcolor": "#533" + }, + { + "id": 6, + "type": "SEGSPreview", + "pos": [ + 1292, + 268 + ], + "size": [ + 430.35296630859375, + 388.4536437988281 + ], + "flags": {}, + "order": 20, + "mode": 0, + "inputs": [ + { + "name": "segs", + "type": "SEGS", + "link": 105, + "slot_index": 0 + }, + { + "name": "fallback_image_opt", + "type": "IMAGE", + "shape": 7, + "link": 10, + "slot_index": 1 + } + ], + "outputs": [ + { + "name": 
"IMAGE", + "type": "IMAGE", + "shape": 6, + "links": [], + "slot_index": 0 + } + ], + "properties": { + "Node name for S&R": "SEGSPreview" + }, + "widgets_values": [ + true, + 0.1 + ] + }, + { + "id": 60, + "type": "Note", + "pos": [ + -1033, + 292 + ], + "size": [ + 638.3837890625, + 178.84756469726562 + ], + "flags": {}, + "order": 3, + "mode": 0, + "inputs": [], + "outputs": [], + "title": "1.Intro", + "properties": { + "text": "" + }, + "widgets_values": [ + "This video demonstrates how to apply the newly added \"Make Tile SEGS\" in the Impact Pack to upscale using the upscale method.\n\n\"Make Tile SEGS\" node splits the image into tiles and creates SEGS.\n\nBy using this, you can mimic the tile-based upscale function and, if the detected SEGS is too large, you can also split it for detailing." + ], + "color": "#222", + "bgcolor": "#000" + }, + { + "id": 2, + "type": "LoadImage", + "pos": [ + -160, + 290 + ], + "size": [ + 315, + 314 + ], + "flags": {}, + "order": 4, + "mode": 0, + "inputs": [], + "outputs": [ + { + "name": "IMAGE", + "type": "IMAGE", + "shape": 3, + "links": [ + 8, + 34 + ], + "slot_index": 0 + }, + { + "name": "MASK", + "type": "MASK", + "shape": 3, + "links": null + } + ], + "properties": { + "Node name for S&R": "LoadImage" + }, + "widgets_values": [ + "20240107_013.webp", + "image" + ] + }, + { + "id": 62, + "type": "Note", + "pos": [ + 190, + 60 + ], + "size": [ + 396.33758544921875, + 127.46672821044922 + ], + "flags": {}, + "order": 5, + "mode": 0, + "inputs": [], + "outputs": [], + "title": "2. Simple Upscale", + "properties": { + "text": "" + }, + "widgets_values": [ + "First, let's upscale the original 1024x1536 image to double its size.\n\nSimply upscale by 2x using the \"Upscale Image Scale By\". \nThe result will, of course, be blurry.\n" + ], + "color": "#222", + "bgcolor": "#000" + }, + { + "id": 61, + "type": "Note", + "pos": [ + 780, + 35 + ], + "size": [ + 677.756591796875, + 157.3253173828125 + ], + "flags": {}, + "order": 6, + "mode": 0, + "inputs": [], + "outputs": [], + "title": "3. Make Tile SEGS", + "properties": { + "text": "" + }, + "widgets_values": [ + "Let's process this image into SEGS using \"Make Tile SEGS\".\n\nYou can see that SEGS is structured so that every part of the image can be included in the mask area.\n\nUnlike the traditional tile upscaler, this method uses Detailer, so you can improve tile heterogeneity using the 'crop_factor'.\n\nAlso, setting 'mask_irregularity' to 0.7 will make the mask border irregular, improving the heterogeneity of the junctions." + ], + "color": "#222", + "bgcolor": "#000" + }, + { + "id": 63, + "type": "Note", + "pos": [ + -108, + 1056 + ], + "size": [ + 709.2979736328125, + 143.4364013671875 + ], + "flags": {}, + "order": 7, + "mode": 0, + "inputs": [], + "outputs": [], + "title": "4.Human SEGS", + "properties": { + "text": "" + }, + "widgets_values": [ + "Next, let's separate the background and the person to alleviate the noticeable artifacts, especially in the case of humans.\n\nApply the person ultralytics model to the \"Simple Detector\" to create SEGS containing the entire person.\n\nConnect the SEGS to the 'filter_out_segs_opt' in one \"Make Tile SEGS\" node, \nand in the other \"Make Tile SEGS\" node, connect it to the 'filter_in_segs_opt'." 
+ ], + "color": "#222", + "bgcolor": "#000" + }, + { + "id": 65, + "type": "Note", + "pos": [ + 776, + 803 + ], + "size": [ + 620.825927734375, + 163.94039916992188 + ], + "flags": {}, + "order": 8, + "mode": 0, + "inputs": [], + "outputs": [], + "title": "5. filter_out_segs_opt", + "properties": { + "text": "" + }, + "widgets_values": [ + "The node connected to 'filter_out_segs_opt' creates SEGS excluding the mask of the input SEGS, allowing you to detail the background tiles.\n\n'min_overlap' determines how much the masks of each SEGS should overlap, and 'filter_segs_dilation' dilates the mask of the input SEGS.\n\nIncreasing 'filter_segs_dilation' in 'filter_out_segs_opt' generates masks further away from the person." + ], + "color": "#222", + "bgcolor": "#000" + }, + { + "id": 66, + "type": "Note", + "pos": [ + 814, + 2007 + ], + "size": [ + 620.825927734375, + 163.94039916992188 + ], + "flags": {}, + "order": 9, + "mode": 0, + "inputs": [], + "outputs": [], + "title": "6. filter_in_segs_opt", + "properties": { + "text": "" + }, + "widgets_values": [ + "On the other hand, the node connected to 'filter_in_segs_opt' creates SEGS with masks overlapping the input SEGS, allowing you to detail the person.\n\nSince detailing the person requires more attention than the background, increase 'bbox_size' to avoid creating small pieces, and increase 'min_overlap' to reduce junction artifacts and allow overlapping detailing.\n" + ], + "color": "#222", + "bgcolor": "#000" + }, + { + "id": 67, + "type": "Note", + "pos": [ + 1955, + 1805 + ], + "size": [ + 620.825927734375, + 163.94039916992188 + ], + "flags": {}, + "order": 10, + "mode": 0, + "inputs": [], + "outputs": [], + "title": "7. Detailing", + "properties": { + "text": "" + }, + "widgets_values": [ + "Now, using the SEGS created in this way, let's improve the upscaled image using two Detailer nodes.\n\nAlthough you can handle this with \"SEGS Concat\", separating into two Detailer nodes allows for separate options for background and person detailing.\n\nThis way, when modifying the detailing options for a person, you can prevent the recalculation of background detailing.\n" + ], + "color": "#222", + "bgcolor": "#000" + }, + { + "id": 64, + "type": "Note", + "pos": [ + 2994, + 500 + ], + "size": [ + 620.825927734375, + 163.94039916992188 + ], + "flags": {}, + "order": 11, + "mode": 0, + "inputs": [], + "outputs": [], + "title": "8. Result", + "properties": { + "text": "" + }, + "widgets_values": [ + "It seems that the image has upscaled well without significant artifacts in the 2048x3072 size." 
+ ], + "color": "#222", + "bgcolor": "#000" + }, + { + "id": 10, + "type": "DetailerForEachDebugPipe", + "pos": [ + 1960, + 730 + ], + "size": [ + 410, + 996 + ], + "flags": {}, + "order": 23, + "mode": 0, + "inputs": [ + { + "name": "image", + "type": "IMAGE", + "link": 60, + "slot_index": 0 + }, + { + "name": "segs", + "type": "SEGS", + "link": 98 + }, + { + "name": "basic_pipe", + "type": "BASIC_PIPE", + "link": 76 + }, + { + "name": "detailer_hook", + "type": "DETAILER_HOOK", + "shape": 7, + "link": null + }, + { + "name": "refiner_basic_pipe_opt", + "type": "BASIC_PIPE", + "shape": 7, + "link": null + }, + { + "name": "scheduler_func_opt", + "type": "SCHEDULER_FUNC", + "shape": 7, + "link": null + } + ], + "outputs": [ + { + "name": "image", + "type": "IMAGE", + "shape": 3, + "links": [ + 95 + ], + "slot_index": 0 + }, + { + "name": "segs", + "type": "SEGS", + "shape": 3, + "links": null + }, + { + "name": "basic_pipe", + "type": "BASIC_PIPE", + "shape": 3, + "links": [ + 94 + ], + "slot_index": 2 + }, + { + "name": "cropped", + "type": "IMAGE", + "shape": 6, + "links": null + }, + { + "name": "cropped_refined", + "type": "IMAGE", + "shape": 6, + "links": [], + "slot_index": 4 + }, + { + "name": "cropped_refined_alpha", + "type": "IMAGE", + "shape": 6, + "links": [], + "slot_index": 5 + }, + { + "name": "cnet_images", + "type": "IMAGE", + "shape": 6, + "links": [], + "slot_index": 6 + } + ], + "properties": { + "Node name for S&R": "DetailerForEachDebugPipe" + }, + "widgets_values": [ + 64, + true, + 1024, + 522790177337686, + "fixed", + 20, + 8, + "dpmpp_2m_sde_gpu", + "karras", + 0.46, + 10, + true, + true, + "", + 0.2, + 1, + false, + 10, + false, + false + ] + }, + { + "id": 57, + "type": "ImpactMakeTileSEGS", + "pos": [ + 820, + 1610 + ], + "size": [ + 315, + 218 + ], + "flags": {}, + "order": 16, + "mode": 0, + "inputs": [ + { + "name": "images", + "type": "IMAGE", + "link": 107 + }, + { + "name": "filter_in_segs_opt", + "type": "SEGS", + "shape": 7, + "link": 115 + }, + { + "name": "filter_out_segs_opt", + "type": "SEGS", + "shape": 7, + "link": null + } + ], + "outputs": [ + { + "name": "SEGS", + "type": "SEGS", + "shape": 3, + "links": [ + 109, + 110 + ] + } + ], + "properties": { + "Node name for S&R": "ImpactMakeTileSEGS" + }, + "widgets_values": [ + 1200, + 1.4000000000000001, + 200, + 100, + 0.7000000000000001, + "Reuse fast" + ], + "color": "#322", + "bgcolor": "#533" + }, + { + "id": 22, + "type": "ImpactSimpleDetectorSEGS", + "pos": [ + 282, + 699 + ], + "size": [ + 315, + 310 + ], + "flags": {}, + "order": 13, + "mode": 0, + "inputs": [ + { + "name": "bbox_detector", + "type": "BBOX_DETECTOR", + "link": 35, + "slot_index": 0 + }, + { + "name": "image", + "type": "IMAGE", + "link": 34, + "slot_index": 1 + }, + { + "name": "sam_model_opt", + "type": "SAM_MODEL", + "shape": 7, + "link": 62, + "slot_index": 2 + }, + { + "name": "segm_detector_opt", + "type": "SEGM_DETECTOR", + "shape": 7, + "link": null + } + ], + "outputs": [ + { + "name": "SEGS", + "type": "SEGS", + "shape": 3, + "links": [ + 114, + 115 + ], + "slot_index": 0 + } + ], + "properties": { + "Node name for S&R": "ImpactSimpleDetectorSEGS" + }, + "widgets_values": [ + 0.5, + 0, + 3, + 10, + 0.5, + 0, + 0, + 0.7000000000000001, + 0 + ] + } + ], + "links": [ + [ + 8, + 2, + 0, + 9, + 0, + "IMAGE" + ], + [ + 10, + 9, + 0, + 6, + 1, + "IMAGE" + ], + [ + 28, + 9, + 0, + 20, + 0, + "*" + ], + [ + 34, + 2, + 0, + 22, + 1, + "IMAGE" + ], + [ + 35, + 24, + 0, + 22, + 0, + "BBOX_DETECTOR" + ], + [ + 59, + 31, + 0, + 
28, + 1, + "IMAGE" + ], + [ + 60, + 31, + 0, + 10, + 0, + "IMAGE" + ], + [ + 61, + 20, + 0, + 31, + 0, + "*" + ], + [ + 62, + 32, + 0, + 22, + 2, + "SAM_MODEL" + ], + [ + 76, + 19, + 0, + 10, + 2, + "BASIC_PIPE" + ], + [ + 94, + 10, + 2, + 51, + 2, + "BASIC_PIPE" + ], + [ + 95, + 10, + 0, + 51, + 0, + "IMAGE" + ], + [ + 96, + 51, + 0, + 16, + 0, + "IMAGE" + ], + [ + 98, + 52, + 0, + 10, + 1, + "SEGS" + ], + [ + 100, + 53, + 0, + 54, + 0, + "*" + ], + [ + 101, + 54, + 0, + 51, + 1, + "SEGS" + ], + [ + 105, + 56, + 0, + 6, + 0, + "SEGS" + ], + [ + 106, + 56, + 0, + 52, + 0, + "*" + ], + [ + 107, + 20, + 0, + 57, + 0, + "IMAGE" + ], + [ + 109, + 57, + 0, + 28, + 0, + "SEGS" + ], + [ + 110, + 57, + 0, + 53, + 0, + "*" + ], + [ + 111, + 20, + 0, + 56, + 0, + "IMAGE" + ], + [ + 114, + 22, + 0, + 56, + 2, + "SEGS" + ], + [ + 115, + 22, + 0, + 57, + 1, + "SEGS" + ] + ], + "groups": [], + "config": {}, + "extra": { + "groupNodes": { + "MAKE_BASIC_PIPE": { + "nodes": [ + { + "type": "CheckpointLoaderSimple", + "pos": [ + -80, + 1100 + ], + "size": { + "0": 315, + "1": 98 + }, + "flags": {}, + "order": 0, + "mode": 0, + "outputs": [ + { + "name": "MODEL", + "type": "MODEL", + "links": [], + "shape": 3, + "slot_index": 0, + "localized_name": "MODEL" + }, + { + "name": "CLIP", + "type": "CLIP", + "links": [], + "shape": 3, + "slot_index": 1, + "localized_name": "CLIP" + }, + { + "name": "VAE", + "type": "VAE", + "links": [], + "shape": 3, + "slot_index": 2, + "localized_name": "VAE" + } + ], + "properties": { + "Node name for S&R": "CheckpointLoaderSimple" + }, + "widgets_values": [ + "SD1.5/majicmixRealistic_v7.safetensors" + ], + "index": 0, + "inputs": [] + }, + { + "type": "CLIPTextEncode", + "pos": [ + 455, + 1026 + ], + "size": { + "0": 210, + "1": 104.50106048583984 + }, + "flags": {}, + "order": 2, + "mode": 0, + "inputs": [ + { + "name": "clip", + "type": "CLIP", + "link": null, + "localized_name": "clip" + } + ], + "outputs": [ + { + "name": "CONDITIONING", + "type": "CONDITIONING", + "links": [], + "shape": 3, + "slot_index": 0, + "localized_name": "CONDITIONING" + } + ], + "properties": { + "Node name for S&R": "CLIPTextEncode" + }, + "widgets_values": [ + "photograph, 4k, hdr, cropped, 1girl sit, blur hair, pink bag" + ], + "index": 1 + }, + { + "type": "CLIPTextEncode", + "pos": [ + 456, + 1239 + ], + "size": { + "0": 210, + "1": 104.50106048583984 + }, + "flags": {}, + "order": 3, + "mode": 0, + "inputs": [ + { + "name": "clip", + "type": "CLIP", + "link": null, + "slot_index": 0, + "localized_name": "clip" + } + ], + "outputs": [ + { + "name": "CONDITIONING", + "type": "CONDITIONING", + "links": [], + "shape": 3, + "localized_name": "CONDITIONING" + } + ], + "properties": { + "Node name for S&R": "CLIPTextEncode" + }, + "widgets_values": [ + "deformed, blurry\n" + ], + "index": 2 + }, + { + "type": "ToBasicPipe", + "pos": [ + 800, + 1100 + ], + "size": { + "0": 241.79998779296875, + "1": 106 + }, + "flags": {}, + "order": 5, + "mode": 0, + "inputs": [ + { + "name": "model", + "type": "MODEL", + "link": null, + "localized_name": "model" + }, + { + "name": "clip", + "type": "CLIP", + "link": null, + "slot_index": 1, + "localized_name": "clip" + }, + { + "name": "vae", + "type": "VAE", + "link": null, + "localized_name": "vae" + }, + { + "name": "positive", + "type": "CONDITIONING", + "link": null, + "localized_name": "positive" + }, + { + "name": "negative", + "type": "CONDITIONING", + "link": null, + "slot_index": 4, + "localized_name": "negative" + } + ], + "outputs": [ + { + "name": 
"basic_pipe", + "type": "BASIC_PIPE", + "links": [], + "shape": 3, + "slot_index": 0, + "localized_name": "basic_pipe" + } + ], + "properties": { + "Node name for S&R": "ToBasicPipe" + }, + "index": 3 + } + ], + "links": [ + [ + 0, + 1, + 1, + 0, + 11, + "CLIP" + ], + [ + 0, + 1, + 2, + 0, + 11, + "CLIP" + ], + [ + 0, + 0, + 3, + 0, + 11, + "MODEL" + ], + [ + 0, + 1, + 3, + 1, + 11, + "CLIP" + ], + [ + 0, + 2, + 3, + 2, + 11, + "VAE" + ], + [ + 1, + 0, + 3, + 3, + 13, + "CONDITIONING" + ], + [ + 2, + 0, + 3, + 4, + 14, + "CONDITIONING" + ] + ], + "external": [ + [ + 3, + 0, + "BASIC_PIPE" + ] + ] + } + }, + "controller_panel": { + "controllers": {}, + "hidden": true, + "highlight": true, + "version": 2, + "default_order": [] + }, + "ds": { + "scale": 1.4641000000000006, + "offset": { + "0": -481.44390869140625, + "1": -92.16561126708984 + } + }, + "node_versions": { + "comfyui-impact-pack": "1ae7cae2df8cca06027edfa3a24512671239d6c4", + "comfyui-impact-subpack": "74db20c95eca152a6d686c914edc0ef4e4762cb8", + "comfy-core": "0.3.14" + }, + "ue_links": [], + "VHS_latentpreview": false, + "VHS_latentpreviewrate": 0, + "VHS_MetadataImage": true, + "VHS_KeepIntermediate": true + }, + "version": 0.4 +} \ No newline at end of file diff --git a/zavodik/nodes/ComfyUI-Impact-Pack/example_workflows/5-PreviewDetailerHookProvider.jpg b/zavodik/nodes/ComfyUI-Impact-Pack/example_workflows/5-PreviewDetailerHookProvider.jpg new file mode 100644 index 0000000000000000000000000000000000000000..c5fcb464503b3fee737e5eb22d6f5cf161feb34f --- /dev/null +++ b/zavodik/nodes/ComfyUI-Impact-Pack/example_workflows/5-PreviewDetailerHookProvider.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:f37a67d0b42e4d94950a4d27de403b022d684824571021c702196a2989bf8349 +size 68539 diff --git a/zavodik/nodes/ComfyUI-Impact-Pack/example_workflows/5-PreviewDetailerHookProvider.json b/zavodik/nodes/ComfyUI-Impact-Pack/example_workflows/5-PreviewDetailerHookProvider.json new file mode 100644 index 0000000000000000000000000000000000000000..f145b8861ed16bff2b435e596285fd2af431a602 --- /dev/null +++ b/zavodik/nodes/ComfyUI-Impact-Pack/example_workflows/5-PreviewDetailerHookProvider.json @@ -0,0 +1,1629 @@ +{ + "last_node_id": 70, + "last_link_id": 125, + "nodes": [ + { + "id": 31, + "type": "Reroute", + "pos": [ + 1170, + 730 + ], + "size": [ + 75, + 26 + ], + "flags": {}, + "order": 12, + "mode": 0, + "inputs": [ + { + "name": "", + "type": "*", + "link": 61 + } + ], + "outputs": [ + { + "name": "", + "type": "IMAGE", + "links": [ + 59, + 60 + ], + "slot_index": 0 + } + ], + "properties": { + "showOutputText": false, + "horizontal": false + } + }, + { + "id": 32, + "type": "SAMLoader", + "pos": [ + -160, + 840 + ], + "size": [ + 315, + 82 + ], + "flags": {}, + "order": 0, + "mode": 0, + "inputs": [], + "outputs": [ + { + "name": "SAM_MODEL", + "type": "SAM_MODEL", + "shape": 3, + "links": [ + 62 + ] + } + ], + "properties": { + "Node name for S&R": "SAMLoader" + }, + "widgets_values": [ + "sam_vit_b_01ec64.pth", + "AUTO" + ] + }, + { + "id": 24, + "type": "UltralyticsDetectorProvider", + "pos": [ + -160, + 700 + ], + "size": [ + 315, + 78 + ], + "flags": {}, + "order": 1, + "mode": 0, + "inputs": [], + "outputs": [ + { + "name": "BBOX_DETECTOR", + "type": "BBOX_DETECTOR", + "shape": 3, + "links": [ + 35 + ] + }, + { + "name": "SEGM_DETECTOR", + "type": "SEGM_DETECTOR", + "shape": 3, + "links": [], + "slot_index": 1 + } + ], + "properties": { + "Node name for S&R": "UltralyticsDetectorProvider" + }, + 
"widgets_values": [ + "segm/person_yolov8m-seg.pt" + ] + }, + { + "id": 53, + "type": "Reroute", + "pos": [ + 1180, + 1540 + ], + "size": [ + 75, + 26 + ], + "flags": {}, + "order": 16, + "mode": 0, + "inputs": [ + { + "name": "", + "type": "*", + "link": 110 + } + ], + "outputs": [ + { + "name": "", + "type": "SEGS", + "links": [ + 100 + ], + "slot_index": 0 + } + ], + "properties": { + "showOutputText": false, + "horizontal": false + } + }, + { + "id": 20, + "type": "Reroute", + "pos": [ + 660, + 730 + ], + "size": [ + 75, + 26 + ], + "flags": {}, + "order": 10, + "mode": 0, + "inputs": [ + { + "name": "", + "type": "*", + "link": 28 + } + ], + "outputs": [ + { + "name": "", + "type": "IMAGE", + "links": [ + 61, + 107, + 111 + ], + "slot_index": 0 + } + ], + "properties": { + "showOutputText": false, + "horizontal": false + } + }, + { + "id": 28, + "type": "SEGSPreview", + "pos": [ + 1279, + 1610 + ], + "size": [ + 315, + 314 + ], + "flags": {}, + "order": 15, + "mode": 0, + "inputs": [ + { + "name": "segs", + "type": "SEGS", + "link": 109, + "slot_index": 0 + }, + { + "name": "fallback_image_opt", + "type": "IMAGE", + "shape": 7, + "link": 59, + "slot_index": 1 + } + ], + "outputs": [ + { + "name": "IMAGE", + "type": "IMAGE", + "shape": 6, + "links": [], + "slot_index": 0 + } + ], + "properties": { + "Node name for S&R": "SEGSPreview" + }, + "widgets_values": [ + true, + 0.1 + ] + }, + { + "id": 6, + "type": "SEGSPreview", + "pos": [ + 1292, + 268 + ], + "size": [ + 430.35296630859375, + 388.4536437988281 + ], + "flags": {}, + "order": 17, + "mode": 0, + "inputs": [ + { + "name": "segs", + "type": "SEGS", + "link": 105, + "slot_index": 0 + }, + { + "name": "fallback_image_opt", + "type": "IMAGE", + "shape": 7, + "link": 10, + "slot_index": 1 + } + ], + "outputs": [ + { + "name": "IMAGE", + "type": "IMAGE", + "shape": 6, + "links": [], + "slot_index": 0 + } + ], + "properties": { + "Node name for S&R": "SEGSPreview" + }, + "widgets_values": [ + true, + 0.1 + ] + }, + { + "id": 57, + "type": "ImpactMakeTileSEGS", + "pos": [ + 820, + 1610 + ], + "size": [ + 315, + 218 + ], + "flags": {}, + "order": 13, + "mode": 0, + "inputs": [ + { + "name": "images", + "type": "IMAGE", + "link": 107 + }, + { + "name": "filter_in_segs_opt", + "type": "SEGS", + "shape": 7, + "link": 115 + }, + { + "name": "filter_out_segs_opt", + "type": "SEGS", + "shape": 7, + "link": null + } + ], + "outputs": [ + { + "name": "SEGS", + "type": "SEGS", + "shape": 3, + "links": [ + 109, + 110 + ] + } + ], + "properties": { + "Node name for S&R": "ImpactMakeTileSEGS" + }, + "widgets_values": [ + 1200, + 1.4000000000000001, + 200, + 100, + 0.7000000000000001, + "Reuse fast" + ], + "color": "#322", + "bgcolor": "#533" + }, + { + "id": 22, + "type": "ImpactSimpleDetectorSEGS", + "pos": [ + 282, + 699 + ], + "size": [ + 315, + 310 + ], + "flags": {}, + "order": 8, + "mode": 0, + "inputs": [ + { + "name": "bbox_detector", + "type": "BBOX_DETECTOR", + "link": 35, + "slot_index": 0 + }, + { + "name": "image", + "type": "IMAGE", + "link": 34, + "slot_index": 1 + }, + { + "name": "sam_model_opt", + "type": "SAM_MODEL", + "shape": 7, + "link": 62, + "slot_index": 2 + }, + { + "name": "segm_detector_opt", + "type": "SEGM_DETECTOR", + "shape": 7, + "link": null, + "slot_index": 3 + } + ], + "outputs": [ + { + "name": "SEGS", + "type": "SEGS", + "shape": 3, + "links": [ + 114, + 115 + ], + "slot_index": 0 + } + ], + "properties": { + "Node name for S&R": "ImpactSimpleDetectorSEGS" + }, + "widgets_values": [ + 0.5, + 0, + 3, + 10, + 0.5, 
+ 0, + 0, + 0.7000000000000001, + 0 + ] + }, + { + "id": 2, + "type": "LoadImage", + "pos": [ + -160, + 290 + ], + "size": [ + 315, + 314 + ], + "flags": {}, + "order": 2, + "mode": 0, + "inputs": [], + "outputs": [ + { + "name": "IMAGE", + "type": "IMAGE", + "shape": 3, + "links": [ + 8, + 34 + ], + "slot_index": 0 + }, + { + "name": "MASK", + "type": "MASK", + "shape": 3, + "links": null + } + ], + "properties": { + "Node name for S&R": "LoadImage" + }, + "widgets_values": [ + "combination-2pass-original.png", + "image" + ] + }, + { + "id": 9, + "type": "ImageScaleBy", + "pos": [ + 280, + 290 + ], + "size": [ + 315, + 82 + ], + "flags": {}, + "order": 7, + "mode": 0, + "inputs": [ + { + "name": "image", + "type": "IMAGE", + "link": 8, + "slot_index": 0 + } + ], + "outputs": [ + { + "name": "IMAGE", + "type": "IMAGE", + "shape": 3, + "links": [ + 10, + 28 + ], + "slot_index": 0 + } + ], + "properties": { + "Node name for S&R": "ImageScaleBy" + }, + "widgets_values": [ + "lanczos", + 3 + ] + }, + { + "id": 16, + "type": "PreviewImage", + "pos": [ + 2990, + 730 + ], + "size": [ + 610.069580078125, + 774.6857299804688 + ], + "flags": {}, + "order": 22, + "mode": 0, + "inputs": [ + { + "name": "images", + "type": "IMAGE", + "link": 96 + } + ], + "outputs": [], + "properties": { + "Node name for S&R": "PreviewImage" + }, + "widgets_values": [] + }, + { + "id": 68, + "type": "PreviewDetailerHookProvider", + "pos": [ + 943, + -1972 + ], + "size": [ + 1360.0478515625, + 1943.85986328125 + ], + "flags": {}, + "order": 3, + "mode": 0, + "inputs": [], + "outputs": [ + { + "name": "DETAILER_HOOK", + "type": "DETAILER_HOOK", + "shape": 3, + "links": [ + 120 + ], + "slot_index": 0 + }, + { + "name": "UPSCALER_HOOK", + "type": "UPSCALER_HOOK", + "links": null + } + ], + "title": "PreviewDetailerHookProvider - Live Preview", + "properties": { + "Node name for S&R": "PreviewDetailerHookProvider" + }, + "widgets_values": [ + 95 + ], + "color": "#322", + "bgcolor": "#533" + }, + { + "id": 69, + "type": "Reroute", + "pos": [ + 2360, + -1920 + ], + "size": [ + 75, + 26 + ], + "flags": {}, + "order": 9, + "mode": 0, + "inputs": [ + { + "name": "", + "type": "*", + "pos": [ + 37.5, + 0 + ], + "link": 120 + } + ], + "outputs": [ + { + "name": "", + "type": "DETAILER_HOOK", + "links": [ + 121 + ], + "slot_index": 0 + } + ], + "properties": { + "showOutputText": false, + "horizontal": true + } + }, + { + "id": 19, + "type": "workflow>MAKE_BASIC_PIPE", + "pos": [ + 1440, + 850 + ], + "size": [ + 451.0836486816406, + 279.9571533203125 + ], + "flags": {}, + "order": 4, + "mode": 0, + "inputs": [], + "outputs": [ + { + "name": "basic_pipe", + "type": "BASIC_PIPE", + "shape": 3, + "links": [ + 76 + ], + "slot_index": 0 + } + ], + "properties": { + "Node name for S&R": "workflow/MAKE_BASIC_PIPE" + }, + "widgets_values": [ + "SDXL/MOHAWK_v20BackedVAE.safetensors", + "cinematic photograph of a girl is walking, cinematic lighting, white inddor", + "deformed, blurry, \n" + ] + }, + { + "id": 52, + "type": "Reroute", + "pos": [ + 2330, + 470 + ], + "size": [ + 75, + 26 + ], + "flags": {}, + "order": 18, + "mode": 0, + "inputs": [ + { + "name": "", + "type": "*", + "link": 106 + } + ], + "outputs": [ + { + "name": "", + "type": "SEGS", + "links": [ + 119 + ], + "slot_index": 0 + } + ], + "properties": { + "showOutputText": false, + "horizontal": false + } + }, + { + "id": 54, + "type": "Reroute", + "pos": [ + 1780, + 1540 + ], + "size": [ + 75, + 26 + ], + "flags": {}, + "order": 19, + "mode": 0, + "inputs": [ + { + "name": 
"", + "type": "*", + "link": 100 + } + ], + "outputs": [ + { + "name": "", + "type": "SEGS", + "links": [ + 118 + ], + "slot_index": 0 + } + ], + "properties": { + "showOutputText": false, + "horizontal": false + } + }, + { + "id": 56, + "type": "ImpactMakeTileSEGS", + "pos": [ + 780, + 470 + ], + "size": [ + 315, + 218 + ], + "flags": {}, + "order": 14, + "mode": 0, + "inputs": [ + { + "name": "images", + "type": "IMAGE", + "link": 111 + }, + { + "name": "filter_in_segs_opt", + "type": "SEGS", + "shape": 7, + "link": null + }, + { + "name": "filter_out_segs_opt", + "type": "SEGS", + "shape": 7, + "link": 114 + } + ], + "outputs": [ + { + "name": "SEGS", + "type": "SEGS", + "shape": 3, + "links": [ + 105, + 106 + ] + } + ], + "properties": { + "Node name for S&R": "ImpactMakeTileSEGS" + }, + "widgets_values": [ + 768, + 1.5, + 200, + 0, + 0.7000000000000001, + "Reuse fast" + ], + "color": "#322", + "bgcolor": "#533" + }, + { + "id": 10, + "type": "DetailerForEachDebugPipe", + "pos": [ + 1960, + 730 + ], + "size": [ + 410, + 996 + ], + "flags": {}, + "order": 20, + "mode": 0, + "inputs": [ + { + "name": "image", + "type": "IMAGE", + "link": 60, + "slot_index": 0 + }, + { + "name": "segs", + "type": "SEGS", + "link": 118 + }, + { + "name": "basic_pipe", + "type": "BASIC_PIPE", + "link": 76 + }, + { + "name": "detailer_hook", + "type": "DETAILER_HOOK", + "shape": 7, + "link": 124, + "slot_index": 3 + }, + { + "name": "refiner_basic_pipe_opt", + "type": "BASIC_PIPE", + "shape": 7, + "link": null + }, + { + "name": "scheduler_func_opt", + "type": "SCHEDULER_FUNC", + "shape": 7, + "link": null + } + ], + "outputs": [ + { + "name": "image", + "type": "IMAGE", + "shape": 3, + "links": [ + 95 + ], + "slot_index": 0 + }, + { + "name": "segs", + "type": "SEGS", + "shape": 3, + "links": null + }, + { + "name": "basic_pipe", + "type": "BASIC_PIPE", + "shape": 3, + "links": [ + 94 + ], + "slot_index": 2 + }, + { + "name": "cropped", + "type": "IMAGE", + "shape": 6, + "links": null + }, + { + "name": "cropped_refined", + "type": "IMAGE", + "shape": 6, + "links": [], + "slot_index": 4 + }, + { + "name": "cropped_refined_alpha", + "type": "IMAGE", + "shape": 6, + "links": [], + "slot_index": 5 + }, + { + "name": "cnet_images", + "type": "IMAGE", + "shape": 6, + "links": [], + "slot_index": 6 + } + ], + "title": "DetailerDebug (SEGS/pipe) - person", + "properties": { + "Node name for S&R": "DetailerForEachDebugPipe" + }, + "widgets_values": [ + 64, + true, + 1024, + 522790177337686, + "fixed", + 20, + 8, + "dpmpp_3m_sde_gpu", + "karras", + 0.45, + 10, + true, + true, + "", + 0.2, + 1, + false, + 10, + false, + false + ] + }, + { + "id": 51, + "type": "DetailerForEachDebugPipe", + "pos": [ + 2510, + 730 + ], + "size": [ + 410, + 996 + ], + "flags": {}, + "order": 21, + "mode": 0, + "inputs": [ + { + "name": "image", + "type": "IMAGE", + "link": 95, + "slot_index": 0 + }, + { + "name": "segs", + "type": "SEGS", + "link": 119 + }, + { + "name": "basic_pipe", + "type": "BASIC_PIPE", + "link": 94 + }, + { + "name": "detailer_hook", + "type": "DETAILER_HOOK", + "shape": 7, + "link": 125 + }, + { + "name": "refiner_basic_pipe_opt", + "type": "BASIC_PIPE", + "shape": 7, + "link": null + }, + { + "name": "scheduler_func_opt", + "type": "SCHEDULER_FUNC", + "shape": 7, + "link": null + } + ], + "outputs": [ + { + "name": "image", + "type": "IMAGE", + "shape": 3, + "links": [ + 96 + ], + "slot_index": 0 + }, + { + "name": "segs", + "type": "SEGS", + "shape": 3, + "links": null + }, + { + "name": "basic_pipe", + "type": 
"BASIC_PIPE", + "shape": 3, + "links": null + }, + { + "name": "cropped", + "type": "IMAGE", + "shape": 6, + "links": null + }, + { + "name": "cropped_refined", + "type": "IMAGE", + "shape": 6, + "links": [], + "slot_index": 4 + }, + { + "name": "cropped_refined_alpha", + "type": "IMAGE", + "shape": 6, + "links": [], + "slot_index": 5 + }, + { + "name": "cnet_images", + "type": "IMAGE", + "shape": 6, + "links": [], + "slot_index": 6 + } + ], + "title": "DetailerDebug (SEGS/pipe) - background", + "properties": { + "Node name for S&R": "DetailerForEachDebugPipe" + }, + "widgets_values": [ + 64, + true, + 1024, + 522790177337693, + "fixed", + 20, + 8, + "dpmpp_2m_sde_gpu", + "karras", + 0.4, + 10, + true, + true, + "[CONCAT] red double bun, metalic arm, zoey", + 0.2, + 1, + false, + 50, + false, + false + ] + }, + { + "id": 60, + "type": "Note", + "pos": [ + -1033, + 292 + ], + "size": [ + 638.3837890625, + 178.84756469726562 + ], + "flags": {}, + "order": 5, + "mode": 0, + "inputs": [], + "outputs": [], + "title": "1.Intro", + "properties": { + "text": "" + }, + "widgets_values": [ + "Using nodes like Make Tile SEGS for Detailer work will result in processing SEGS within a large number of Detailer nodes.\n\nPreviewDetailerHookProvider is connected to Detailers to monitor intermediate processes.\n" + ], + "color": "#222", + "bgcolor": "#000" + }, + { + "id": 62, + "type": "Note", + "pos": [ + 364, + -1967 + ], + "size": [ + 552.4130859375, + 204.45199584960938 + ], + "flags": {}, + "order": 6, + "mode": 0, + "inputs": [], + "outputs": [], + "title": "2. PreviewDetailerHookProvider", + "properties": { + "text": "" + }, + "widgets_values": [ + "To add PreviewDetailerHookProvider, simply connect it to the detailer_hook input of the Detailer node you want to monitor.\n\nThis node can also be used in the Detailer For AnimateDiff node.\n\nHowever, since this node provides a preview hook for pasting onto the original image, it cannot be used in SEGSDetailer where there is no pasting step.\n\n\n\nNow let's give it a try." 
+ ], + "color": "#222", + "bgcolor": "#000" + }, + { + "id": 70, + "type": "Reroute", + "pos": [ + 2360, + 310 + ], + "size": [ + 75, + 26 + ], + "flags": {}, + "order": 11, + "mode": 0, + "inputs": [ + { + "name": "", + "type": "*", + "pos": [ + 37.5, + 0 + ], + "link": 121 + } + ], + "outputs": [ + { + "name": "", + "type": "DETAILER_HOOK", + "links": [ + 124, + 125 + ], + "slot_index": 0 + } + ], + "properties": { + "showOutputText": false, + "horizontal": true + } + } + ], + "links": [ + [ + 8, + 2, + 0, + 9, + 0, + "IMAGE" + ], + [ + 10, + 9, + 0, + 6, + 1, + "IMAGE" + ], + [ + 28, + 9, + 0, + 20, + 0, + "*" + ], + [ + 34, + 2, + 0, + 22, + 1, + "IMAGE" + ], + [ + 35, + 24, + 0, + 22, + 0, + "BBOX_DETECTOR" + ], + [ + 59, + 31, + 0, + 28, + 1, + "IMAGE" + ], + [ + 60, + 31, + 0, + 10, + 0, + "IMAGE" + ], + [ + 61, + 20, + 0, + 31, + 0, + "*" + ], + [ + 62, + 32, + 0, + 22, + 2, + "SAM_MODEL" + ], + [ + 76, + 19, + 0, + 10, + 2, + "BASIC_PIPE" + ], + [ + 94, + 10, + 2, + 51, + 2, + "BASIC_PIPE" + ], + [ + 95, + 10, + 0, + 51, + 0, + "IMAGE" + ], + [ + 96, + 51, + 0, + 16, + 0, + "IMAGE" + ], + [ + 100, + 53, + 0, + 54, + 0, + "*" + ], + [ + 105, + 56, + 0, + 6, + 0, + "SEGS" + ], + [ + 106, + 56, + 0, + 52, + 0, + "*" + ], + [ + 107, + 20, + 0, + 57, + 0, + "IMAGE" + ], + [ + 109, + 57, + 0, + 28, + 0, + "SEGS" + ], + [ + 110, + 57, + 0, + 53, + 0, + "*" + ], + [ + 111, + 20, + 0, + 56, + 0, + "IMAGE" + ], + [ + 114, + 22, + 0, + 56, + 2, + "SEGS" + ], + [ + 115, + 22, + 0, + 57, + 1, + "SEGS" + ], + [ + 118, + 54, + 0, + 10, + 1, + "SEGS" + ], + [ + 119, + 52, + 0, + 51, + 1, + "SEGS" + ], + [ + 120, + 68, + 0, + 69, + 0, + "*" + ], + [ + 121, + 69, + 0, + 70, + 0, + "*" + ], + [ + 124, + 70, + 0, + 10, + 3, + "DETAILER_HOOK" + ], + [ + 125, + 70, + 0, + 51, + 3, + "DETAILER_HOOK" + ] + ], + "groups": [], + "config": {}, + "extra": { + "groupNodes": { + "MAKE_BASIC_PIPE": { + "nodes": [ + { + "type": "CheckpointLoaderSimple", + "pos": [ + -80, + 1100 + ], + "size": { + "0": 315, + "1": 98 + }, + "flags": {}, + "order": 0, + "mode": 0, + "outputs": [ + { + "name": "MODEL", + "type": "MODEL", + "links": [], + "shape": 3, + "slot_index": 0, + "localized_name": "MODEL" + }, + { + "name": "CLIP", + "type": "CLIP", + "links": [], + "shape": 3, + "slot_index": 1, + "localized_name": "CLIP" + }, + { + "name": "VAE", + "type": "VAE", + "links": [], + "shape": 3, + "slot_index": 2, + "localized_name": "VAE" + } + ], + "properties": { + "Node name for S&R": "CheckpointLoaderSimple" + }, + "widgets_values": [ + "SD1.5/majicmixRealistic_v7.safetensors" + ], + "index": 0, + "inputs": [] + }, + { + "type": "CLIPTextEncode", + "pos": [ + 455, + 1026 + ], + "size": { + "0": 210, + "1": 104.50106048583984 + }, + "flags": {}, + "order": 2, + "mode": 0, + "inputs": [ + { + "name": "clip", + "type": "CLIP", + "link": null, + "localized_name": "clip" + } + ], + "outputs": [ + { + "name": "CONDITIONING", + "type": "CONDITIONING", + "links": [], + "shape": 3, + "slot_index": 0, + "localized_name": "CONDITIONING" + } + ], + "properties": { + "Node name for S&R": "CLIPTextEncode" + }, + "widgets_values": [ + "photograph, 4k, hdr, cropped, 1girl sit, blur hair, pink bag" + ], + "index": 1 + }, + { + "type": "CLIPTextEncode", + "pos": [ + 456, + 1239 + ], + "size": { + "0": 210, + "1": 104.50106048583984 + }, + "flags": {}, + "order": 3, + "mode": 0, + "inputs": [ + { + "name": "clip", + "type": "CLIP", + "link": null, + "slot_index": 0, + "localized_name": "clip" + } + ], + "outputs": [ + { + "name": 
"CONDITIONING", + "type": "CONDITIONING", + "links": [], + "shape": 3, + "localized_name": "CONDITIONING" + } + ], + "properties": { + "Node name for S&R": "CLIPTextEncode" + }, + "widgets_values": [ + "deformed, blurry\n" + ], + "index": 2 + }, + { + "type": "ToBasicPipe", + "pos": [ + 800, + 1100 + ], + "size": { + "0": 241.79998779296875, + "1": 106 + }, + "flags": {}, + "order": 5, + "mode": 0, + "inputs": [ + { + "name": "model", + "type": "MODEL", + "link": null, + "localized_name": "model" + }, + { + "name": "clip", + "type": "CLIP", + "link": null, + "slot_index": 1, + "localized_name": "clip" + }, + { + "name": "vae", + "type": "VAE", + "link": null, + "localized_name": "vae" + }, + { + "name": "positive", + "type": "CONDITIONING", + "link": null, + "localized_name": "positive" + }, + { + "name": "negative", + "type": "CONDITIONING", + "link": null, + "slot_index": 4, + "localized_name": "negative" + } + ], + "outputs": [ + { + "name": "basic_pipe", + "type": "BASIC_PIPE", + "links": [], + "shape": 3, + "slot_index": 0, + "localized_name": "basic_pipe" + } + ], + "properties": { + "Node name for S&R": "ToBasicPipe" + }, + "index": 3 + } + ], + "links": [ + [ + 0, + 1, + 1, + 0, + 11, + "CLIP" + ], + [ + 0, + 1, + 2, + 0, + 11, + "CLIP" + ], + [ + 0, + 0, + 3, + 0, + 11, + "MODEL" + ], + [ + 0, + 1, + 3, + 1, + 11, + "CLIP" + ], + [ + 0, + 2, + 3, + 2, + 11, + "VAE" + ], + [ + 1, + 0, + 3, + 3, + 13, + "CONDITIONING" + ], + [ + 2, + 0, + 3, + 4, + 14, + "CONDITIONING" + ] + ], + "external": [ + [ + 3, + 0, + "BASIC_PIPE" + ] + ] + } + }, + "controller_panel": { + "controllers": {}, + "hidden": true, + "highlight": true, + "version": 2, + "default_order": [] + }, + "ds": { + "scale": 0.620921323059155, + "offset": [ + 432.38467086326943, + 608.3387630215522 + ] + }, + "node_versions": { + "comfyui-impact-pack": "1ae7cae2df8cca06027edfa3a24512671239d6c4", + "comfyui-impact-subpack": "74db20c95eca152a6d686c914edc0ef4e4762cb8", + "comfy-core": "0.3.14" + }, + "ue_links": [], + "VHS_latentpreview": false, + "VHS_latentpreviewrate": 0, + "VHS_MetadataImage": true, + "VHS_KeepIntermediate": true + }, + "version": 0.4 +} \ No newline at end of file diff --git a/zavodik/nodes/ComfyUI-Impact-Pack/example_workflows/5-prompt-per-tile.jpg b/zavodik/nodes/ComfyUI-Impact-Pack/example_workflows/5-prompt-per-tile.jpg new file mode 100644 index 0000000000000000000000000000000000000000..5b5447c266791208d4e2246cbac098480d3e1179 --- /dev/null +++ b/zavodik/nodes/ComfyUI-Impact-Pack/example_workflows/5-prompt-per-tile.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:232bde22503e087e47c391bb91bbcc830c9882889ac715c1ec139667ea3d2c03 +size 130918 diff --git a/zavodik/nodes/ComfyUI-Impact-Pack/example_workflows/5-prompt-per-tile.json b/zavodik/nodes/ComfyUI-Impact-Pack/example_workflows/5-prompt-per-tile.json new file mode 100644 index 0000000000000000000000000000000000000000..bea9f84eb1ed9ee10ca5c1f71e79beead0bc4156 --- /dev/null +++ b/zavodik/nodes/ComfyUI-Impact-Pack/example_workflows/5-prompt-per-tile.json @@ -0,0 +1,1290 @@ +{ + "last_node_id": 30, + "last_link_id": 50, + "nodes": [ + { + "id": 3, + "type": "KSampler", + "pos": [ + -160, + -150 + ], + "size": [ + 315, + 474 + ], + "flags": {}, + "order": 4, + "mode": 0, + "inputs": [ + { + "name": "model", + "type": "MODEL", + "link": 1 + }, + { + "name": "positive", + "type": "CONDITIONING", + "link": 4 + }, + { + "name": "negative", + "type": "CONDITIONING", + "link": 6 + }, + { + "name": "latent_image", + "type": "LATENT", 
+ "link": 2 + } + ], + "outputs": [ + { + "name": "LATENT", + "type": "LATENT", + "links": [ + 7 + ], + "slot_index": 0 + } + ], + "properties": { + "Node name for S&R": "KSampler" + }, + "widgets_values": [ + 2, + "fixed", + 20, + 7, + "dpmpp_2m", + "karras", + 1 + ], + "color": "#223", + "bgcolor": "#335" + }, + { + "id": 4, + "type": "CheckpointLoaderSimple", + "pos": [ + -580, + -300 + ], + "size": [ + 315, + 98 + ], + "flags": {}, + "order": 0, + "mode": 0, + "inputs": [], + "outputs": [ + { + "name": "MODEL", + "type": "MODEL", + "links": [ + 1, + 25 + ], + "slot_index": 0 + }, + { + "name": "CLIP", + "type": "CLIP", + "links": [ + 3, + 5, + 26 + ], + "slot_index": 1 + }, + { + "name": "VAE", + "type": "VAE", + "links": [ + 8, + 27 + ], + "slot_index": 2 + } + ], + "properties": { + "Node name for S&R": "CheckpointLoaderSimple" + }, + "widgets_values": [ + "SD1.5/noosphere_v42.safetensors" + ], + "color": "#223", + "bgcolor": "#335" + }, + { + "id": 5, + "type": "EmptyLatentImage", + "pos": [ + -567, + 312 + ], + "size": [ + 315, + 106 + ], + "flags": {}, + "order": 1, + "mode": 0, + "inputs": [], + "outputs": [ + { + "name": "LATENT", + "type": "LATENT", + "links": [ + 2 + ], + "slot_index": 0 + } + ], + "properties": { + "Node name for S&R": "EmptyLatentImage" + }, + "widgets_values": [ + 512, + 768, + 1 + ], + "color": "#223", + "bgcolor": "#335" + }, + { + "id": 6, + "type": "CLIPTextEncode", + "pos": [ + -610, + -150 + ], + "size": [ + 422.84503173828125, + 164.31304931640625 + ], + "flags": {}, + "order": 2, + "mode": 0, + "inputs": [ + { + "name": "clip", + "type": "CLIP", + "link": 3 + } + ], + "outputs": [ + { + "name": "CONDITIONING", + "type": "CONDITIONING", + "links": [ + 4, + 28 + ], + "slot_index": 0 + } + ], + "properties": { + "Node name for S&R": "CLIPTextEncode" + }, + "widgets_values": [ + "photo of a blonde girl and a dark haired man with beard, front view, detailed faces, high details, realistic, nature background, high saturation" + ], + "color": "#232", + "bgcolor": "#353" + }, + { + "id": 7, + "type": "CLIPTextEncode", + "pos": [ + -611, + 66 + ], + "size": [ + 425.27801513671875, + 180.6060791015625 + ], + "flags": {}, + "order": 3, + "mode": 0, + "inputs": [ + { + "name": "clip", + "type": "CLIP", + "link": 5 + } + ], + "outputs": [ + { + "name": "CONDITIONING", + "type": "CONDITIONING", + "links": [ + 6, + 29 + ], + "slot_index": 0 + } + ], + "properties": { + "Node name for S&R": "CLIPTextEncode" + }, + "widgets_values": [ + "text, watermark, nsfw" + ], + "color": "#322", + "bgcolor": "#533" + }, + { + "id": 8, + "type": "VAEDecode", + "pos": [ + -115, + -271 + ], + "size": [ + 210, + 46 + ], + "flags": {}, + "order": 5, + "mode": 0, + "inputs": [ + { + "name": "samples", + "type": "LATENT", + "link": 7 + }, + { + "name": "vae", + "type": "VAE", + "link": 8 + } + ], + "outputs": [ + { + "name": "IMAGE", + "type": "IMAGE", + "links": [ + 21, + 22, + 30 + ], + "slot_index": 0 + } + ], + "properties": { + "Node name for S&R": "VAEDecode" + }, + "widgets_values": [], + "color": "#223", + "bgcolor": "#335" + }, + { + "id": 10, + "type": "ImpactMakeTileSEGS", + "pos": [ + 840, + -90 + ], + "size": [ + 282.6341552734375, + 218 + ], + "flags": {}, + "order": 10, + "mode": 0, + "inputs": [ + { + "name": "images", + "type": "IMAGE", + "link": 31 + }, + { + "name": "filter_in_segs_opt", + "type": "SEGS", + "shape": 7, + "link": null + }, + { + "name": "filter_out_segs_opt", + "type": "SEGS", + "shape": 7, + "link": null + } + ], + "outputs": [ + { + "name": "SEGS", 
+ "type": "SEGS", + "shape": 3, + "links": [ + 14, + 15 + ], + "slot_index": 0 + } + ], + "properties": { + "Node name for S&R": "ImpactMakeTileSEGS" + }, + "widgets_values": [ + 704, + 1.1, + 4, + 0, + 0, + "Reuse fast" + ], + "color": "#2a363b", + "bgcolor": "#3f5159" + }, + { + "id": 11, + "type": "WD14Tagger|pysssss", + "pos": [ + 1901, + -282 + ], + "size": [ + 276.18115234375, + 470 + ], + "flags": { + "collapsed": false + }, + "order": 12, + "mode": 0, + "inputs": [ + { + "name": "image", + "type": "IMAGE", + "link": 10 + } + ], + "outputs": [ + { + "name": "STRING", + "type": "STRING", + "shape": 6, + "links": [ + 39 + ], + "slot_index": 0 + } + ], + "properties": { + "Node name for S&R": "WD14Tagger|pysssss" + }, + "widgets_values": [ + "wd-v1-4-moat-tagger-v2", + 0.35000000000000003, + 0.85, + true, + false, + "" + ], + "color": "#332922", + "bgcolor": "#593930" + }, + { + "id": 12, + "type": "DetailerForEach", + "pos": [ + 2881.51708984375, + -286.8627624511719 + ], + "size": [ + 310.9673767089844, + 790 + ], + "flags": {}, + "order": 17, + "mode": 0, + "inputs": [ + { + "name": "image", + "type": "IMAGE", + "link": 32 + }, + { + "name": "segs", + "type": "SEGS", + "link": 12 + }, + { + "name": "model", + "type": "MODEL", + "link": 25 + }, + { + "name": "clip", + "type": "CLIP", + "link": 26 + }, + { + "name": "vae", + "type": "VAE", + "link": 27 + }, + { + "name": "positive", + "type": "CONDITIONING", + "link": 28 + }, + { + "name": "negative", + "type": "CONDITIONING", + "link": 29 + }, + { + "name": "detailer_hook", + "type": "DETAILER_HOOK", + "shape": 7, + "link": null + }, + { + "name": "wildcard", + "type": "STRING", + "widget": { + "name": "wildcard" + }, + "link": 49 + }, + { + "name": "scheduler_func_opt", + "type": "SCHEDULER_FUNC", + "shape": 7, + "link": null + } + ], + "outputs": [ + { + "name": "IMAGE", + "type": "IMAGE", + "shape": 3, + "links": [ + 17 + ], + "slot_index": 0 + } + ], + "properties": { + "Node name for S&R": "DetailerForEach" + }, + "widgets_values": [ + 768, + true, + 1024, + 20, + "fixed", + 20, + 3.5, + "dpmpp_2m_sde_gpu", + "karras", + 0.5, + 16, + true, + true, + "", + 1, + false, + 16, + false, + false + ], + "color": "#2a363b", + "bgcolor": "#3f5159" + }, + { + "id": 13, + "type": "WD14Tagger|pysssss", + "pos": [ + 1388, + -240 + ], + "size": [ + 290, + 240 + ], + "flags": { + "collapsed": false + }, + "order": 7, + "mode": 0, + "inputs": [ + { + "name": "image", + "type": "IMAGE", + "link": 22 + } + ], + "outputs": [ + { + "name": "STRING", + "type": "STRING", + "shape": 6, + "links": [ + 34 + ], + "slot_index": 0 + } + ], + "properties": { + "Node name for S&R": "WD14Tagger|pysssss" + }, + "widgets_values": [ + "wd-v1-4-moat-tagger-v2", + 0.35000000000000003, + 0.85, + true, + false, + "" + ], + "color": "#332922", + "bgcolor": "#593930" + }, + { + "id": 14, + "type": "SEGSToImageList", + "pos": [ + 830, + 230 + ], + "size": [ + 276.6341552734375, + 46 + ], + "flags": {}, + "order": 11, + "mode": 0, + "inputs": [ + { + "name": "segs", + "type": "SEGS", + "link": 14 + }, + { + "name": "fallback_image_opt", + "type": "IMAGE", + "shape": 7, + "link": 33 + } + ], + "outputs": [ + { + "name": "IMAGE", + "type": "IMAGE", + "shape": 6, + "links": [ + 10, + 18 + ], + "slot_index": 0 + } + ], + "properties": { + "Node name for S&R": "SEGSToImageList" + }, + "widgets_values": [], + "color": "#2a363b", + "bgcolor": "#3f5159" + }, + { + "id": 15, + "type": "ImpactSEGSLabelAssign", + "pos": [ + 2409.8916015625, + 36.29731369018555 + ], + "size": [ + 
283.6341552734375, + 103.9290771484375 + ], + "flags": {}, + "order": 16, + "mode": 0, + "inputs": [ + { + "name": "segs", + "type": "SEGS", + "link": 15 + }, + { + "name": "labels", + "type": "STRING", + "widget": { + "name": "labels" + }, + "link": 50 + } + ], + "outputs": [ + { + "name": "SEGS", + "type": "SEGS", + "shape": 3, + "links": [ + 12 + ], + "slot_index": 0 + } + ], + "properties": { + "Node name for S&R": "ImpactSEGSLabelAssign" + }, + "widgets_values": [ + "" + ], + "color": "#2a363b", + "bgcolor": "#3f5159" + }, + { + "id": 16, + "type": "PreviewImage", + "pos": [ + 3270, + -287 + ], + "size": [ + 842.0664672851562, + 1217.6240234375 + ], + "flags": {}, + "order": 18, + "mode": 0, + "inputs": [ + { + "name": "images", + "type": "IMAGE", + "link": 17 + } + ], + "outputs": [], + "properties": { + "Node name for S&R": "PreviewImage" + }, + "widgets_values": [], + "color": "#2a363b", + "bgcolor": "#3f5159" + }, + { + "id": 17, + "type": "PreviewImage", + "pos": [ + 788, + 339 + ], + "size": [ + 421.1688537597656, + 448.1822509765625 + ], + "flags": {}, + "order": 13, + "mode": 0, + "inputs": [ + { + "name": "images", + "type": "IMAGE", + "link": 18 + } + ], + "outputs": [], + "title": "Preview Tiles", + "properties": { + "Node name for S&R": "PreviewImage" + }, + "widgets_values": [], + "color": "#2a363b", + "bgcolor": "#3f5159" + }, + { + "id": 19, + "type": "PreviewImage", + "pos": [ + 173, + -283 + ], + "size": [ + 475.25579833984375, + 668.4122924804688 + ], + "flags": {}, + "order": 6, + "mode": 0, + "inputs": [ + { + "name": "images", + "type": "IMAGE", + "link": 21 + } + ], + "outputs": [], + "properties": { + "Node name for S&R": "PreviewImage" + }, + "widgets_values": [], + "color": "#223", + "bgcolor": "#335" + }, + { + "id": 21, + "type": "ImageScaleBy", + "pos": [ + 840, + -270 + ], + "size": [ + 315, + 82 + ], + "flags": {}, + "order": 8, + "mode": 0, + "inputs": [ + { + "name": "image", + "type": "IMAGE", + "link": 30 + } + ], + "outputs": [ + { + "name": "IMAGE", + "type": "IMAGE", + "shape": 3, + "links": [ + 31, + 32, + 33 + ], + "slot_index": 0 + } + ], + "properties": { + "Node name for S&R": "ImageScaleBy" + }, + "widgets_values": [ + "bicubic", + 2.5 + ], + "color": "#233", + "bgcolor": "#355" + }, + { + "id": 25, + "type": "StringListToString", + "pos": [ + 1375, + 131 + ], + "size": [ + 315, + 82 + ], + "flags": {}, + "order": 9, + "mode": 0, + "inputs": [ + { + "name": "string_list", + "type": "STRING", + "widget": { + "name": "string_list" + }, + "link": 34 + } + ], + "outputs": [ + { + "name": "STRING", + "type": "STRING", + "shape": 3, + "links": [ + 47 + ], + "slot_index": 0 + } + ], + "properties": { + "Node name for S&R": "StringListToString" + }, + "widgets_values": [ + "", + "" + ], + "color": "#332922", + "bgcolor": "#593930" + }, + { + "id": 26, + "type": "StringListToString", + "pos": [ + 1913, + 259 + ], + "size": [ + 268.8372497558594, + 58 + ], + "flags": {}, + "order": 14, + "mode": 0, + "inputs": [ + { + "name": "string_list", + "type": "STRING", + "widget": { + "name": "string_list" + }, + "link": 39 + } + ], + "outputs": [ + { + "name": "STRING", + "type": "STRING", + "shape": 3, + "links": [ + 48 + ], + "slot_index": 0 + } + ], + "properties": { + "Node name for S&R": "StringListToString" + }, + "widgets_values": [ + "\\n", + "" + ], + "color": "#332922", + "bgcolor": "#593930" + }, + { + "id": 30, + "type": "WildcardPromptFromString", + "pos": [ + 2396.2451171875, + -266.2974548339844 + ], + "size": [ + 315, + 198 + ], + "flags": {}, 
+ "order": 15, + "mode": 0, + "inputs": [ + { + "name": "string", + "type": "STRING", + "widget": { + "name": "string" + }, + "link": 48 + }, + { + "name": "restrict_to_tags", + "type": "STRING", + "widget": { + "name": "restrict_to_tags" + }, + "link": 47 + } + ], + "outputs": [ + { + "name": "wildcard", + "type": "STRING", + "shape": 3, + "links": [ + 49 + ], + "slot_index": 0 + }, + { + "name": "segs_labels", + "type": "STRING", + "shape": 3, + "links": [ + 50 + ], + "slot_index": 1 + } + ], + "properties": { + "Node name for S&R": "WildcardPromptFromString" + }, + "widgets_values": [ + "", + "\\n", + "", + ", realistic, high details, high saturation", + "", + "1girl, 1boy, 2girls, multiple girls, realistic" + ], + "color": "#2a363b", + "bgcolor": "#3f5159" + } + ], + "links": [ + [ + 1, + 4, + 0, + 3, + 0, + "MODEL" + ], + [ + 2, + 5, + 0, + 3, + 3, + "LATENT" + ], + [ + 3, + 4, + 1, + 6, + 0, + "CLIP" + ], + [ + 4, + 6, + 0, + 3, + 1, + "CONDITIONING" + ], + [ + 5, + 4, + 1, + 7, + 0, + "CLIP" + ], + [ + 6, + 7, + 0, + 3, + 2, + "CONDITIONING" + ], + [ + 7, + 3, + 0, + 8, + 0, + "LATENT" + ], + [ + 8, + 4, + 2, + 8, + 1, + "VAE" + ], + [ + 10, + 14, + 0, + 11, + 0, + "IMAGE" + ], + [ + 12, + 15, + 0, + 12, + 1, + "SEGS" + ], + [ + 14, + 10, + 0, + 14, + 0, + "SEGS" + ], + [ + 15, + 10, + 0, + 15, + 0, + "SEGS" + ], + [ + 17, + 12, + 0, + 16, + 0, + "IMAGE" + ], + [ + 18, + 14, + 0, + 17, + 0, + "IMAGE" + ], + [ + 21, + 8, + 0, + 19, + 0, + "IMAGE" + ], + [ + 22, + 8, + 0, + 13, + 0, + "IMAGE" + ], + [ + 25, + 4, + 0, + 12, + 2, + "MODEL" + ], + [ + 26, + 4, + 1, + 12, + 3, + "CLIP" + ], + [ + 27, + 4, + 2, + 12, + 4, + "VAE" + ], + [ + 28, + 6, + 0, + 12, + 5, + "CONDITIONING" + ], + [ + 29, + 7, + 0, + 12, + 6, + "CONDITIONING" + ], + [ + 30, + 8, + 0, + 21, + 0, + "IMAGE" + ], + [ + 31, + 21, + 0, + 10, + 0, + "IMAGE" + ], + [ + 32, + 21, + 0, + 12, + 0, + "IMAGE" + ], + [ + 33, + 21, + 0, + 14, + 1, + "IMAGE" + ], + [ + 34, + 13, + 0, + 25, + 0, + "STRING" + ], + [ + 39, + 11, + 0, + 26, + 0, + "STRING" + ], + [ + 47, + 25, + 0, + 30, + 1, + "STRING" + ], + [ + 48, + 26, + 0, + 30, + 0, + "STRING" + ], + [ + 49, + 30, + 0, + 12, + 8, + "STRING" + ], + [ + 50, + 30, + 1, + 15, + 1, + "STRING" + ] + ], + "groups": [ + { + "id": 1, + "title": "Base Image", + "bounding": [ + -620, + -374, + 1311, + 872 + ], + "color": "#3f789e", + "font_size": 24, + "flags": {} + }, + { + "id": 2, + "title": "Upscale and Create Tiles", + "bounding": [ + 745, + -375, + 515, + 1202 + ], + "color": "#8AA", + "font_size": 24, + "flags": {} + }, + { + "id": 3, + "title": "Tag Base Image", + "bounding": [ + 1311, + -374, + 431, + 668 + ], + "color": "#b06634", + "font_size": 24, + "flags": {} + }, + { + "id": 4, + "title": "Tag Tiles", + "bounding": [ + 1815, + -378, + 460, + 750 + ], + "color": "#b06634", + "font_size": 24, + "flags": {} + }, + { + "id": 5, + "title": "Assign Prompts to Tiles", + "bounding": [ + 2367, + -381, + 380, + 615 + ], + "color": "#8AA", + "font_size": 24, + "flags": {} + }, + { + "id": 6, + "title": "Add Details", + "bounding": [ + 2834, + -382, + 1359, + 1377 + ], + "color": "#3f789e", + "font_size": 24, + "flags": {} + } + ], + "config": {}, + "extra": { + "ds": { + "scale": 1, + "offset": [ + 711, + 400 + ] + }, + "groupNodes": {}, + "controller_panel": { + "controllers": {}, + "hidden": true, + "highlight": true, + "version": 2, + "default_order": [] + }, + "node_versions": { + "comfy-core": "0.3.14", + "comfyui-impact-pack": "1ae7cae2df8cca06027edfa3a24512671239d6c4", + 
"comfyui-wd14-tagger": "1.0.0" + }, + "ue_links": [], + "VHS_latentpreview": false, + "VHS_latentpreviewrate": 0, + "VHS_MetadataImage": true, + "VHS_KeepIntermediate": true + }, + "version": 0.4 +} \ No newline at end of file diff --git a/zavodik/nodes/ComfyUI-Impact-Pack/example_workflows/6-DetailerWildcard.jpg b/zavodik/nodes/ComfyUI-Impact-Pack/example_workflows/6-DetailerWildcard.jpg new file mode 100644 index 0000000000000000000000000000000000000000..0e33a8423593abf7d41e5e8ce7975abbe7ba060c --- /dev/null +++ b/zavodik/nodes/ComfyUI-Impact-Pack/example_workflows/6-DetailerWildcard.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:f80a2add23387329e86abfb00382d1efd818df33a8fa19c93cdf75a478a491df +size 538538 diff --git a/zavodik/nodes/ComfyUI-Impact-Pack/example_workflows/6-DetailerWildcard.json b/zavodik/nodes/ComfyUI-Impact-Pack/example_workflows/6-DetailerWildcard.json new file mode 100644 index 0000000000000000000000000000000000000000..9bcca67c7bd1628f282c9d6e33d8c79f2788d1e1 --- /dev/null +++ b/zavodik/nodes/ComfyUI-Impact-Pack/example_workflows/6-DetailerWildcard.json @@ -0,0 +1,1084 @@ +{ + "last_node_id": 57, + "last_link_id": 116, + "nodes": [ + { + "id": 38, + "type": "SAMLoader", + "pos": [ + 870, + 1120 + ], + "size": [ + 315, + 82 + ], + "flags": {}, + "order": 0, + "mode": 0, + "inputs": [], + "outputs": [ + { + "name": "SAM_MODEL", + "type": "SAM_MODEL", + "links": [ + 81 + ], + "slot_index": 0 + } + ], + "properties": { + "Node name for S&R": "SAMLoader" + }, + "widgets_values": [ + "sam_vit_b_01ec64.pth", + "AUTO" + ], + "color": "#223", + "bgcolor": "#335" + }, + { + "id": 50, + "type": "Reroute", + "pos": [ + 1100, + 30 + ], + "size": [ + 75, + 26 + ], + "flags": {}, + "order": 6, + "mode": 0, + "inputs": [ + { + "name": "", + "type": "*", + "link": 110 + } + ], + "outputs": [ + { + "name": "", + "type": "VAE", + "links": [ + 109 + ], + "slot_index": 0 + } + ], + "properties": { + "showOutputText": false, + "horizontal": false + } + }, + { + "id": 48, + "type": "FromBasicPipe", + "pos": [ + 910, + 300 + ], + "size": [ + 221.4781951904297, + 106 + ], + "flags": {}, + "order": 5, + "mode": 0, + "inputs": [ + { + "name": "basic_pipe", + "type": "BASIC_PIPE", + "link": 116 + } + ], + "outputs": [ + { + "name": "model", + "type": "MODEL", + "links": [ + 99 + ], + "slot_index": 0 + }, + { + "name": "clip", + "type": "CLIP", + "links": null + }, + { + "name": "vae", + "type": "VAE", + "links": [ + 110 + ], + "slot_index": 2 + }, + { + "name": "positive", + "type": "CONDITIONING", + "links": [ + 100 + ], + "slot_index": 3 + }, + { + "name": "negative", + "type": "CONDITIONING", + "links": [ + 101 + ], + "slot_index": 4 + } + ], + "properties": { + "Node name for S&R": "FromBasicPipe" + }, + "widgets_values": [] + }, + { + "id": 5, + "type": "EmptyLatentImage", + "pos": [ + 490, + 190 + ], + "size": [ + 315, + 106 + ], + "flags": {}, + "order": 1, + "mode": 0, + "inputs": [], + "outputs": [ + { + "name": "LATENT", + "type": "LATENT", + "links": [ + 2 + ], + "slot_index": 0 + } + ], + "properties": { + "Node name for S&R": "EmptyLatentImage" + }, + "widgets_values": [ + 768, + 512, + 1 + ] + }, + { + "id": 42, + "type": "PreviewImage", + "pos": [ + 2140, + 130 + ], + "size": [ + 793.8984985351562, + 562.4002685546875 + ], + "flags": {}, + "order": 11, + "mode": 0, + "inputs": [ + { + "name": "images", + "type": "IMAGE", + "link": 107 + } + ], + "outputs": [], + "title": "Refined", + "properties": { + "Node name for S&R": "PreviewImage" + }, + 
"widgets_values": [], + "color": "#223", + "bgcolor": "#335" + }, + { + "id": 52, + "type": "PreviewImage", + "pos": [ + 2140, + 770 + ], + "size": [ + 800, + 580 + ], + "flags": {}, + "order": 12, + "mode": 0, + "inputs": [ + { + "name": "images", + "type": "IMAGE", + "link": 113 + } + ], + "outputs": [], + "title": "Original", + "properties": { + "Node name for S&R": "PreviewImage" + }, + "widgets_values": [], + "color": "#432", + "bgcolor": "#653" + }, + { + "id": 44, + "type": "BasicPipeToDetailerPipe", + "pos": [ + 1230, + 950 + ], + "size": [ + 380, + 240 + ], + "flags": {}, + "order": 4, + "mode": 0, + "inputs": [ + { + "name": "basic_pipe", + "type": "BASIC_PIPE", + "link": 115, + "slot_index": 0 + }, + { + "name": "bbox_detector", + "type": "BBOX_DETECTOR", + "link": 114 + }, + { + "name": "sam_model_opt", + "type": "SAM_MODEL", + "shape": 7, + "link": 81 + }, + { + "name": "segm_detector_opt", + "type": "SEGM_DETECTOR", + "shape": 7, + "link": null + }, + { + "name": "detailer_hook", + "type": "DETAILER_HOOK", + "shape": 7, + "link": null + } + ], + "outputs": [ + { + "name": "detailer_pipe", + "type": "DETAILER_PIPE", + "links": [ + 105 + ], + "slot_index": 0 + } + ], + "title": "BasicPipe -> DetailerPipe (NEW!!)", + "properties": { + "Node name for S&R": "BasicPipeToDetailerPipe" + }, + "widgets_values": [ + "{blue eyes, (angry:1.2)|{green eyes, mouth open|red eyes}| smile}", + "Select the LoRA to add to the text", + "Select the Wildcard to add to the text" + ], + "color": "#223", + "bgcolor": "#335" + }, + { + "id": 51, + "type": "Reroute", + "pos": [ + 2380, + 30 + ], + "size": [ + 82, + 26 + ], + "flags": {}, + "order": 10, + "mode": 2, + "inputs": [ + { + "name": "", + "type": "*", + "link": 111 + } + ], + "outputs": [ + { + "name": "IMAGE", + "type": "IMAGE", + "links": [ + 113 + ], + "slot_index": 0 + } + ], + "properties": { + "showOutputText": true, + "horizontal": false + } + }, + { + "id": 8, + "type": "VAEDecode", + "pos": [ + 1430, + 60 + ], + "size": [ + 140, + 46 + ], + "flags": { + "collapsed": true + }, + "order": 8, + "mode": 0, + "inputs": [ + { + "name": "samples", + "type": "LATENT", + "link": 7 + }, + { + "name": "vae", + "type": "VAE", + "link": 109 + } + ], + "outputs": [ + { + "name": "IMAGE", + "type": "IMAGE", + "links": [ + 106, + 111 + ], + "slot_index": 0 + } + ], + "properties": { + "Node name for S&R": "VAEDecode" + }, + "widgets_values": [], + "color": "#432", + "bgcolor": "#653" + }, + { + "id": 3, + "type": "KSampler", + "pos": [ + 1209, + 126 + ], + "size": [ + 400, + 650 + ], + "flags": {}, + "order": 7, + "mode": 0, + "inputs": [ + { + "name": "model", + "type": "MODEL", + "link": 99 + }, + { + "name": "positive", + "type": "CONDITIONING", + "link": 100 + }, + { + "name": "negative", + "type": "CONDITIONING", + "link": 101 + }, + { + "name": "latent_image", + "type": "LATENT", + "link": 2 + } + ], + "outputs": [ + { + "name": "LATENT", + "type": "LATENT", + "links": [ + 7 + ], + "slot_index": 0 + } + ], + "properties": { + "Node name for S&R": "KSampler" + }, + "widgets_values": [ + 497844439625000, + "fixed", + 20, + 8, + "euler", + "normal", + 1 + ], + "color": "#432", + "bgcolor": "#653" + }, + { + "id": 56, + "type": "UltralyticsDetectorProvider", + "pos": [ + 870, + 970 + ], + "size": [ + 315, + 78 + ], + "flags": {}, + "order": 2, + "mode": 0, + "inputs": [], + "outputs": [ + { + "name": "BBOX_DETECTOR", + "type": "BBOX_DETECTOR", + "shape": 3, + "links": [ + 114 + ], + "slot_index": 0 + }, + { + "name": "SEGM_DETECTOR", + "type": 
"SEGM_DETECTOR", + "shape": 3, + "links": null + } + ], + "properties": { + "Node name for S&R": "UltralyticsDetectorProvider" + }, + "widgets_values": [ + "bbox/face_yolov8m.pt" + ], + "color": "#223", + "bgcolor": "#335" + }, + { + "id": 57, + "type": "workflow>MAKE_BASIC_PIPE", + "pos": [ + 50, + 470 + ], + "size": [ + 410, + 360 + ], + "flags": {}, + "order": 3, + "mode": 0, + "inputs": [], + "outputs": [ + { + "name": "VAE", + "type": "VAE", + "shape": 3, + "links": null + }, + { + "name": "basic_pipe", + "type": "BASIC_PIPE", + "shape": 3, + "links": [ + 115, + 116 + ] + } + ], + "properties": { + "Node name for S&R": "workflow/MAKE_BASIC_PIPE" + }, + "widgets_values": [ + "vae-ft-mse-840000-ema-pruned.safetensors", + "SD1.5/V07_v07.safetensors", + "RAW photo, delicate, best quality, colorful, 2girls, 8k uhd, film grain, soft lighting, dslr, (Fujifilm XT3), (photorealistic:1.4), (detailed skin), soft lips, (very detailed long ponytail), aged down, studio lighting, from top, colorful sports wear, happy face, spread lips, (walking), (central park, cloud, sunshine), small breast", + "(low quality:1.4), (worst quality:1.4), bad anatomy, (nsfw:1.2), muscle, from back, from front, monochrome, (bikini:1.2)" + ], + "color": "#222", + "bgcolor": "#000" + }, + { + "id": 49, + "type": "FaceDetailerPipe", + "pos": [ + 1630, + 130 + ], + "size": [ + 480, + 1220 + ], + "flags": {}, + "order": 9, + "mode": 0, + "inputs": [ + { + "name": "image", + "type": "IMAGE", + "link": 106 + }, + { + "name": "detailer_pipe", + "type": "DETAILER_PIPE", + "link": 105 + }, + { + "name": "scheduler_func_opt", + "type": "SCHEDULER_FUNC", + "shape": 7, + "link": null + } + ], + "outputs": [ + { + "name": "image", + "type": "IMAGE", + "shape": 3, + "links": [ + 107 + ], + "slot_index": 0 + }, + { + "name": "cropped_refined", + "type": "IMAGE", + "shape": 6, + "links": null + }, + { + "name": "cropped_enhanced_alpha", + "type": "IMAGE", + "shape": 6, + "links": null + }, + { + "name": "mask", + "type": "MASK", + "shape": 3, + "links": null + }, + { + "name": "detailer_pipe", + "type": "DETAILER_PIPE", + "shape": 3, + "links": null + }, + { + "name": "cnet_images", + "type": "IMAGE", + "shape": 6, + "links": null + } + ], + "properties": { + "Node name for S&R": "FaceDetailerPipe" + }, + "widgets_values": [ + 256, + true, + 768, + 307405256705890, + "fixed", + 20, + 8, + "euler", + "normal", + 0.5, + 5, + true, + false, + 0.5, + 10, + 3, + "center-1", + 0, + 0.93, + 0, + 0.7, + "False", + 10, + 0.2, + 1, + false, + 0, + false, + false + ], + "color": "#223", + "bgcolor": "#335" + } + ], + "links": [ + [ + 2, + 5, + 0, + 3, + 3, + "LATENT" + ], + [ + 7, + 3, + 0, + 8, + 0, + "LATENT" + ], + [ + 81, + 38, + 0, + 44, + 2, + "SAM_MODEL" + ], + [ + 99, + 48, + 0, + 3, + 0, + "MODEL" + ], + [ + 100, + 48, + 3, + 3, + 1, + "CONDITIONING" + ], + [ + 101, + 48, + 4, + 3, + 2, + "CONDITIONING" + ], + [ + 105, + 44, + 0, + 49, + 1, + "DETAILER_PIPE" + ], + [ + 106, + 8, + 0, + 49, + 0, + "IMAGE" + ], + [ + 107, + 49, + 0, + 42, + 0, + "IMAGE" + ], + [ + 109, + 50, + 0, + 8, + 1, + "VAE" + ], + [ + 110, + 48, + 2, + 50, + 0, + "*" + ], + [ + 111, + 8, + 0, + 51, + 0, + "*" + ], + [ + 113, + 51, + 0, + 52, + 0, + "IMAGE" + ], + [ + 114, + 56, + 0, + 44, + 1, + "BBOX_DETECTOR" + ], + [ + 115, + 57, + 1, + 44, + 0, + "BASIC_PIPE" + ], + [ + 116, + 57, + 1, + 48, + 0, + "BASIC_PIPE" + ] + ], + "groups": [], + "config": {}, + "extra": { + "groupNodes": { + "MAKE_BASIC_PIPE": { + "nodes": [ + { + "type": "VAELoader", + "pos": [ + -200, 
+ 600 + ], + "size": { + "0": 315, + "1": 58 + }, + "flags": {}, + "order": 1, + "mode": 0, + "outputs": [ + { + "name": "VAE", + "type": "VAE", + "links": [], + "slot_index": 0, + "localized_name": "VAE" + } + ], + "properties": { + "Node name for S&R": "VAELoader" + }, + "widgets_values": [ + "vae-ft-mse-840000-ema-pruned.safetensors" + ], + "index": 0, + "inputs": [] + }, + { + "type": "CheckpointLoaderSimple", + "pos": [ + -660, + 680 + ], + "size": { + "0": 315, + "1": 98 + }, + "flags": {}, + "order": 6, + "mode": 0, + "outputs": [ + { + "name": "MODEL", + "type": "MODEL", + "links": [], + "slot_index": 0, + "localized_name": "MODEL" + }, + { + "name": "CLIP", + "type": "CLIP", + "links": [], + "slot_index": 1, + "localized_name": "CLIP" + }, + { + "name": "VAE", + "type": "VAE", + "links": [], + "slot_index": 2, + "localized_name": "VAE" + } + ], + "properties": { + "Node name for S&R": "CheckpointLoaderSimple" + }, + "widgets_values": [ + "SD1.5/V07_v07.safetensors" + ], + "index": 1, + "inputs": [] + }, + { + "type": "CLIPTextEncode", + "pos": [ + -260, + 750 + ], + "size": { + "0": 411.9563903808594, + "1": 162.07196044921875 + }, + "flags": {}, + "order": 7, + "mode": 0, + "inputs": [ + { + "name": "clip", + "type": "CLIP", + "link": null, + "localized_name": "clip" + } + ], + "outputs": [ + { + "name": "CONDITIONING", + "type": "CONDITIONING", + "links": [], + "slot_index": 0, + "localized_name": "CONDITIONING" + } + ], + "title": "Positive", + "properties": { + "Node name for S&R": "CLIPTextEncode" + }, + "widgets_values": [ + "RAW photo, delicate, best quality, colorful, 2girls, 8k uhd, film grain, soft lighting, dslr, (Fujifilm XT3), (photorealistic:1.4), (detailed skin), soft lips, (very detailed long ponytail), aged down, studio lighting, from top, colorful sports wear, happy face, spread lips, (walking), (central park, cloud, sunshine), small breast" + ], + "index": 2 + }, + { + "type": "CLIPTextEncode", + "pos": [ + -260, + 960 + ], + "size": { + "0": 410, + "1": 130 + }, + "flags": {}, + "order": 8, + "mode": 0, + "inputs": [ + { + "name": "clip", + "type": "CLIP", + "link": null, + "localized_name": "clip" + } + ], + "outputs": [ + { + "name": "CONDITIONING", + "type": "CONDITIONING", + "links": [], + "slot_index": 0, + "localized_name": "CONDITIONING" + } + ], + "title": "Negative", + "properties": { + "Node name for S&R": "CLIPTextEncode" + }, + "widgets_values": [ + "(low quality:1.4), (worst quality:1.4), bad anatomy, (nsfw:1.2), muscle, from back, from front, monochrome, (bikini:1.2)" + ], + "index": 3 + }, + { + "type": "ToBasicPipe", + "pos": [ + 210, + 680 + ], + "size": { + "0": 241.79998779296875, + "1": 106 + }, + "flags": {}, + "order": 9, + "mode": 0, + "inputs": [ + { + "name": "model", + "type": "MODEL", + "link": null, + "localized_name": "model" + }, + { + "name": "clip", + "type": "CLIP", + "link": null, + "localized_name": "clip" + }, + { + "name": "vae", + "type": "VAE", + "link": null, + "localized_name": "vae" + }, + { + "name": "positive", + "type": "CONDITIONING", + "link": null, + "localized_name": "positive" + }, + { + "name": "negative", + "type": "CONDITIONING", + "link": null, + "localized_name": "negative" + } + ], + "outputs": [ + { + "name": "basic_pipe", + "type": "BASIC_PIPE", + "links": [], + "slot_index": 0, + "localized_name": "basic_pipe" + } + ], + "properties": { + "Node name for S&R": "ToBasicPipe" + }, + "index": 4 + } + ], + "links": [ + [ + 1, + 1, + 2, + 0, + 4, + "CLIP" + ], + [ + 1, + 1, + 3, + 0, + 4, + "CLIP" + ], + [ + 
1, + 0, + 4, + 0, + 4, + "MODEL" + ], + [ + 1, + 1, + 4, + 1, + 4, + "CLIP" + ], + [ + 0, + 0, + 4, + 2, + 14, + "VAE" + ], + [ + 2, + 0, + 4, + 3, + 6, + "CONDITIONING" + ], + [ + 3, + 0, + 4, + 4, + 7, + "CONDITIONING" + ] + ], + "external": [ + [ + 4, + 0, + "BASIC_PIPE" + ] + ] + } + }, + "controller_panel": { + "controllers": {}, + "hidden": true, + "highlight": true, + "version": 2, + "default_order": [] + }, + "ds": { + "scale": 0.7513148009015777, + "offset": { + "0": 149.68603515625, + "1": 245.33897399902344 + } + }, + "node_versions": { + "comfyui-impact-pack": "1ae7cae2df8cca06027edfa3a24512671239d6c4", + "comfy-core": "0.3.14", + "comfyui-impact-subpack": "74db20c95eca152a6d686c914edc0ef4e4762cb8" + }, + "ue_links": [], + "VHS_latentpreview": false, + "VHS_latentpreviewrate": 0, + "VHS_MetadataImage": true, + "VHS_KeepIntermediate": true + }, + "version": 0.4 +} \ No newline at end of file diff --git a/zavodik/nodes/ComfyUI-Impact-Pack/impact-pack.ini b/zavodik/nodes/ComfyUI-Impact-Pack/impact-pack.ini new file mode 100644 index 0000000000000000000000000000000000000000..014ee724ef2b3b0d74ba56ad652e9d05f3ac3bdc --- /dev/null +++ b/zavodik/nodes/ComfyUI-Impact-Pack/impact-pack.ini @@ -0,0 +1,7 @@ +[default] +sam_editor_cpu = False +sam_editor_model = sam_vit_b_01ec64.pth +custom_wildcards = C:\AI\ComfyUI_new\ComfyUI\custom_nodes\ComfyUI-Impact-Pack\custom_wildcards +disable_gpu_opencv = True +wildcard_cache_limit_mb = 50 + diff --git a/zavodik/nodes/ComfyUI-Impact-Pack/install.py b/zavodik/nodes/ComfyUI-Impact-Pack/install.py new file mode 100644 index 0000000000000000000000000000000000000000..4f4572febad876181e6e7c50cfc08953319bf4de --- /dev/null +++ b/zavodik/nodes/ComfyUI-Impact-Pack/install.py @@ -0,0 +1,116 @@ +import os +import shutil +import sys +import subprocess +import threading +import locale +import traceback + + +if sys.argv[0] == 'install.py': + sys.path.append('.') # for portable version + + +impact_path = os.path.join(os.path.dirname(__file__), "modules") + + +comfy_path = os.environ.get('COMFYUI_PATH') +if comfy_path is None: + print(f"\nWARN: The `COMFYUI_PATH` environment variable is not set. Assuming `{os.path.dirname(__file__)}/../../` as the ComfyUI path.", file=sys.stderr) + comfy_path = os.path.abspath(os.path.join(os.path.dirname(__file__), '..', '..')) + +model_path = os.environ.get('COMFYUI_MODEL_PATH') +if model_path is None: + try: + import folder_paths + model_path = folder_paths.models_dir + except: + pass + + if model_path is None: + model_path = os.path.abspath(os.path.join(comfy_path, 'models')) + print(f"\nWARN: The `COMFYUI_MODEL_PATH` environment variable is not set. 
Assuming `{model_path}` as the model path.", file=sys.stderr) + + +sys.path.append(impact_path) +sys.path.append(comfy_path) + + +# --- +def handle_stream(stream, is_stdout): + stream.reconfigure(encoding=locale.getpreferredencoding(), errors='replace') + + for msg in stream: + if is_stdout: + print(msg, end="", file=sys.stdout) + else: + print(msg, end="", file=sys.stderr) + + +def process_wrap(cmd_str, cwd=None, handler=None, env=None): + print(f"[Impact Pack] EXECUTE: {cmd_str} in '{cwd}'") + process = subprocess.Popen(cmd_str, cwd=cwd, stdout=subprocess.PIPE, stderr=subprocess.PIPE, env=env, text=True, bufsize=1) + + if handler is None: + handler = handle_stream + + stdout_thread = threading.Thread(target=handler, args=(process.stdout, True)) + stderr_thread = threading.Thread(target=handler, args=(process.stderr, False)) + + stdout_thread.start() + stderr_thread.start() + + stdout_thread.join() + stderr_thread.join() + + return process.wait() +# --- + + +try: + from torchvision.datasets.utils import download_url + import impact.config + + print("### ComfyUI-Impact-Pack: Check dependencies") + def install(): + new_env = os.environ.copy() + new_env["COMFYUI_PATH"] = comfy_path + new_env["COMFYUI_MODEL_PATH"] = model_path + + # Download model + print("### ComfyUI-Impact-Pack: Check basic models") + sam_path = os.path.join(model_path, "sams") + onnx_path = os.path.join(model_path, "onnx") + + if not os.path.exists(os.path.join(os.path.dirname(__file__), '..', 'skip_download_model')): + try: + if not os.path.exists(os.path.join(sam_path, "sam_vit_b_01ec64.pth")): + download_url("https://dl.fbaipublicfiles.com/segment_anything/sam_vit_b_01ec64.pth", sam_path) + except: + print("[Impact Pack] Failed to auto-download model files. Please download them manually.") + + if not os.path.exists(onnx_path): + print(f"### ComfyUI-Impact-Pack: onnx model directory created ({onnx_path})") + os.mkdir(onnx_path) + + impact.config.write_config() + + # Remove legacy subpack + try: + subpack_path = os.path.join(os.path.dirname(__file__), 'impact_subpack') + if os.path.exists(subpack_path): + shutil.rmtree(subpack_path) + print(f"Legacy subpack detected. '{subpack_path}' has been removed.") + + subpack_path = os.path.join(os.path.dirname(__file__), 'subpack') + if os.path.exists(subpack_path): + shutil.rmtree(subpack_path) + print(f"Legacy subpack detected. '{subpack_path}' has been removed.") + except: + print(f"ERROR: Failed to delete legacy subpack '{subpack_path}'\nPlease delete the folder after terminating ComfyUI.") + + install() + +except Exception: + print("[ERROR] ComfyUI-Impact-Pack: Dependency installation has failed. 
Please install manually.") + traceback.print_exc() diff --git a/zavodik/nodes/ComfyUI-Impact-Pack/js/common.js b/zavodik/nodes/ComfyUI-Impact-Pack/js/common.js new file mode 100644 index 0000000000000000000000000000000000000000..31da2c858d165c5a66e2b509965ada4eca45753e --- /dev/null +++ b/zavodik/nodes/ComfyUI-Impact-Pack/js/common.js @@ -0,0 +1,281 @@ +import { api } from "../../scripts/api.js"; +import { app } from "../../scripts/app.js"; + +let original_show = app.ui.dialog.show; + +export function customAlert(message) { + try { + app.extensionManager.toast.addAlert(message); + } + catch { + alert(message); + } +} + +export function isBeforeFrontendVersion(compareVersion) { + try { + const frontendVersion = window['__COMFYUI_FRONTEND_VERSION__']; + if (typeof frontendVersion !== 'string') { + return false; + } + + function parseVersion(versionString) { + const parts = versionString.split('.').map(Number); + return parts.length === 3 && parts.every(part => !isNaN(part)) ? parts : null; + } + + const currentVersion = parseVersion(frontendVersion); + const comparisonVersion = parseVersion(compareVersion); + + if (!currentVersion || !comparisonVersion) { + return false; + } + + for (let i = 0; i < 3; i++) { + if (currentVersion[i] > comparisonVersion[i]) { + return false; + } else if (currentVersion[i] < comparisonVersion[i]) { + return true; + } + } + + return false; + } catch { + return true; + } +} + +function dialog_show_wrapper(html) { + if (typeof html === "string") { + if(html.includes("IMPACT-PACK-SIGNAL: STOP CONTROL BRIDGE")) { + return; + } + + this.textElement.innerHTML = html; + } else { + this.textElement.replaceChildren(html); + } + this.element.style.display = "flex"; +} + +app.ui.dialog.show = dialog_show_wrapper; + + +function nodeFeedbackHandler(event) { + let nodes = app.graph._nodes_by_id; + let node = nodes[event.detail.node_id]; + if(node) { + const w = node.widgets.find((w) => event.detail.widget_name === w.name); + if(w) { + w.value = event.detail.value; + } + } +} + +api.addEventListener("impact-node-feedback", nodeFeedbackHandler); + + +function setMuteState(event) { + let nodes = app.graph._nodes_by_id; + let node = nodes[event.detail.node_id]; + if(node) { + if(event.detail.is_active) + node.mode = 0; + else + node.mode = 2; + } +} + +api.addEventListener("impact-node-mute-state", setMuteState); + + +async function bridgeContinue(event) { + let nodes = app.graph._nodes_by_id; + let node = nodes[event.detail.node_id]; + if(node) { + const mutes = new Set(event.detail.mutes); + const actives = new Set(event.detail.actives); + const bypasses = new Set(event.detail.bypasses); + + for(let i in app.graph._nodes_by_id) { + let this_node = app.graph._nodes_by_id[i]; + if(mutes.has(i)) { + this_node.mode = 2; + } + else if(actives.has(i)) { + this_node.mode = 0; + } + else if(bypasses.has(i)) { + this_node.mode = 4; + } + } + + await app.queuePrompt(0, 1); + } +} + +api.addEventListener("impact-bridge-continue", bridgeContinue); + + +function addQueue(event) { + app.queuePrompt(0, 1); +} + +api.addEventListener("impact-add-queue", addQueue); + + +function refreshPreview(event) { + let node_id = event.detail.node_id; + let item = event.detail.item; + let img = new Image(); + img.src = `/view?filename=${item.filename}&subfolder=${item.subfolder}&type=${item.type}&no-cache=${Date.now()}`; + let node = app.graph._nodes_by_id[node_id]; + if(node) + node.imgs = [img]; +} + +api.addEventListener("impact-preview", refreshPreview); + + +// 
============================================================================ +// MaskRectArea Shared Utilities +// ============================================================================ + +/** + * Reads a numeric value from a connected link by inspecting the origin node widget. + * More reliable than getInputData() in ComfyUI's frontend execution model. + * + * @param {LGraphNode} node - LiteGraph node instance + * @param {string} inputName - Name of the input to read + * @returns {number|null} The numeric value or null if not available + */ +export function readLinkedNumber(node, inputName) { + try { + if (!node || !node.graph || !Array.isArray(node.inputs)) { + return null; + } + const inp = node.inputs.find(i => i && i.name === inputName); + if (!inp || inp.link == null) { + return null; + } + + const link = node.graph.links && node.graph.links[inp.link]; + if (!link) { + return null; + } + + const originNode = node.graph.getNodeById + ? node.graph.getNodeById(link.origin_id) + : null; + if (!originNode || !Array.isArray(originNode.widgets) || originNode.widgets.length === 0) { + return null; + } + + const w = originNode.widgets.find(ww => ww && ww.name === "value") + || originNode.widgets[0]; + const v = w ? w.value : null; + + return (typeof v === "number") ? v : null; + } catch (e) { + return null; + } +} + +/** + * Generates a color based on percentage using HSL color space. + * + * @param {number} percent - Value between 0 and 1 + * @param {string} alpha - Hex alpha value (e.g., "ff", "80") + * @returns {string} Hex color string with alpha (e.g., "#ff8040ff") + */ +export function getDrawColor(percent, alpha) { + let h = 360 * percent; + let s = 50; + let l = 50; + l /= 100; + const a = s * Math.min(l, 1 - l) / 100; + const f = n => { + const k = (n + h / 30) % 12; + const color = l - a * Math.max(Math.min(k - 3, 9 - k, 1), -1); + return Math.round(255 * color).toString(16).padStart(2, '0'); + }; + return `#${f(0)}${f(8)}${f(4)}${alpha}`; +} + +/** + * Computes and adjusts canvas size for preview widgets. + * + * @param {LGraphNode} node - LiteGraph node instance + * @param {[number, number]} size - [width, height] array + * @param {number} minHeight - Minimum canvas height (REQUIRED) + * @param {number} minWidth - Minimum canvas width (REQUIRED) + * @returns {void} + */ +export function computeCanvasSize(node, size, minHeight, minWidth) { + // Validate required parameters + if (typeof minHeight !== 'number' || typeof minWidth !== 'number') { + console.warn('[computeCanvasSize] minHeight and minWidth are required parameters'); + return; + } + + // Null safety check for widgets array + if (!node.widgets?.length || node.widgets[0].last_y == null) { + return; + } + + // LiteGraph global availability check + const NODE_WIDGET_HEIGHT = (typeof LiteGraph !== 'undefined' && LiteGraph.NODE_WIDGET_HEIGHT) + ? 
LiteGraph.NODE_WIDGET_HEIGHT + : 20; + + let y = node.widgets[0].last_y + 5; + let freeSpace = size[1] - y; + + // Compute the height of all non-customCanvas widgets + let widgetHeight = 0; + for (let i = 0; i < node.widgets.length; i++) { + const w = node.widgets[i]; + if (w.type !== "customCanvas") { + if (w.computeSize) { + widgetHeight += w.computeSize()[1] + 4; + } else { + widgetHeight += NODE_WIDGET_HEIGHT + 5; + } + } + } + + // Ensure there is enough vertical space + freeSpace -= widgetHeight; + + // Clamp minimum canvas height + if (freeSpace < minHeight) { + freeSpace = minHeight; + } + + // Allow both grow and shrink to fit content + const targetHeight = y + widgetHeight + freeSpace; + if (node.size[1] !== targetHeight) { + node.size[1] = targetHeight; + node.graph.setDirtyCanvas(true); + } + + // Ensure the node width meets the minimum width requirement + if (node.size[0] < minWidth) { + node.size[0] = minWidth; + node.graph.setDirtyCanvas(true); + } + + // Position each of the widgets + for (const w of node.widgets) { + w.y = y; + if (w.type === "customCanvas") { + y += freeSpace; + } else if (w.computeSize) { + y += w.computeSize()[1] + 4; + } else { + y += NODE_WIDGET_HEIGHT + 4; + } + } + + node.canvasHeight = freeSpace; +} diff --git a/zavodik/nodes/ComfyUI-Impact-Pack/js/impact-image-util.js b/zavodik/nodes/ComfyUI-Impact-Pack/js/impact-image-util.js new file mode 100644 index 0000000000000000000000000000000000000000..678d60ad5766b80b7deeb538e0c63bc9e6384d19 --- /dev/null +++ b/zavodik/nodes/ComfyUI-Impact-Pack/js/impact-image-util.js @@ -0,0 +1,229 @@ +import { ComfyApp, app } from "../../scripts/app.js"; +import { api } from "../../scripts/api.js"; + +function load_image(str) { + let base64String = canvas.toDataURL('image/png'); + let img = new Image(); + img.src = base64String; +} + +function getFileItem(baseType, path) { + try { + let pathType = baseType; + + if (path.endsWith("[output]")) { + pathType = "output"; + path = path.slice(0, -9); + } else if (path.endsWith("[input]")) { + pathType = "input"; + path = path.slice(0, -8); + } else if (path.endsWith("[temp]")) { + pathType = "temp"; + path = path.slice(0, -7); + } + + const subfolder = path.substring(0, path.lastIndexOf('/')); + const filename = path.substring(path.lastIndexOf('/') + 1); + + return { + filename: filename, + subfolder: subfolder, + type: pathType + }; + } + catch(exception) { + return null; + } +} + +async function loadImageFromUrl(image, node_id, v, need_to_load) { + let item = getFileItem('temp', v); + + if(item) { + let params = `?node_id=${node_id}&filename=${item.filename}&type=${item.type}&subfolder=${item.subfolder}`; + + let res = await api.fetchApi('/impact/set/pb_id_image'+params, { cache: "no-store" }); + if(res.status == 200) { + let pb_id = await res.text(); + if(need_to_load) {; + image.src = api.apiURL(`/view?filename=${item.filename}&type=${item.type}&subfolder=${item.subfolder}`); + } + return pb_id; + } + else { + return `$${node_id}-0`; + } + } + else { + return `$${node_id}-0`; + } +} + +async function loadImageFromId(image, v) { + let res = await api.fetchApi('/impact/get/pb_id_image?id='+v, { cache: "no-store" }); + if(res.status == 200) { + let item = await res.json(); + image.src = api.apiURL(`/view?filename=${item.filename}&type=${item.type}&subfolder=${item.subfolder}`); + return true; + } + + return false; +} + +app.registerExtension({ + name: "Comfy.Impact.img", + + nodeCreated(node, app) { + if(node.comfyClass == "PreviewBridge" || node.comfyClass == 
"PreviewBridgeLatent") { + let w = node.widgets.find(obj => obj.name === 'image'); + node._imgs = [new Image()]; + node.imageIndex = 0; + + Object.defineProperty(w, 'value', { + async set(v) { + if(w._lock) + return; + + const stackTrace = new Error().stack; + if(stackTrace.includes('presetText.js')) + return; + + var image = new Image(); + if(v && v.constructor == String && v.startsWith('$')) { + // from node feedback + let need_to_load = node._imgs[0].src == ''; + if(await loadImageFromId(image, v, need_to_load)) { + w._value = v; + if(node._imgs[0].src == '') { + node._imgs = [image]; + } + } + else { + w._value = `$${node.id}-0`; + } + } + else { + // from clipspace + w._lock = true; + w._value = await loadImageFromUrl(image, node.id, v, false); + w._lock = false; + } + }, + get() { + if(w._value == undefined) { + w._value = `$${node.id}-0`; + } + return w._value; + } + }); + + Object.defineProperty(node, 'imgs', { + set(v) { + const stackTrace = new Error().stack; + if(v && v.length == 0) + return; + else if(stackTrace.includes('pasteFromClipspace')) { + let sp = new URLSearchParams(v[0].src.split("?")[1]); + let str = ""; + if(sp.get('subfolder')) { + str += sp.get('subfolder') + '/'; + } + str += `${sp.get("filename")} [${sp.get("type")}]`; + + w.value = str; + } + + node._imgs = v; + }, + get() { + return node._imgs; + } + }); + } + + if(node.comfyClass == "ImageReceiver") { + let path_widget = node.widgets.find(obj => obj.name === 'image'); + let w = node.widgets.find(obj => obj.name === 'image_data'); + let stw_widget = node.widgets.find(obj => obj.name === 'save_to_workflow'); + w._value = ""; + + Object.defineProperty(w, 'value', { + set(v) { + if(v != '[IMAGE DATA]') + w._value = v; + }, + get() { + const stackTrace = new Error().stack; + if(!stackTrace.includes('draw') && !stackTrace.includes('graphToPrompt') && stackTrace.includes('app.js')) { + return "[IMAGE DATA]"; + } + else { + if(stw_widget.value) + return w._value; + else + return ""; + } + } + }); + + let set_img_act = (v) => { + node._img = v; + var canvas = document.createElement('canvas'); + canvas.width = v[0].width; + canvas.height = v[0].height; + + var context = canvas.getContext('2d'); + context.drawImage(v[0], 0, 0, v[0].width, v[0].height); + + var base64Image = canvas.toDataURL('image/png'); + w.value = base64Image; + }; + + Object.defineProperty(node, 'imgs', { + set(v) { + if (v && !v[0].complete) { + let orig_onload = v[0].onload; + v[0].onload = function(v2) { + if(orig_onload) + orig_onload(); + set_img_act(v); + }; + } + else { + set_img_act(v); + } + }, + get() { + if(this._img == undefined && w.value != '') { + this._img = [new Image()]; + if(stw_widget.value && w.value != '[IMAGE DATA]') + this._img[0].src = w.value; + } + else if(this._img == undefined && path_widget.value) { + let image = new Image(); + image.src = path_widget.value; + + try { + let item = getFileItem('temp', path_widget.value); + let params = `?filename=${item.filename}&type=${item.type}&subfolder=${item.subfolder}`; + + let res = api.fetchApi('/view/validate'+params, { cache: "no-store" }).then(response => response); + if(res.status == 200) { + image.src = api.apiURL('/view'+params); + } + + this._img = [new Image()]; // placeholder + image.onload = function(v) { + set_img_act([image]); + }; + } + catch { + + } + } + return this._img; + } + }); + } + } +}) diff --git a/zavodik/nodes/ComfyUI-Impact-Pack/js/impact-pack.js b/zavodik/nodes/ComfyUI-Impact-Pack/js/impact-pack.js new file mode 100644 index 
0000000000000000000000000000000000000000..681a570c2b3e94bab9addbe928940f29b71fef11 --- /dev/null +++ b/zavodik/nodes/ComfyUI-Impact-Pack/js/impact-pack.js @@ -0,0 +1,988 @@ +import { ComfyApp, app } from "../../scripts/app.js"; +import { ComfyDialog, $el } from "../../scripts/ui.js"; +import { api } from "../../scripts/api.js"; +import { customAlert, isBeforeFrontendVersion } from "./common.js"; + +const is_legacy_front = () => isBeforeFrontendVersion('1.16.9'); + +if(is_legacy_front()) { + customAlert("An outdated version(<1.16.9) of the `comfyui-frontend-package` is installed. It is not compatible with the current version of the Impact Pack."); +} + +let wildcards_list = []; +let wildcard_status = { + on_demand_mode: false, + total_available: 0, + loaded_count: 0, + last_update: null +}; + +async function load_wildcards() { + let res = await api.fetchApi('/impact/wildcards/list'); + let data = await res.json(); + wildcards_list = data.data; +} + +async function load_wildcard_status() { + try { + let res = await api.fetchApi('/impact/wildcards/list/loaded'); + let data = await res.json(); + wildcard_status = { + on_demand_mode: data.on_demand_mode || false, + total_available: data.total_available || 0, + loaded_count: data.data ? data.data.length : 0, + last_update: new Date() + }; + } catch (error) { + console.error('Failed to load wildcard status:', error); + } +} + +export function get_wildcard_label() { + if (wildcard_status.on_demand_mode) { + return `Select Wildcard 🔵 On-Demand: ${wildcard_status.loaded_count} loaded`; + } else { + return `Select Wildcard 🟢 Full Cache`; + } +} + +export function is_wildcard_label(value) { + // Check if value is a label (not an actual wildcard selection) + return value === "Select the Wildcard to add to the text" || + value.startsWith("Select Wildcard 🔵 On-Demand:") || + value === "Select Wildcard 🟢 Full Cache"; +} + +Promise.all([load_wildcards(), load_wildcard_status()]); + +export function get_wildcards_list() { + return wildcards_list; +} + +export { load_wildcard_status }; + +// temporary implementation (copying from https://github.com/pythongosssss/ComfyUI-WD14-Tagger) +// I think this should be included into master!! +class ImpactProgressBadge { + constructor() { + if (!window.__progress_badge__) { + window.__progress_badge__ = Symbol("__impact_progress_badge__"); + } + this.symbol = window.__progress_badge__; + } + + getState(node) { + return node[this.symbol] || {}; + } + + setState(node, state) { + node[this.symbol] = state; + app.canvas.setDirty(true); + } + + addStatusHandler(nodeType) { + if (nodeType[this.symbol]?.statusTagHandler) { + return; + } + if (!nodeType[this.symbol]) { + nodeType[this.symbol] = {}; + } + nodeType[this.symbol] = { + statusTagHandler: true, + }; + + api.addEventListener("impact/update_status", ({ detail }) => { + let { node, progress, text } = detail; + const n = app.graph.getNodeById(+(node || app.runningNodeId)); + if (!n) return; + const state = this.getState(n); + state.status = Object.assign(state.status || {}, { progress: text ? 
progress : null, text: text || null }); + this.setState(n, state); + }); + + const self = this; + const onDrawForeground = nodeType.prototype.onDrawForeground; + nodeType.prototype.onDrawForeground = function (ctx) { + const r = onDrawForeground?.apply?.(this, arguments); + const state = self.getState(this); + if (!state?.status?.text) { + return r; + } + + const { fgColor, bgColor, text, progress, progressColor } = { ...state.status }; + + ctx.save(); + ctx.font = "12px sans-serif"; + const sz = ctx.measureText(text); + ctx.fillStyle = bgColor || "dodgerblue"; + ctx.beginPath(); + ctx.roundRect(0, -LiteGraph.NODE_TITLE_HEIGHT - 20, sz.width + 12, 20, 5); + ctx.fill(); + + if (progress) { + ctx.fillStyle = progressColor || "green"; + ctx.beginPath(); + ctx.roundRect(0, -LiteGraph.NODE_TITLE_HEIGHT - 20, (sz.width + 12) * progress, 20, 5); + ctx.fill(); + } + + ctx.fillStyle = fgColor || "#fff"; + ctx.fillText(text, 6, -LiteGraph.NODE_TITLE_HEIGHT - 6); + ctx.restore(); + return r; + }; + } +} + +const input_tracking = {}; +const input_dirty = {}; +const output_tracking = {}; + +function progressExecuteHandler(event) { + if(event.detail?.output?.aux){ + const id = event.detail.node; + if(input_tracking.hasOwnProperty(id)) { + if(input_tracking.hasOwnProperty(id) && input_tracking[id][0] != event.detail.output.aux[0]) { + input_dirty[id] = true; + } + else{ + + } + } + + input_tracking[id] = event.detail.output.aux; + } +} + +function imgSendHandler(event) { + if(event.detail.images.length > 0){ + let data = event.detail.images[0]; + let filename = `${data.filename} [${data.type}]`; + + let nodes = app.graph._nodes; + for(let i in nodes) { + if(nodes[i].type == 'ImageReceiver') { + let is_linked = false; + + if(nodes[i].widgets[1].type == 'converted-widget') { + for(let j in nodes[i].inputs) { + let input = nodes[i].inputs[j]; + if(input.name === 'link_id') { + if(input.link) { + let src_node = app.graph._nodes_by_id[app.graph.links[input.link].origin_id]; + if(src_node.type == 'ImpactInt' || src_node.type == 'PrimitiveNode') { + is_linked = true; + } + } + break; + } + } + } + else if(nodes[i].widgets[1].value == event.detail.link_id) { + is_linked = true; + } + + if(is_linked) { + if(data.subfolder) + nodes[i].widgets[0].value = `${data.subfolder}/${data.filename} [${data.type}]`; + else + nodes[i].widgets[0].value = `${data.filename} [${data.type}]`; + + let img = new Image(); + img.onload = (event) => { + nodes[i].imgs = [img]; + nodes[i].size[1] = Math.max(200, nodes[i].size[1]); + app.canvas.setDirty(true); + }; + img.src = `/view?filename=${data.filename}&type=${data.type}&subfolder=${data.subfolder}`+app.getPreviewFormatParam(); + } + } + } + } +} + + +function latentSendHandler(event) { + if(event.detail.images.length > 0){ + let data = event.detail.images[0]; + let filename = `${data.filename} [${data.type}]`; + + let nodes = app.graph._nodes; + for(let i in nodes) { + if(nodes[i].type == 'LatentReceiver') { + if(nodes[i].widgets[1].value == event.detail.link_id) { + if(data.subfolder) + nodes[i].widgets[0].value = `${data.subfolder}/${data.filename} [${data.type}]`; + else + nodes[i].widgets[0].value = `${data.filename} [${data.type}]`; + + let img = new Image(); + img.src = `/view?filename=${data.filename}&type=${data.type}&subfolder=${data.subfolder}`+app.getPreviewFormatParam(); + nodes[i].imgs = [img]; + nodes[i].size[1] = Math.max(200, nodes[i].size[1]); + } + } + } + } +} + + +function valueSendHandler(event) { + let nodes = app.graph._nodes; + for(let i in nodes) { + 
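// mirror the broadcast value into every ImpactValueReceiver whose link_id matches, and retag its type combo to match the payload + 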
if(nodes[i].type == 'ImpactValueReceiver') { + if(nodes[i].widgets[2].value == event.detail.link_id) { + nodes[i].widgets[1].value = event.detail.value; + + let typ = typeof event.detail.value; + if(typ == 'string') { + nodes[i].widgets[0].value = "STRING"; + } + else if(typ == "boolean") { + nodes[i].widgets[0].value = "BOOLEAN"; + } + else if(typ != "number") { + nodes[i].widgets[0].value = typeof event.detail.value; + } + else if(Number.isInteger(event.detail.value)) { + nodes[i].widgets[0].value = "INT"; + } + else { + nodes[i].widgets[0].value = "FLOAT"; + } + } + } + } +} + + +const impactProgressBadge = new ImpactProgressBadge(); + +api.addEventListener("stop-iteration", () => { + document.getElementById("autoQueueCheckbox").checked = false; +}); +api.addEventListener("value-send", valueSendHandler); +api.addEventListener("img-send", imgSendHandler); +api.addEventListener("latent-send", latentSendHandler); +api.addEventListener("executed", progressExecuteHandler); + +// Update wildcard status after workflow execution (on-demand mode) +api.addEventListener("executed", async (event) => { + if (wildcard_status.on_demand_mode) { + await load_wildcard_status(); + await load_wildcards(); + app.canvas.setDirty(true); + } +}); + +app.registerExtension({ + name: "Comfy.Impack", + + commands: [ + { + id: 'refresh-impact-wildcard', + label: 'Impact: Refresh Wildcard', + function: async () => { + await api.fetchApi('/impact/wildcards/refresh'); + await Promise.all([load_wildcards(), load_wildcard_status()]); + app.extensionManager.toast.add({ + severity: 'info', + summary: 'Refreshed!', + detail: 'Impact Wildcard List is refreshed!!', + life: 3000 + }); + } + } + ], + + menuCommands: [ + { + path: ['Edit'], + commands: ['refresh-impact-wildcard'] + } + ], + + loadedGraphNode(node, app) { + if (node.comfyClass == "MaskPainter") { + input_dirty[node.id + ""] = true; + } + }, + + async beforeRegisterNodeDef(nodeType, nodeData, app) { + if (nodeData.name == "IterativeLatentUpscale" || nodeData.name == "IterativeImageUpscale" + || nodeData.name == "RegionalSampler"|| nodeData.name == "RegionalSamplerAdvanced") { + impactProgressBadge.addStatusHandler(nodeType); + } + + if(nodeData.name == "ImpactControlBridge") { + const onConnectionsChange = nodeType.prototype.onConnectionsChange; + nodeType.prototype.onConnectionsChange = function (type, index, connected, link_info) { + if(index != 0 || !link_info || this.inputs[0].type != '*') + return; + + // assign type + let slot_type = '*'; + + if(type == 2) { + slot_type = link_info.type; + } + else { + const node = app.graph.getNodeById(link_info.origin_id); + slot_type = node?.outputs[link_info.origin_slot]?.type; + } + + this.inputs[0].type = slot_type; + this.outputs[0].type = slot_type; + this.outputs[0].label = slot_type; + } + } + + if(nodeData.name == "ImpactConditionalBranch" || nodeData.name == "ImpactConditionalBranchSelMode") { + const onConnectionsChange = nodeType.prototype.onConnectionsChange; + nodeType.prototype.onConnectionsChange = function (type, index, connected, link_info) { + if(!link_info || this.inputs[0].type != '*') + return; + + if(index >= 2) + return; + + // assign type + let slot_type = '*'; + + if(type == 2) { + slot_type = link_info.type; + } + else { + const node = app.graph.getNodeById(link_info.origin_id); + slot_type = node?.outputs[link_info.origin_slot].type; + } + + this.inputs[0].type = slot_type; + this.inputs[1].type = slot_type; + this.outputs[0].type = slot_type; + this.outputs[0].label = slot_type; + } + } + + 
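// The same lazy type-resolution pattern continues below: the first concrete connection fixes the '*' slots to the linked slot's type. + 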
if(nodeData.name == "ImpactCompare") { + const onConnectionsChange = nodeType.prototype.onConnectionsChange; + nodeType.prototype.onConnectionsChange = function (type, index, connected, link_info) { + if(!link_info || this.inputs[0].type != '*' || type == 2) + return; + + // assign type + const node = app.graph.getNodeById(link_info.origin_id); + let slot_type = node?.outputs[link_info.origin_slot].type; + + this.inputs[0].type = slot_type; + this.inputs[1].type = slot_type; + } + } + + if(nodeData.name == "ImpactSelectNthItemOfAnyList") { + const onConnectionsChange = nodeType.prototype.onConnectionsChange; + nodeType.prototype.onConnectionsChange = function (type, index, connected, link_info) { + if(!link_info || this.inputs[0].type != '*') + return; + + if(index >= 2) + return; + + // assign type + let slot_type = '*'; + + if(type == 2) { + slot_type = link_info.type; + } + else { + const node = app.graph.getNodeById(link_info.origin_id); + slot_type = node?.outputs[link_info.origin_slot].type; + } + + this.inputs[0].type = slot_type; + this.outputs[0].type = slot_type; + this.outputs[0].label = slot_type; + } + } + + if(nodeData.name === 'ImpactInversedSwitch') { + nodeData.output = ['*']; + nodeData.output_is_list = [false]; + nodeData.output_name = ['output1']; + + const onConnectionsChange = nodeType.prototype.onConnectionsChange; + nodeType.prototype.onConnectionsChange = function (type, index, connected, link_info) { + if(!link_info) + return; + + // HOTFIX: subgraph + const stackTrace = new Error().stack; + + if(stackTrace.includes('convertToSubgraph') || stackTrace.includes('Subgraph.configure')) { + return; + } + + if(type == 2) { + // connect output + if(connected){ + if(app.graph._nodes_by_id[link_info.target_id]?.type == 'Reroute') { + app.graph._nodes_by_id[link_info.target_id].disconnectInput(link_info.target_slot); + } + + if(this.outputs[0].type == '*'){ + if(link_info.type == '*' && app.graph.getNodeById(link_info.target_id).slots[link_info.target_slot].type != '*') { + app.graph._nodes_by_id[link_info.target_id].disconnectInput(link_info.target_slot); + } + else { + // propagate type + this.outputs[0].type = link_info.type; + this.outputs[0].name = link_info.type; + + for(let i in this.inputs) { + if(this.inputs[i].name != 'select') + this.inputs[i].type = link_info.type; + } + } + } + } + } + else { + if(app.graph._nodes_by_id[link_info.origin_id]?.type == 'Reroute') + this.disconnectInput(link_info.target_slot); + + // connect input + if(this.inputs[0].type == '*'){ + const node = app.graph.getNodeById(link_info.origin_id); + let origin_type = node?.outputs[link_info.origin_slot]?.type; + + if(origin_type==undefined) { + return; // fallback + } + + if(origin_type == '*' && app.graph.getNodeById(link_info.origin_id).slots[link_info.origin_slot].type != '*') { + this.disconnectInput(link_info.target_slot); + return; + } + + for(let i in this.inputs) { + if(this.inputs[i].name != 'select') + this.inputs[i].type = origin_type; + } + + this.outputs[0].type = origin_type; + this.outputs[0].name = 'output1'; + } + + return; + } + + if (!connected && this.outputs.length > 1) { + const stackTrace = new Error().stack; + + if( + !stackTrace.includes('LGraphNode.prototype.connect') && // for touch device + !stackTrace.includes('LGraphNode.connect') && // for mouse device + !stackTrace.includes('loadGraphData')) { + if(this.outputs[link_info.origin_slot].links.length == 0) { + this.removeOutput(link_info.origin_slot); + } + } + } + + let slot_i = 1; + for (let i = 0; i < 
this.outputs.length; i++) { + this.outputs[i].name = `output${slot_i}` + if (this.outputs[i].slot_index === undefined) { + this.outputs[i].slot_index = i; + } + slot_i++; + } + + if(connected) { + // NOTE: node.slot_index is different with link_info.origin_slot + let last_slot_index = this.outputs.length - 1; + if (last_slot_index == link_info.origin_slot) { + this.addOutput(`output${slot_i}`, this.outputs[0].type); + } + } + + let select_slot = this.inputs.find(x => x.name == "select"); + if(this.widgets?.length) { + this.widgets[0].options.max = select_slot?this.outputs.length-1:this.outputs.length; + this.widgets[0].value = Math.min(this.widgets[0].value, this.widgets[0].options.max); + if(this.widgets[0].options.max > 0 && this.widgets[0].value == 0) + this.widgets[0].value = 1; + } + } + } + + if (nodeData.name === 'ImpactMakeImageList' || nodeData.name === 'ImpactMakeImageBatch' || + nodeData.name === 'ImpactMakeMaskList' || nodeData.name === 'ImpactMakeMaskBatch' || + nodeData.name === 'ImpactMakeAnyList' || nodeData.name === 'CombineRegionalPrompts' || + nodeData.name === 'ImpactCombineConditionings' || nodeData.name === 'ImpactConcatConditionings' || + nodeData.name === 'ImpactSEGSConcat' || + nodeData.name === 'ImpactSwitch' || nodeData.name === 'LatentSwitch' || nodeData.name == 'SEGSSwitch') { + var input_name = "input"; + + switch(nodeData.name) { + case 'ImpactMakeImageList': + case 'ImpactMakeImageBatch': + input_name = "image"; + break; + + case 'ImpactMakeMaskList': + case 'ImpactMakeMaskBatch': + input_name = "mask"; + break; + + case 'ImpactMakeAnyList': + input_name = "value"; + break; + + case 'ImpactSEGSConcat': + input_name = "segs"; + break; + + case 'CombineRegionalPrompts': + input_name = "regional_prompts"; + break; + + case 'ImpactCombineConditionings': + case 'ImpactConcatConditionings': + input_name = "conditioning"; + break; + + case 'LatentSwitch': + input_name = "input"; + break; + + case 'SEGSSwitch': + input_name = "input"; + break; + + case 'ImpactSwitch': + input_name = "input"; + } + + const onConnectionsChange = nodeType.prototype.onConnectionsChange; + nodeType.prototype.onConnectionsChange = function (type, index, connected, link_info) { + const stackTrace = new Error().stack; + + // HOTFIX: subgraph + if(stackTrace.includes('convertToSubgraph') || stackTrace.includes('Subgraph.configure')) { + return; + } + + if(stackTrace.includes('loadGraphData')) { + if(this.widgets?.[0]) { + this.widgets[0].options.max = this.inputs.length-3; + this.widgets[0].value = Math.min(this.widgets[0].value, this.widgets[0].options.max); + } + return; + } + + if(stackTrace.includes('pasteFromClipboard')) { + if(this.widgets?.[0]) { + this.widgets[0].options.max = this.inputs.length-3; + this.widgets[0].value = Math.min(this.widgets[0].value, this.widgets[0].options.max); + } + return; + } + + if(!link_info) + return; + + if(type == 2) { + // connect output + if(connected && index == 0){ + if(nodeData.name == 'ImpactSwitch' && app.graph._nodes_by_id[link_info.target_id]?.type == 'Reroute') { + app.graph._nodes_by_id[link_info.target_id].disconnectInput(link_info.target_slot); + } + + if(this.outputs[0].type == '*'){ + if(link_info.type == '*' && app.graph.getNodeById(link_info.target_id).slots[link_info.target_slot].type != '*') { + app.graph._nodes_by_id[link_info.target_id].disconnectInput(link_info.target_slot); + } + else { + // propagate type + this.outputs[0].type = link_info.type; + this.outputs[0].label = link_info.type; + this.outputs[0].name = link_info.type; + 
+ for(let i in this.inputs) { + let input_i = this.inputs[i]; + if(input_i.name != 'select' && input_i.name != 'sel_mode') + input_i.type = link_info.type; + } + } + } + } + + return; + } + else { + if(nodeData.name == 'ImpactSwitch' && app.graph._nodes_by_id[link_info.origin_id]?.type == 'Reroute') + this.disconnectInput(link_info.target_slot); + + // connect input + if(this.inputs[index].name == 'select' || this.inputs[index].name == 'sel_mode') + return; + + if(this.inputs[0].type == '*'){ + const node = app.graph.getNodeById(link_info.origin_id); + + // NOTE: node is undefined when subgraph editing mode + if(node) { + let origin_type = node.outputs[link_info.origin_slot]?.type; + if(link_info.target_slot == 0 && this.inputs.length > 3) { // NOTE: widgets are regarded as input since new front + origin_type = this.inputs[1].type; + node.connect(link_info.origin_slot, node.id, 'input1'); + } + + if(origin_type == '*' && app.graph.getNodeById(link_info.origin_id).slots[link_info.origin_slot].type != '*') { + this.disconnectInput(link_info.target_slot); + return; + } + + for(let i in this.inputs) { + let input_i = this.inputs[i]; + if(input_i.name != 'select' && input_i.name != 'sel_mode') + input_i.type = origin_type; + } + + this.outputs[0].type = origin_type; + this.outputs[0].label = origin_type; + this.outputs[0].name = origin_type; + } + } + } + + let widget_count = 0; + if(nodeData.name == 'ImpactSwitch' || nodeData.name == 'LatentSwitch' || nodeData.name == 'SEGSSwitch') { + widget_count += 1; + } + + if (!connected && (this.inputs.length > widget_count+1)) { + if( + !stackTrace.includes('LGraphNode.prototype.connect') && // for touch device + !stackTrace.includes('LGraphNode.connect') && // for mouse device + !stackTrace.includes('loadGraphData') && + this.inputs[index].name != 'select') { + this.removeInput(index); + } + } + + let slot_i = 1; + for (let i = 0; i < this.inputs.length; i++) { + let input_i = this.inputs[i]; + if(input_i.name != 'select'&& input_i.name != 'sel_mode') { + input_i.name = `${input_name}${slot_i}` + slot_i++; + } + } + + if(connected) { + this.addInput(`${input_name}${slot_i}`, this.outputs[0].type); + } + + if(this.widgets?.[0]) { + this.widgets[0].options.max = this.inputs.length-3; + this.widgets[0].value = Math.min(this.widgets[0].value, this.widgets[0].options.max); + } + } + } + }, + + nodeCreated(node, app) { + if(node.comfyClass == "MaskPainter") { + node.addWidget("button", "Edit mask", null, () => { + ComfyApp.copyToClipspace(node); + ComfyApp.clipspace_return_node = node; + ComfyApp.open_maskeditor(); + }); + } + + switch(node.comfyClass) { + case "ToDetailerPipe": + case "ToDetailerPipeSDXL": + case "BasicPipeToDetailerPipe": + case "BasicPipeToDetailerPipeSDXL": + case "EditDetailerPipe": + case "FaceDetailer": + case "DetailerForEach": + case "DetailerForEachDebug": + case "DetailerForEachPipe": + case "DetailerForEachDebugPipe": + { + for(let i in node.widgets) { + let widget = node.widgets[i]; + if(widget.type === "customtext") { + widget.dynamicPrompts = false; + widget.inputEl.placeholder = "wildcard spec: if kept empty, this option will be ignored"; + widget.serializeValue = () => { + return node.widgets[i].value; + }; + } + } + } + break; + } + + if(node.comfyClass == "ImpactSEGSLabelFilter" || node.comfyClass == "SEGSLabelFilterDetailerHookProvider") { + node.widgets[0].callback = (value, canvas, node, pos, e) => { + if(node) { + if(node.widgets[1].value.trim() != "" && !node.widgets[1].value.trim().endsWith(",")) + 
node.widgets[1].value += ", " + + node.widgets[1].value += value; + if(node.widgets_values) + node.widgets_values[1] = node.widgets[1].value; + } + } + + Object.defineProperty(node.widgets[0], "value", { + set: (value) => { + node._value = value; + }, + get: () => { + return node._value; + } + }); + } + + if(node.comfyClass == "UltralyticsDetectorProvider") { + let model_name_widget = node.widgets.find((w) => w.name === "model_name"); + let orig_draw = node.onDrawForeground; + node.onDrawForeground = function (ctx) { + const r = orig_draw?.apply?.(this, arguments); + + let is_seg = model_name_widget.value?.startsWith('segm/') || model_name_widget.value?.includes('-seg'); + if(!is_seg) { + var slot_pos = new Float32Array(2); + var pos = node.getConnectionPos(false, 1, slot_pos); + + pos[0] -= node.pos[0] - 10; + pos[1] -= node.pos[1]; + + ctx.beginPath(); + ctx.strokeStyle = "red"; + ctx.lineWidth = 4; + ctx.moveTo(pos[0] - 5, pos[1] - 5); + ctx.lineTo(pos[0] + 5, pos[1] + 5); + ctx.moveTo(pos[0] + 5, pos[1] - 5); + ctx.lineTo(pos[0] - 5, pos[1] + 5); + ctx.stroke(); + } + } + } + + if( + node.comfyClass == "ImpactWildcardEncode" || node.comfyClass == "ImpactWildcardProcessor" + || node.comfyClass == "ToDetailerPipe" || node.comfyClass == "ToDetailerPipeSDXL" + || node.comfyClass == "EditDetailerPipe" || node.comfyClass == "EditDetailerPipeSDXL" + || node.comfyClass == "BasicPipeToDetailerPipe" || node.comfyClass == "BasicPipeToDetailerPipeSDXL") { + node._value = "Select the LoRA to add to the text"; + node._wvalue = "Select the Wildcard to add to the text"; + + var tbox_id = 0; + var combo_id = 3; + var has_lora = true; + + switch(node.comfyClass){ + case "ImpactWildcardEncode": + tbox_id = 0; + combo_id = 3; + break; + + case "ImpactWildcardProcessor": + tbox_id = 0; + combo_id = 4; + has_lora = false; + break; + + case "ToDetailerPipe": + case "ToDetailerPipeSDXL": + case "EditDetailerPipe": + case "EditDetailerPipeSDXL": + case "BasicPipeToDetailerPipe": + case "BasicPipeToDetailerPipeSDXL": + tbox_id = 0; + combo_id = 1; + break; + } + + node.widgets[combo_id+1].callback = async (value, canvas, node, pos, e) => { + if(node) { + if(node.widgets[tbox_id].value != '') + node.widgets[tbox_id].value += ', ' + + node.widgets[tbox_id].value += node._wildcard_value; + + // Reload wildcard status to update loaded count + if (wildcard_status.on_demand_mode) { + await load_wildcard_status(); + await load_wildcards(); + app.canvas.setDirty(true); + } + } + } + + Object.defineProperty(node.widgets[combo_id+1], "value", { + set: (value) => { + if (!is_wildcard_label(value)) + node._wildcard_value = value; + }, + get: () => { return get_wildcard_label(); } + }); + + Object.defineProperty(node.widgets[combo_id+1].options, "values", { + set: (x) => {}, + get: () => { + return wildcards_list; + } + }); + + if(has_lora) { + node.widgets[combo_id].callback = (value, canvas, node, pos, e) => { + if(node) { + let lora_name = node._value; + if(lora_name.endsWith('.safetensors')) { + lora_name = lora_name.slice(0, -12); + } + + node.widgets[tbox_id].value += ``; + if(node.widgets_values) { + node.widgets_values[tbox_id] = node.widgets[tbox_id].value; + } + } + } + + Object.defineProperty(node.widgets[combo_id], "value", { + set: (value) => { + if (value !== "Select the LoRA to add to the text") + node._value = value; + }, + + get: () => { return "Select the LoRA to add to the text"; } + }); + } + + // Preventing validation errors from occurring in any situation. 
+ if(has_lora) { + node.widgets[combo_id].serializeValue = () => { return "Select the LoRA to add to the text"; } + } + node.widgets[combo_id+1].serializeValue = () => { return "Select the Wildcard to add to the text"; } + } + + if(node.comfyClass == "ImpactWildcardProcessor" || node.comfyClass == "ImpactWildcardEncode") { + node.widgets[0].inputEl.placeholder = "Wildcard Prompt (User input)"; + node.widgets[1].inputEl.placeholder = "Populated Prompt (Will be generated automatically)"; + node.widgets[1].inputEl.disabled = true; + + const populated_text_widget = node.widgets.find((w) => w.name == 'populated_text'); + const mode_widget = node.widgets.find((w) => w.name == 'mode'); + + // mode combo + Object.defineProperty(mode_widget, "value", { + set: (value) => { + if(value == true) + node._mode_value = "populate"; + else if(value == false) + node._mode_value = "fixed"; + else + node._mode_value = value; // combo value + + populated_text_widget.inputEl.disabled = node._mode_value == 'populate'; + }, + get: () => { + if(node._mode_value != undefined) + return node._mode_value; + else + return 'populate'; + } + }); + } + + if (node.comfyClass == "MaskPainter") { + node.widgets[0].value = '#placeholder'; + + Object.defineProperty(node, "images", { + set: function(value) { + node._images = value; + }, + get: function() { + const id = node.id+""; + if(node.widgets[0].value != '#placeholder') { + var need_invalidate = false; + + if(input_dirty.hasOwnProperty(id) && input_dirty[id]) { + node.widgets[0].value = {...input_tracking[id][1]}; + input_dirty[id] = false; + need_invalidate = true + this._images = app.nodeOutputs[id].images; + } + + let filename = app.nodeOutputs[id]['aux'][1][0]['filename']; + let subfolder = app.nodeOutputs[id]['aux'][1][0]['subfolder']; + let type = app.nodeOutputs[id]['aux'][1][0]['type']; + + let item = + { + image_hash: app.nodeOutputs[id]['aux'][0], + forward_filename: app.nodeOutputs[id]['aux'][1][0]['filename'], + forward_subfolder: app.nodeOutputs[id]['aux'][1][0]['subfolder'], + forward_type: app.nodeOutputs[id]['aux'][1][0]['type'] + }; + + if(node._images) { + app.nodeOutputs[id].images = [{ + ...node._images[0], + ...item + }]; + + node.widgets[0].value = + { + ...node._images[0], + ...item + }; + } + else { + app.nodeOutputs[id].images = [{ + ...item + }]; + + node.widgets[0].value = + { + ...item + }; + } + + if(need_invalidate) { + Promise.all( + app.nodeOutputs[id].images.map((src) => { + return new Promise((r) => { + const img = new Image(); + img.onload = () => r(img); + img.onerror = () => r(null); + img.src = "/view?" 
+ new URLSearchParams(src).toString(); + }); + }) + ).then((imgs) => { + this.imgs = imgs.filter(Boolean); + this.setSizeForImage?.(); + app.graph.setDirtyCanvas(true); + }); + + app.nodeOutputs[id].images[0] = { ...node.widgets[0].value }; + } + + return app.nodeOutputs[id].images; + } + else { + return node._images; + } + } + }); + } + } +}); diff --git a/zavodik/nodes/ComfyUI-Impact-Pack/js/impact-sam-editor.js b/zavodik/nodes/ComfyUI-Impact-Pack/js/impact-sam-editor.js new file mode 100644 index 0000000000000000000000000000000000000000..371987c0b8f9fa7149d48711c3540f14d0768db1 --- /dev/null +++ b/zavodik/nodes/ComfyUI-Impact-Pack/js/impact-sam-editor.js @@ -0,0 +1,641 @@ +import { app } from "../../scripts/app.js"; +import { api } from "../../scripts/api.js"; +import { ComfyDialog, $el } from "../../scripts/ui.js"; +import { ComfyApp } from "../../scripts/app.js"; +import { ClipspaceDialog } from "../../extensions/core/clipspace.js"; + +function addMenuHandler(nodeType, cb) { + const getOpts = nodeType.prototype.getExtraMenuOptions; + nodeType.prototype.getExtraMenuOptions = function () { + const r = getOpts.apply(this, arguments); + cb.apply(this, arguments); + return r; + }; +} + +// Helper function to convert a data URL to a Blob object +function dataURLToBlob(dataURL) { + const parts = dataURL.split(';base64,'); + const contentType = parts[0].split(':')[1]; + const byteString = atob(parts[1]); + const arrayBuffer = new ArrayBuffer(byteString.length); + const uint8Array = new Uint8Array(arrayBuffer); + for (let i = 0; i < byteString.length; i++) { + uint8Array[i] = byteString.charCodeAt(i); + } + return new Blob([arrayBuffer], { type: contentType }); +} + +function loadedImageToBlob(image) { + const canvas = document.createElement('canvas'); + + canvas.width = image.width; + canvas.height = image.height; + + const ctx = canvas.getContext('2d'); + + ctx.drawImage(image, 0, 0); + + const dataURL = canvas.toDataURL('image/png', 1); + const blob = dataURLToBlob(dataURL); + + return blob; +} + +async function uploadMask(filepath, formData) { + await api.fetchApi('/upload/mask', { + method: 'POST', + body: formData + }).then(response => {}).catch(error => { + console.error('Error:', error); + }); + + ComfyApp.clipspace.imgs[ComfyApp.clipspace['selectedIndex']] = new Image(); + ComfyApp.clipspace.imgs[ComfyApp.clipspace['selectedIndex']].src = `view?filename=${filepath.filename}&type=${filepath.type}`; + + if(ComfyApp.clipspace.images) + ComfyApp.clipspace.images[ComfyApp.clipspace['selectedIndex']] = filepath; + + ClipspaceDialog.invalidatePreview(); +} + +class ImpactSamEditorDialog extends ComfyDialog { + static instance = null; + + static getInstance() { + if(!ImpactSamEditorDialog.instance) { + ImpactSamEditorDialog.instance = new ImpactSamEditorDialog(); + } + + return ImpactSamEditorDialog.instance; + } + + constructor() { + super(); + this.element = $el("div.comfy-modal", { parent: document.body }, + [ $el("div.comfy-modal-content", + [...this.createButtons()]), + ]); + } + + createButtons() { + return []; + } + + createButton(name, callback) { + var button = document.createElement("button"); + button.innerText = name; + button.addEventListener("click", callback); + return button; + } + + createLeftButton(name, callback) { + var button = this.createButton(name, callback); + button.style.cssFloat = "left"; + button.style.marginRight = "4px"; + return button; + } + + createRightButton(name, callback) { + var button = this.createButton(name, callback); + button.style.cssFloat = 
"right"; + button.style.marginLeft = "4px"; + return button; + } + + createLeftSlider(self, name, callback) { + const divElement = document.createElement('div'); + divElement.id = "sam-confidence-slider"; + divElement.style.cssFloat = "left"; + divElement.style.fontFamily = "sans-serif"; + divElement.style.marginRight = "4px"; + divElement.style.color = "var(--input-text)"; + divElement.style.backgroundColor = "var(--comfy-input-bg)"; + divElement.style.borderRadius = "8px"; + divElement.style.borderColor = "var(--border-color)"; + divElement.style.borderStyle = "solid"; + divElement.style.fontSize = "15px"; + divElement.style.height = "21px"; + divElement.style.padding = "1px 6px"; + divElement.style.display = "flex"; + divElement.style.position = "relative"; + divElement.style.top = "2px"; + self.confidence_slider_input = document.createElement('input'); + self.confidence_slider_input.setAttribute('type', 'range'); + self.confidence_slider_input.setAttribute('min', '0'); + self.confidence_slider_input.setAttribute('max', '100'); + self.confidence_slider_input.setAttribute('value', '70'); + const labelElement = document.createElement("label"); + labelElement.textContent = name; + + divElement.appendChild(labelElement); + divElement.appendChild(self.confidence_slider_input); + + self.confidence_slider_input.addEventListener("change", callback); + + return divElement; + } + + async detect_and_invalidate_mask_canvas(self) { + const mask_img = await self.detect(self); + + const canvas = self.maskCtx.canvas; + const ctx = self.maskCtx; + + ctx.clearRect(0, 0, canvas.width, canvas.height); + + await new Promise((resolve, reject) => { + self.mask_image = new Image(); + self.mask_image.onload = function() { + ctx.drawImage(self.mask_image, 0, 0, canvas.width, canvas.height); + resolve(); + }; + self.mask_image.onerror = reject; + self.mask_image.src = mask_img.src; + }); + } + + setlayout(imgCanvas, maskCanvas, pointsCanvas) { + const self = this; + + // If it is specified as relative, using it only as a hidden placeholder for padding is recommended + // to prevent anomalies where it exceeds a certain size and goes outside of the window. 
+ var placeholder = document.createElement("div"); + placeholder.style.position = "relative"; + placeholder.style.height = "50px"; + + var bottom_panel = document.createElement("div"); + bottom_panel.style.position = "absolute"; + bottom_panel.style.bottom = "0px"; + bottom_panel.style.left = "20px"; + bottom_panel.style.right = "20px"; + bottom_panel.style.height = "50px"; + + var brush = document.createElement("div"); + brush.id = "sam-brush"; + brush.style.backgroundColor = "blue"; + brush.style.outline = "2px solid pink"; + brush.style.borderRadius = "50%"; + brush.style.MozBorderRadius = "50%"; + brush.style.WebkitBorderRadius = "50%"; + brush.style.position = "absolute"; + brush.style.zIndex = 100; + brush.style.pointerEvents = "none"; + this.brush = brush; + this.element.appendChild(imgCanvas); + this.element.appendChild(maskCanvas); + this.element.appendChild(pointsCanvas); + this.element.appendChild(placeholder); // must be stacked below bottom_panel so it never covers the buttons + this.element.appendChild(bottom_panel); + document.body.appendChild(brush); + this.brush_size = 5; + + var confidence_slider = this.createLeftSlider(self, "Confidence", (event) => { + self.confidence = event.target.value; + }); + + var clearButton = this.createLeftButton("Clear", () => { + self.maskCtx.clearRect(0, 0, self.maskCanvas.width, self.maskCanvas.height); + self.pointsCtx.clearRect(0, 0, self.pointsCanvas.width, self.pointsCanvas.height); + + self.prompt_points = []; + + self.invalidatePointsCanvas(self); + }); + + var detectButton = this.createLeftButton("Detect", () => self.detect_and_invalidate_mask_canvas(self)); + + var cancelButton = this.createRightButton("Cancel", () => { + document.removeEventListener("mouseup", ImpactSamEditorDialog.handleMouseUp); + document.removeEventListener("keydown", ImpactSamEditorDialog.handleKeyDown); + self.close(); + }); + + self.saveButton = this.createRightButton("Save", () => { + document.removeEventListener("mouseup", ImpactSamEditorDialog.handleMouseUp); + document.removeEventListener("keydown", ImpactSamEditorDialog.handleKeyDown); + self.save(self); + }); + + var undoButton = this.createLeftButton("Undo", () => { + if(self.prompt_points.length > 0) { + self.prompt_points.pop(); + self.pointsCtx.clearRect(0, 0, self.pointsCanvas.width, self.pointsCanvas.height); + self.invalidatePointsCanvas(self); + } + }); + + bottom_panel.appendChild(clearButton); + bottom_panel.appendChild(detectButton); + bottom_panel.appendChild(self.saveButton); + bottom_panel.appendChild(cancelButton); + bottom_panel.appendChild(confidence_slider); + bottom_panel.appendChild(undoButton); + + imgCanvas.style.position = "relative"; + imgCanvas.style.top = "200"; + imgCanvas.style.left = "0"; + + maskCanvas.style.position = "absolute"; + maskCanvas.style.opacity = 0.5; + pointsCanvas.style.position = "absolute"; + } + + show() { + this.mask_image = null; + this.prompt_points = []; + + this.message_box = $el("p", ["Please wait a moment while the SAM model and the image are being loaded."]); + this.element.appendChild(this.message_box); + + if(this.imgCanvas) { + this.imgCanvas.getContext('2d').clearRect(0, 0, this.imgCanvas.width, this.imgCanvas.height); + } + + const target_image_path = ComfyApp.clipspace.imgs[ComfyApp.clipspace['selectedIndex']].src; + this.load_sam(target_image_path); + + if(!this.is_layout_created) { + // layout + const imgCanvas = document.createElement('canvas'); + const maskCanvas = document.createElement('canvas'); + const pointsCanvas = document.createElement('canvas'); + + 
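// three stacked canvases: the source image at the bottom, the translucent SAM mask above it, and the prompt-point markers on top + 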
imgCanvas.id = "imageCanvas"; + maskCanvas.id = "samEditorMaskCanvas"; + pointsCanvas.id = "pointsCanvas"; + + this.setlayout(imgCanvas, maskCanvas, pointsCanvas); + + // prepare content + this.imgCanvas = imgCanvas; + this.maskCanvas = maskCanvas; + this.pointsCanvas = pointsCanvas; + this.maskCtx = maskCanvas.getContext('2d'); + this.pointsCtx = pointsCanvas.getContext('2d'); + + this.is_layout_created = true; + + // replacement of onClose hook since close is not real close + const self = this; + const observer = new MutationObserver(function(mutations) { + mutations.forEach(function(mutation) { + if (mutation.type === 'attributes' && mutation.attributeName === 'style') { + if(self.last_display_style && self.last_display_style != 'none' && self.element.style.display == 'none') { + ComfyApp.onClipspaceEditorClosed(); + } + + self.last_display_style = self.element.style.display; + } + }); + }); + + const config = { attributes: true }; + observer.observe(this.element, config); + } + + this.setImages(target_image_path, this.imgCanvas, this.pointsCanvas); + + if(ComfyApp.clipspace_return_node) { + this.saveButton.innerText = "Save to node"; + } + else { + this.saveButton.innerText = "Save"; + } + this.saveButton.disabled = true; + + this.element.style.display = "block"; + this.element.style.zIndex = 8888; // NOTE: alert dialog must be high priority. + } + + updateBrushPreview(self, event) { + event.preventDefault(); + + const centerX = event.pageX; + const centerY = event.pageY; + + const brush = self.brush; + + brush.style.width = self.brush_size * 2 + "px"; + brush.style.height = self.brush_size * 2 + "px"; + brush.style.left = (centerX - self.brush_size) + "px"; + brush.style.top = (centerY - self.brush_size) + "px"; + } + + setImages(target_image_path, imgCanvas, pointsCanvas) { + const imgCtx = imgCanvas.getContext('2d'); + const maskCtx = this.maskCtx; + const maskCanvas = this.maskCanvas; + + const self = this; + + // image load + const orig_image = new Image(); + window.addEventListener("resize", () => { + // repositioning + imgCanvas.width = window.innerWidth - 250; + imgCanvas.height = window.innerHeight - 200; + + // redraw image + let drawWidth = orig_image.width; + let drawHeight = orig_image.height; + + if (orig_image.width > imgCanvas.width) { + drawWidth = imgCanvas.width; + drawHeight = (drawWidth / orig_image.width) * orig_image.height; + } + + if (drawHeight > imgCanvas.height) { + drawHeight = imgCanvas.height; + drawWidth = (drawHeight / orig_image.height) * orig_image.width; + } + + imgCtx.drawImage(orig_image, 0, 0, drawWidth, drawHeight); + + // update mask + let w = (drawWidth * imgCanvas.clientWidth/imgCanvas.width) + "px"; + let h = (drawHeight * imgCanvas.clientHeight/imgCanvas.height) + "px"; + + pointsCanvas.width = drawWidth * imgCanvas.clientWidth/imgCanvas.width; + pointsCanvas.height = drawHeight * imgCanvas.clientHeight/imgCanvas.height; + pointsCanvas.style.top = imgCanvas.offsetTop + "px"; + pointsCanvas.style.left = imgCanvas.offsetLeft + "px"; + + maskCanvas.width = pointsCanvas.width; + maskCanvas.height = pointsCanvas.height; + maskCanvas.style.top = imgCanvas.offsetTop + "px"; + maskCanvas.style.left = imgCanvas.offsetLeft + "px"; + + self.invalidateMaskCanvas(self); + self.invalidatePointsCanvas(self); + }); + + // original image load + orig_image.onload = () => self.onLoaded(self); + const rgb_url = new URL(target_image_path); + rgb_url.searchParams.delete('channel'); + rgb_url.searchParams.set('channel', 'rgb'); + orig_image.src = rgb_url; + 
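// (the 'channel=rgb' query above requests the plain RGB view of the clipspace image, without any mask compositing) + 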
self.image = orig_image; + } + + onLoaded(self) { + if(self.message_box) { + self.element.removeChild(self.message_box); + self.message_box = null; + } + + window.dispatchEvent(new Event('resize')); + + self.setEventHandler(self.pointsCanvas); + self.saveButton.disabled = false; + } + + setEventHandler(targetCanvas) { + targetCanvas.addEventListener("contextmenu", (event) => { + event.preventDefault(); + }); + + const self = this; + targetCanvas.addEventListener('pointermove', (event) => this.updateBrushPreview(self,event)); + targetCanvas.addEventListener('pointerdown', (event) => this.handlePointerDown(self,event)); + targetCanvas.addEventListener('pointerover', (event) => { this.brush.style.display = "block"; }); + targetCanvas.addEventListener('pointerleave', (event) => { this.brush.style.display = "none"; }); + document.addEventListener('keydown', ImpactSamEditorDialog.handleKeyDown); + } + + static handleKeyDown(event) { + const self = ImpactSamEditorDialog.instance; + if (event.key === '=') { // positive + self.brush.style.backgroundColor = "blue"; + self.brush.style.outline = "2px solid pink"; + self.is_positive_mode = true; + } else if (event.key === '-') { // negative + self.brush.style.backgroundColor = "red"; + self.brush.style.outline = "2px solid skyblue"; + self.is_positive_mode = false; + } + } + + is_positive_mode = true; + prompt_points = []; + confidence = 70; + + invalidatePointsCanvas(self) { + const ctx = self.pointsCtx; + + for (const i in self.prompt_points) { + const [is_positive, x, y] = self.prompt_points[i]; + + const scaledX = x * ctx.canvas.width / self.image.width; + const scaledY = y * ctx.canvas.height / self.image.height; + + if(is_positive) + ctx.fillStyle = "blue"; + else + ctx.fillStyle = "red"; + ctx.beginPath(); + ctx.arc(scaledX, scaledY, 3, 0, 2 * Math.PI); + ctx.fill(); + } + } + + invalidateMaskCanvas(self) { + if(self.mask_image) { + self.maskCtx.clearRect(0, 0, self.maskCanvas.width, self.maskCanvas.height); + self.maskCtx.drawImage(self.mask_image, 0, 0, self.maskCanvas.width, self.maskCanvas.height); + } + } + + async load_sam(url) { + const parsedUrl = new URL(url); + const searchParams = new URLSearchParams(parsedUrl.search); + + const filename = searchParams.get("filename") || ""; + const fileType = searchParams.get("type") || ""; + const subfolder = searchParams.get("subfolder") || ""; + + const data = { + sam_model_name: "auto", + filename: filename, + type: fileType, + subfolder: subfolder + }; + + api.fetchApi('/sam/prepare', { + method: 'POST', + headers: { 'Content-Type': 'application/json' }, + body: JSON.stringify(data) + }); + } + + async detect(self) { + const positive_points = []; + const negative_points = []; + + for(const i in self.prompt_points) { + const [is_positive, x, y] = self.prompt_points[i]; + const point = [x,y]; + if(is_positive) { + positive_points.push(point); + } + else + negative_points.push(point); + } + + const data = { + positive_points: positive_points, + negative_points: negative_points, + threshold: self.confidence/100 + }; + + const response = await api.fetchApi('/sam/detect', { + method: 'POST', + headers: { 'Content-Type': 'image/png' }, + body: JSON.stringify(data) + }); + + const blob = await response.blob(); + const url = URL.createObjectURL(blob); + + return new Promise((resolve, reject) => { + const image = new Image(); + image.onload = () => resolve(image); + image.onerror = reject; + image.src = url; + }); + } + + handlePointerDown(self, event) { + if ([0, 2, 5].includes(event.button)) { + event.preventDefault(); + const maskRect = self.pointsCanvas.getBoundingClientRect(); + const
x = event.offsetX || event.targetTouches[0].clientX - maskRect.left; + const y = event.offsetY || event.targetTouches[0].clientY - maskRect.top; + + const originalX = x * self.image.width / self.pointsCanvas.clientWidth; + const originalY = y * self.image.height / self.pointsCanvas.clientHeight; + + var point = null; + if (event.button == 0) { + // positive + point = [true, originalX, originalY]; + } else { + // negative + point = [false, originalX, originalY]; + } + + self.prompt_points.push(point); + + self.invalidatePointsCanvas(self); + } + } + + async save(self) { + if(!self.mask_image) { + this.close(); + return; + } + + const save_canvas = document.createElement('canvas'); + + const save_ctx = save_canvas.getContext('2d', {willReadFrequently:true}); + save_canvas.width = self.mask_image.width; + save_canvas.height = self.mask_image.height; + + save_ctx.drawImage(self.mask_image, 0, 0, save_canvas.width, save_canvas.height); + + const save_data = save_ctx.getImageData(0, 0, save_canvas.width, save_canvas.height); + + // refine mask image + for (let i = 0; i < save_data.data.length; i += 4) { + if(save_data.data[i]) { + save_data.data[i+3] = 0; + } + else { + save_data.data[i+3] = 255; + } + + save_data.data[i] = 0; + save_data.data[i+1] = 0; + save_data.data[i+2] = 0; + } + + save_ctx.globalCompositeOperation = 'source-over'; + save_ctx.putImageData(save_data, 0, 0); + + const formData = new FormData(); + const filename = "clipspace-mask-" + performance.now() + ".png"; + + const item = + { + "filename": filename, + "subfolder": "", + "type": "temp", + }; + + if(ComfyApp.clipspace.images) + ComfyApp.clipspace.images[0] = item; + + if(ComfyApp.clipspace.widgets) { + const index = ComfyApp.clipspace.widgets.findIndex(obj => obj.name === 'image'); + + if(index >= 0) + ComfyApp.clipspace.widgets[index].value = `${filename} [temp]`; + } + + const dataURL = save_canvas.toDataURL(); + const blob = dataURLToBlob(dataURL); + + let original_url = new URL(this.image.src); + + const original_ref = { filename: original_url.searchParams.get('filename') }; + + let original_subfolder = original_url.searchParams.get("subfolder"); + if(original_subfolder) + original_ref.subfolder = original_subfolder; + + let original_type = original_url.searchParams.get("type"); + if(original_type) + original_ref.type = original_type; + + formData.append('image', blob, filename); + formData.append('original_ref', JSON.stringify(original_ref)); + formData.append('type', "temp"); + + await uploadMask(item, formData); + ComfyApp.onClipspaceEditorSave(); + this.close(); + } +} + +app.registerExtension({ + name: "Comfy.Impact.SAMEditor", + init(app) { + const callback = + function () { + let dlg = ImpactSamEditorDialog.getInstance(); + dlg.show(); + }; + + const context_predicate = () => ComfyApp.clipspace && ComfyApp.clipspace.imgs && ComfyApp.clipspace.imgs.length > 0 + ClipspaceDialog.registerButton("Impact SAM Detector", context_predicate, callback); + }, + + async beforeRegisterNodeDef(nodeType, nodeData, app) { + if (Array.isArray(nodeData.output) && (nodeData.output.includes("MASK") || nodeData.output.includes("IMAGE"))) { + addMenuHandler(nodeType, function (_, options) { + options.unshift({ + content: "Open in SAM Detector", + callback: () => { + ComfyApp.copyToClipspace(this); + ComfyApp.clipspace_return_node = this; + + let dlg = ImpactSamEditorDialog.getInstance(); + dlg.show(); + }, + }); + }); + } + } +}); + diff --git a/zavodik/nodes/ComfyUI-Impact-Pack/js/impact-segs-picker.js 
b/zavodik/nodes/ComfyUI-Impact-Pack/js/impact-segs-picker.js new file mode 100644 index 0000000000000000000000000000000000000000..16af55aa5af43e8ff523c3cfcdd8363f092e2d06 --- /dev/null +++ b/zavodik/nodes/ComfyUI-Impact-Pack/js/impact-segs-picker.js @@ -0,0 +1,182 @@ +import { ComfyApp, app } from "../../scripts/app.js"; +import { ComfyDialog, $el } from "../../scripts/ui.js"; +import { api } from "../../scripts/api.js"; + +async function open_picker(node) { + const resp = await api.fetchApi(`/impact/segs/picker/count?id=${node.id}`); + const body = await resp.text(); + + let cnt = parseInt(body); + + var existingPicker = document.getElementById('impact-picker'); + if (existingPicker) { + existingPicker.parentNode.removeChild(existingPicker); + } + + var gallery = document.createElement('div'); + gallery.id = 'impact-picker'; + + gallery.style.position = "absolute"; + gallery.style.height = "80%"; + gallery.style.width = "80%"; + gallery.style.top = "10%"; + gallery.style.left = "10%"; + gallery.style.display = 'flex'; + gallery.style.flexWrap = 'wrap'; + gallery.style.maxHeight = '600px'; + gallery.style.overflow = 'auto'; + gallery.style.backgroundColor = 'rgba(0,0,0,0.3)'; + gallery.style.padding = '20px'; + gallery.draggable = false; + gallery.style.zIndex = 5000; + + var doneButton = document.createElement('button'); + doneButton.textContent = 'Done'; + doneButton.style.padding = '10px 10px'; + doneButton.style.border = 'none'; + doneButton.style.borderRadius = '5px'; + doneButton.style.fontFamily = 'Arial, sans-serif'; + doneButton.style.fontSize = '16px'; + doneButton.style.fontWeight = 'bold'; + doneButton.style.color = '#fff'; + doneButton.style.background = 'linear-gradient(to bottom, #0070B8, #003D66)'; + doneButton.style.boxShadow = '0 2px 4px rgba(0, 0, 0, 0.4)'; + doneButton.style.margin = "20px"; + doneButton.style.height = "40px"; + + var cancelButton = document.createElement('button'); + cancelButton.textContent = 'Cancel'; + cancelButton.style.padding = '10px 10px'; + cancelButton.style.border = 'none'; + cancelButton.style.borderRadius = '5px'; + cancelButton.style.fontFamily = 'Arial, sans-serif'; + cancelButton.style.fontSize = '16px'; + cancelButton.style.fontWeight = 'bold'; + cancelButton.style.color = '#fff'; + cancelButton.style.background = 'linear-gradient(to bottom, #ff70B8, #ff3D66)'; + cancelButton.style.boxShadow = '0 2px 4px rgba(0, 0, 0, 0.4)'; + cancelButton.style.margin = "20px"; + cancelButton.style.height = "40px"; + + const w = node.widgets.find((w) => w.name == 'picks'); + let prev_selected = w.value.split(',').map(function(item) { + return parseInt(item, 10); + }); + + let images = []; + doneButton.onclick = () => { + var result = ''; + for(let i in images) { + if(images[i].isSelected) { + if(result != '') + result += ', '; + + result += (parseInt(i)+1); + } + } + + w.value = result; + + gallery.parentNode.removeChild(gallery); + } + + cancelButton.onclick = () => { + gallery.parentNode.removeChild(gallery); + } + + var panel = document.createElement('div'); + panel.style.clear = 'both'; + panel.style.width = '100%'; + panel.style.height = '40px'; + panel.style.justifyContent = 'center'; + panel.style.alignItems = 'center'; + panel.style.display = 'flex'; + panel.appendChild(doneButton); + panel.appendChild(cancelButton); + gallery.appendChild(panel); + + var hint = document.createElement('label'); + hint.style.position = 'absolute'; + hint.innerHTML = 'Click: Toggle Selection<br>Ctrl-click: Single Selection'; + gallery.appendChild(hint); + + let
max_size = 300; + + for(let i=0; i<cnt; i++) { + let image = new Image(); + // each candidate image is served by the picker view endpoint that pairs with /impact/segs/picker/count + image.src = api.apiURL(`/impact/segs/picker/view?id=${node.id}&idx=${i}&timestamp=${Date.now()}`); + image.style.margin = '10px'; + image.draggable = false; + images.push(image); + + if(prev_selected.includes(i+1)) { + image.style.border = '2px solid #006699'; + image.isSelected = true; + } + + image.onload = function() { + let ratio = 1.0; + if(image.naturalWidth > image.naturalHeight) { + ratio = max_size/image.naturalWidth; + } + else { + ratio = max_size/image.naturalHeight; + } + + let width = image.naturalWidth * ratio; + let height = image.naturalHeight * ratio; + + if(width < height) { + this.style.marginLeft = (200-width)/2+"px"; + } + else{ + this.style.marginTop = (200-height)/2+"px"; + } + + this.style.width = width+"px"; + this.style.height = height+"px"; + this.style.objectFit = 'cover'; + } + + image.addEventListener('click', function(event) { + if(event.ctrlKey) { + for(let i in images) { + if(images[i].isSelected) { + images[i].style.border = 'none'; + images[i].isSelected = false; + } + } + + image.style.border = '2px solid #006699'; + image.isSelected = true; + + return; + } + + if(image.isSelected) { + image.style.border = 'none'; + image.isSelected = false; + } + else { + image.style.border = '2px solid #006699'; + image.isSelected = true; + } + }); + + gallery.appendChild(image); + } + + document.body.appendChild(gallery); +} + + +app.registerExtension({ + name: "Comfy.Impack.Picker", + + nodeCreated(node, app) { + if(node.comfyClass == "ImpactSEGSPicker") { + node.addWidget("button", "pick", "image", () => { + open_picker(node); + }); + } + } +}); \ No newline at end of file diff --git a/zavodik/nodes/ComfyUI-Impact-Pack/js/mask-rect-area-advanced.js b/zavodik/nodes/ComfyUI-Impact-Pack/js/mask-rect-area-advanced.js new file mode 100644 index 0000000000000000000000000000000000000000..9bb30c2dd6b12d930eee88ec164afd1e0eb9ced1 --- /dev/null +++ b/zavodik/nodes/ComfyUI-Impact-Pack/js/mask-rect-area-advanced.js @@ -0,0 +1,459 @@ +import { app } from "../../scripts/app.js"; +import { readLinkedNumber, getDrawColor, computeCanvasSize } from "./common.js"; +function showPreviewCanvas(node, app) { + + const widget = { + type: "customCanvas", + name: "mask-rect-area-canvas", + get value() { + return this.canvas.value; + }, + set value(x) { + this.canvas.value = x; + }, + draw: function (ctx, node, widgetWidth, widgetY) { + + // If we are initially offscreen when created we won't have received a resize event + // Calculate it here instead + if (!node.canvasHeight) { + computeCanvasSize(node, node.size, 220, 240); + } + + const visible = true; + const t = ctx.getTransform(); + const margin = 12; + const border = 2; + const widgetHeight = node.canvasHeight; + + // Keep preview in sync when inputs are driven by links.
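+ // (readLinkedNumber reads the current value from each converted input's upstream widget; + // the helper clamps it and mirrors it into node.properties before drawing.)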
+ syncLinkedInputsToPropertiesAdvanced(node); + + const width = Math.max(1, Math.round(node.properties["width"])); + const height = Math.max(1, Math.round(node.properties["height"])); + const scale = Math.min( + (widgetWidth - margin * 3) / width, + (widgetHeight - margin * 3) / height + ); + const blurRadius = node.properties["blur_radius"] || 0; + const index = 0; + + Object.assign(this.canvas.style, { + left: `${t.e}px`, + top: `${t.f + (widgetY * t.d)}px`, + width: `${widgetWidth * t.a}px`, + height: `${widgetHeight * t.d}px`, + position: "absolute", + zIndex: 1, + fontSize: `${t.d * 10.0}px`, + pointerEvents: "none" + }); + + this.canvas.hidden = !visible; + + let backgroundWidth = width * scale; + let backgroundHeight = height * scale; + + let xOffset = margin; + if (backgroundWidth < widgetWidth) { + xOffset += (widgetWidth - backgroundWidth) / 2 - margin; + } + let yOffset = (margin / 2); + if (backgroundHeight < widgetHeight) { + yOffset += (widgetHeight - backgroundHeight) / 2 - margin; + } + + let widgetX = xOffset; + widgetY = widgetY + yOffset; + + // Draw the background border + ctx.fillStyle = globalThis.LiteGraph.WIDGET_OUTLINE_COLOR; + ctx.fillRect(widgetX - border, widgetY - border, backgroundWidth + border * 2, backgroundHeight + border * 2) + + // Draw the main background area + ctx.fillStyle = globalThis.LiteGraph.WIDGET_BGCOLOR; + ctx.fillRect(widgetX, widgetY, backgroundWidth, backgroundHeight); + + // Draw the conditioning zone + let [x, y, w, h] = getDrawArea(node, backgroundWidth, backgroundHeight); + + ctx.fillStyle = getDrawColor(0, "80"); + ctx.fillRect(widgetX + x, widgetY + y, w, h); + ctx.beginPath(); + ctx.lineWidth = 1; + + // Draw grid lines + for (let x = 0; x <= width / 64; x += 1) { + ctx.moveTo(widgetX + x * 64 * scale, widgetY); + ctx.lineTo(widgetX + x * 64 * scale, widgetY + backgroundHeight); + } + + for (let y = 0; y <= height / 64; y += 1) { + ctx.moveTo(widgetX, widgetY + y * 64 * scale); + ctx.lineTo(widgetX + backgroundWidth, widgetY + y * 64 * scale); + } + + ctx.strokeStyle = "#66666650"; + ctx.stroke(); + ctx.closePath(); + + // Draw current zone + let [sx, sy, sw, sh] = getDrawArea(node, backgroundWidth, backgroundHeight); + + ctx.fillStyle = getDrawColor(0, "80"); + ctx.fillRect(widgetX + sx, widgetY + sy, sw, sh); + + ctx.fillStyle = getDrawColor(0, "40"); + ctx.fillRect(widgetX + sx + border, widgetY + sy + border, sw - border * 2, sh - border * 2); + + // Draw white border around the current zone + ctx.strokeStyle = globalThis.LiteGraph.NODE_SELECTED_TITLE_COLOR; + ctx.lineWidth = 2; + ctx.strokeRect(widgetX + sx, widgetY + sy, sw, sh); + + // Display + ctx.beginPath(); + + ctx.arc(LiteGraph.NODE_SLOT_HEIGHT * 0.5, LiteGraph.NODE_SLOT_HEIGHT * (index + 0.5) + 4, 4, 0, Math.PI * 2); + ctx.fill(); + + ctx.lineWidth = 1; + ctx.strokeStyle = "white"; + ctx.stroke(); + + ctx.lineWidth = 1; + ctx.closePath(); + + // Draw progress bar canvas + if (backgroundWidth < widgetWidth) { + xOffset += (widgetWidth - backgroundWidth) / 2 - margin; + } + + // Adjust X and Y coordinates + const barHeight = 8; + let widgetYBar = widgetY + backgroundHeight + margin; + + // Draw the border around the progress bar + ctx.fillStyle = globalThis.LiteGraph.WIDGET_OUTLINE_COLOR; + ctx.fillRect( + widgetX - border, + widgetYBar - border, + backgroundWidth + border * 2, + barHeight + border * 2 + ); + + // Draw the main bar area (background) + ctx.fillStyle = globalThis.LiteGraph.WIDGET_BGCOLOR; + ctx.fillRect( + widgetX, + widgetYBar, + backgroundWidth, + 
barHeight + ); + + // Draw progress bar grid + ctx.beginPath(); + ctx.lineWidth = 1; + ctx.strokeStyle = "#66666650"; + + // Calculate the number of grid lines based on the bar size + const numLines = Math.floor(backgroundWidth / 64); + + // Draw grid lines + for (let x = 0; x <= width / 64; x += 1) { + ctx.moveTo(widgetX + x * 64 * scale, widgetYBar); + ctx.lineTo(widgetX + x * 64 * scale, widgetYBar + barHeight); + } + ctx.stroke(); + ctx.closePath(); + + // Draw progress (based on blur_radius) + const progress = Math.min(blurRadius / 255, 1); + ctx.fillStyle = "rgba(0, 120, 255, 0.5)"; + + ctx.fillRect( + widgetX, + widgetYBar, + backgroundWidth * progress, + barHeight + ); + } + }; + + widget.canvas = document.createElement("canvas"); + widget.canvas.className = "mask-rect-area-canvas"; + widget.parent = node; + + widget.computeLayoutSize = function (node) { + return { + minHeight: 200, + maxHeight: 300 + }; + }; + + document.body.appendChild(widget.canvas); + node.addCustomWidget(widget); + + app.canvas.onDrawBackground = function () { + // The draw callback isn't fired once a node is off the screen; + // if it goes off screen quickly, its DOM canvas may never be removed. + // This shifts every custom canvas off screen so draw() can move it back while the node is visible. + for (let n in app.graph._nodes) { + n = app.graph._nodes[n]; + for (let w in n.widgets) { + let wid = n.widgets[w]; + if (Object.hasOwn(wid, "canvas")) { + wid.canvas.style.left = -8000 + "px"; + wid.canvas.style.position = "absolute"; + } + } + } + }; + + node.onResize = function (size) { + computeCanvasSize(node, size, 220, 240); + }; + + return {minWidth: 200, minHeight: 200, widget}; +} + +app.registerExtension({ + name: "drltdata.MaskRectAreaAdvanced", + async beforeRegisterNodeDef(nodeType, nodeData, app) { + if (nodeData.name !== "MaskRectAreaAdvanced") { + return; + } + + const onNodeCreated = nodeType.prototype.onNodeCreated; + nodeType.prototype.onNodeCreated = function () { + const r = onNodeCreated ? onNodeCreated.apply(this, arguments) : undefined; + + this.setProperty("width", 512); + this.setProperty("height", 512); + this.setProperty("x", 0); + this.setProperty("y", 0); + this.setProperty("w", 256); + this.setProperty("h", 256); + this.setProperty("blur_radius", 0); + + this.selected = false; + this.index = 3; + this.serialize_widgets = true; + + // If the node already provides widgets from Python/ComfyUI, do NOT recreate them + const hasExisting = Array.isArray(this.widgets) && this.widgets.some(w => w && w.name === "x"); + + // Helper: attach callbacks to existing widgets to keep node.properties in sync (canvas preview). + const hookWidget = (node, widgetName, propName, opts) => { + if (!Array.isArray(node.widgets)) { + return; + } + const w = node.widgets.find(ww => ww && ww.name === widgetName); + if (!w) { + return; + } + + const min = (opts && typeof opts.min === "number") ? opts.min : undefined; + const max = (opts && typeof opts.max === "number") ? opts.max : undefined; + const step = (opts && typeof opts.step === "number") ?
opts.step : undefined; + + if (node.properties && Object.prototype.hasOwnProperty.call(node.properties, propName)) { + w.value = node.properties[propName]; + } else { + node.properties[propName] = w.value; + } + + const prevCb = w.callback; + w.callback = function (v, ...args) { + let val = v; + if (typeof val === "number") { + if (typeof step === "number" && step > 0) { + const s = step / 10; + val = Math.round(val / s) * s; + } else { + val = Math.round(val); + } + if (typeof min === "number") { + val = Math.max(min, val); + } + if (typeof max === "number") { + val = Math.min(max, val); + } + } + this.value = val; + node.properties[propName] = val; + if (prevCb) { + return prevCb.call(this, val, ...args); + } + }; + }; + + if (hasExisting) { + hookWidget(this, "x", "x", {"step": 10}); + hookWidget(this, "y", "y", {"step": 10}); + hookWidget(this, "width", "w", {"step": 10}); + hookWidget(this, "height", "h", {"step": 10}); + hookWidget(this, "image_width", "width", {"step": 10}); + hookWidget(this, "image_height", "height", {"step": 10}); + hookWidget(this, "blur_radius", "blur_radius", {"min": 0, "max": 255, "step": 10}); + } else { + CUSTOM_INT(this, "x", 0, function (v, _, node) { + const s = this.options.step / 10; + this.value = Math.round(v / s) * s; + node.properties["x"] = this.value; + }); + CUSTOM_INT(this, "y", 0, function (v, _, node) { + const s = this.options.step / 10; + this.value = Math.round(v / s) * s; + node.properties["y"] = this.value; + }); + CUSTOM_INT(this, "width", 256, function (v, _, node) { + const s = this.options.step / 10; + this.value = Math.round(v / s) * s; + node.properties["w"] = this.value; + }); + CUSTOM_INT(this, "height", 256, function (v, _, node) { + const s = this.options.step / 10; + this.value = Math.round(v / s) * s; + node.properties["h"] = this.value; + }); + CUSTOM_INT(this, "image_width", 512, function (v, _, node) { + const s = this.options.step / 10; + this.value = Math.round(v / s) * s; + node.properties["width"] = this.value; + }); + CUSTOM_INT(this, "image_height", 512, function (v, _, node) { + const s = this.options.step / 10; + this.value = Math.round(v / s) * s; + node.properties["height"] = this.value; + }); + CUSTOM_INT(this, "blur_radius", 0, function (v, _, node) { + this.value = Math.round(v) || 0; + node.properties["blur_radius"] = this.value; + }, + {"min": 0, "max": 255, "step": 10} + ); + } + + showPreviewCanvas(this, app); + + this.onSelected = function () { + this.selected = true; + }; + this.onDeselected = function () { + this.selected = false; + }; + + return r; + }; + } +}); + +// Calculate the drawing area using individual properties. 
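+// The rectangle is scaled from image space into preview pixels and clamped so it +// never extends past the scaled background.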
+function getDrawArea(node, backgroundWidth, backgroundHeight) {
+    let x = node.properties["x"] * backgroundWidth / node.properties["width"];
+    let y = node.properties["y"] * backgroundHeight / node.properties["height"];
+    let w = node.properties["w"] * backgroundWidth / node.properties["width"];
+    let h = node.properties["h"] * backgroundHeight / node.properties["height"];
+
+    if (x > backgroundWidth) {
+        x = backgroundWidth;
+    }
+    if (y > backgroundHeight) {
+        y = backgroundHeight;
+    }
+
+    if (x + w > backgroundWidth) {
+        w = Math.max(0, backgroundWidth - x);
+    }
+
+    if (y + h > backgroundHeight) {
+        h = Math.max(0, backgroundHeight - y);
+    }
+
+    return [x, y, w, h];
+}
+
+function CUSTOM_INT(node, inputName, val, func, config = {}) {
+    return {
+        widget: node.addWidget(
+            "number",
+            inputName,
+            val,
+            func,
+            Object.assign({}, {min: 0, max: 4096, step: 640, precision: 0}, config)
+        )
+    };
+}
+
+function syncLinkedInputsToPropertiesAdvanced(node) {
+    let changed = false;
+
+    const vx = readLinkedNumber(node, "x");
+    if (vx != null) {
+        const nv = Math.max(0, Math.round(vx));
+        if (node.properties["x"] !== nv) {
+            node.properties["x"] = nv;
+            changed = true;
+        }
+    }
+
+    const vy = readLinkedNumber(node, "y");
+    if (vy != null) {
+        const nv = Math.max(0, Math.round(vy));
+        if (node.properties["y"] !== nv) {
+            node.properties["y"] = nv;
+            changed = true;
+        }
+    }
+
+    // Input "width" is the rectangle width in px -> property "w"
+    const vw = readLinkedNumber(node, "width");
+    if (vw != null) {
+        const nv = Math.max(0, Math.round(vw));
+        if (node.properties["w"] !== nv) {
+            node.properties["w"] = nv;
+            changed = true;
+        }
+    }
+
+    // Input "height" is the rectangle height in px -> property "h"
+    const vh = readLinkedNumber(node, "height");
+    if (vh != null) {
+        const nv = Math.max(0, Math.round(vh));
+        if (node.properties["h"] !== nv) {
+            node.properties["h"] = nv;
+            changed = true;
+        }
+    }
+
+    // Image size (must be >= 1 to avoid division by zero in getDrawArea)
+    const viw = readLinkedNumber(node, "image_width");
+    if (viw != null) {
+        const nv = Math.max(1, Math.round(viw));
+        if (node.properties["width"] !== nv) {
+            node.properties["width"] = nv;
+            changed = true;
+        }
+    }
+
+    const vih = readLinkedNumber(node, "image_height");
+    if (vih != null) {
+        const nv = Math.max(1, Math.round(vih));
+        if (node.properties["height"] !== nv) {
+            node.properties["height"] = nv;
+            changed = true;
+        }
+    }
+
+    const vbr = readLinkedNumber(node, "blur_radius");
+    if (vbr != null) {
+        const nv = Math.max(0, Math.min(255, Math.round(vbr)));
+        if (node.properties["blur_radius"] !== nv) {
+            node.properties["blur_radius"] = nv;
+            changed = true;
+        }
+    }
+
+    return changed;
+}
+
diff --git a/zavodik/nodes/ComfyUI-Impact-Pack/js/mask-rect-area.js b/zavodik/nodes/ComfyUI-Impact-Pack/js/mask-rect-area.js
new file mode 100644
index 0000000000000000000000000000000000000000..44dcdb126bff1a51f33218fc08c6497d2738289e
--- /dev/null
+++ b/zavodik/nodes/ComfyUI-Impact-Pack/js/mask-rect-area.js
@@ -0,0 +1,494 @@
+import { app } from "../../scripts/app.js";
+import { readLinkedNumber, getDrawColor, computeCanvasSize } from "./common.js";
+function showPreviewCanvas(node, app) {
+
+    const widget = {
+        type: "customCanvas",
+        name: "mask-rect-area-canvas",
+        get value() {
+            return this.canvas.value;
+        },
+        set value(x) {
+            this.canvas.value = x;
+        },
+        draw: function (ctx, node, widgetWidth, widgetY) {
+
+            // If we are initially off screen when created we won't have received a resize event,
+            // so compute the canvas size here instead.
+            if (!node.canvasHeight) {
+                computeCanvasSize(node, node.size, 200, 200);
+            }
+
+            const visible = true;
+            const t = ctx.getTransform();
+            const margin = 12;
+            const border = 2;
+            const widgetHeight = node.canvasHeight;
+            const width = 512;
+            const height = 512;
+            const scale = Math.min((widgetWidth - margin * 3) / width, (widgetHeight - margin * 3) / height);
+            const blurRadius = node.properties["blur_radius"] || 0;
+            const index = 0;
+
+            Object.assign(this.canvas.style, {
+                left: `${t.e}px`,
+                top: `${t.f + (widgetY * t.d)}px`,
+                width: `${widgetWidth * t.a}px`,
+                height: `${widgetHeight * t.d}px`,
+                position: "absolute",
+                zIndex: 1,
+                fontSize: `${t.d * 10.0}px`,
+                pointerEvents: "none"
+            });
+
+            this.canvas.hidden = !visible;
+
+            let backgroundWidth = width * scale;
+            let backgroundHeight = height * scale;
+            let xOffset = margin;
+            if (backgroundWidth < widgetWidth) {
+                xOffset += (widgetWidth - backgroundWidth) / 2 - margin;
+            }
+            let yOffset = (margin / 2);
+            if (backgroundHeight < widgetHeight) {
+                yOffset += (widgetHeight - backgroundHeight) / 2 - margin;
+            }
+
+            let widgetX = xOffset;
+            widgetY = widgetY + yOffset;
+
+            // Draw the background border
+            ctx.fillStyle = globalThis.LiteGraph.WIDGET_OUTLINE_COLOR;
+            ctx.fillRect(widgetX - border, widgetY - border, backgroundWidth + border * 2, backgroundHeight + border * 2);
+
+            // Draw the main background area
+            ctx.fillStyle = globalThis.LiteGraph.WIDGET_BGCOLOR;
+            ctx.fillRect(widgetX, widgetY, backgroundWidth, backgroundHeight);
+
+            // Keep preview in sync when inputs are driven by links.
+            syncLinkedInputsToProperties(node);
+
+            // Draw the rectangle zone beneath the grid
+            let [x, y, w, h] = getDrawArea(node, backgroundWidth, backgroundHeight);
+
+            ctx.fillStyle = getDrawColor(0, "80");
+            ctx.fillRect(widgetX + x, widgetY + y, w, h);
+            ctx.beginPath();
+            ctx.lineWidth = 1;
+
+            // Draw grid lines (64px grid in image space)
+            for (let x = 0; x <= width / 64; x += 1) {
+                ctx.moveTo(widgetX + x * 64 * scale, widgetY);
+                ctx.lineTo(widgetX + x * 64 * scale, widgetY + backgroundHeight);
+            }
+
+            for (let y = 0; y <= height / 64; y += 1) {
+                ctx.moveTo(widgetX, widgetY + y * 64 * scale);
+                ctx.lineTo(widgetX + backgroundWidth, widgetY + y * 64 * scale);
+            }
+
+            ctx.strokeStyle = "#66666650";
+            ctx.stroke();
+            ctx.closePath();
+
+            // Re-draw the current zone on top of the grid, with a highlighted fill
+            let [sx, sy, sw, sh] = getDrawArea(node, backgroundWidth, backgroundHeight);
+
+            ctx.fillStyle = getDrawColor(0, "80");
+            ctx.fillRect(widgetX + sx, widgetY + sy, sw, sh);
+
+            ctx.fillStyle = getDrawColor(0, "40");
+            ctx.fillRect(widgetX + sx + border, widgetY + sy + border, sw - border * 2, sh - border * 2);
+
+            // Draw white border around the current zone
+            ctx.strokeStyle = globalThis.LiteGraph.NODE_SELECTED_TITLE_COLOR;
+            ctx.lineWidth = 2;
+            ctx.strokeRect(widgetX + sx, widgetY + sy, sw, sh);
+
+            // Draw a marker dot next to slot `index`
+            ctx.beginPath();
+
+            ctx.arc(LiteGraph.NODE_SLOT_HEIGHT * 0.5, LiteGraph.NODE_SLOT_HEIGHT * (index + 0.5) + 4, 4, 0, Math.PI * 2);
+            ctx.fill();
+
+            ctx.lineWidth = 1;
+            ctx.strokeStyle = "white";
+            ctx.stroke();
+            ctx.lineWidth = 1;
+            ctx.closePath();
+
+            // Draw the progress bar below the preview (widgetX already includes horizontal centering)
+            const barHeight = 8;
+            let widgetYBar = widgetY + backgroundHeight + margin;
+
+            // Draw progress bar border
+            ctx.fillStyle = globalThis.LiteGraph.WIDGET_OUTLINE_COLOR;
+            ctx.fillRect(
+                widgetX - border,
+                widgetYBar - border,
+                backgroundWidth + border * 2,
+                barHeight + border * 2
+            );
+
+            // Draw progress bar area (same background color as the preview canvas)
+            ctx.fillStyle = globalThis.LiteGraph.WIDGET_BGCOLOR;
+            ctx.fillRect(
+                widgetX,
+                widgetYBar,
+                backgroundWidth,
+                barHeight
+            );
+
+            // Draw the progress bar grid (vertical lines every 64px of image space)
+            ctx.beginPath();
+            ctx.lineWidth = 1;
+            ctx.strokeStyle = "#66666650";
+
+            for (let x = 0; x <= width / 64; x += 1) {
+                ctx.moveTo(widgetX + x * 64 * scale, widgetYBar);
+                ctx.lineTo(widgetX + x * 64 * scale, widgetYBar + barHeight);
+            }
+            ctx.stroke();
+            ctx.closePath();
+
+            // Draw the progress fill (blur_radius mapped to 0..1)
+            const progress = Math.min(blurRadius / 255, 1);
+            ctx.fillStyle = "rgba(0, 120, 255, 0.5)";
+
+            ctx.fillRect(
+                widgetX,
+                widgetYBar,
+                backgroundWidth * progress,
+                barHeight
+            );
+        }
+    };
+
+    widget.canvas = document.createElement("canvas");
+    widget.canvas.className = "mask-rect-area-canvas";
+    widget.parent = node;
+
+    widget.computeLayoutSize = function (node) {
+        return {
+            minHeight: 200,
+            maxHeight: 300
+        };
+    };
+
+    document.body.appendChild(widget.canvas);
+    node.addCustomWidget(widget);
+
+    app.canvas.onDrawBackground = function () {
+        // The widget's draw callback isn't fired once the node is off the screen,
+        // so if the node leaves the view quickly its DOM canvas may be left behind.
+        // Park every custom canvas off screen here; draw() moves it back while the node is visible.
+        for (let n in app.graph._nodes) {
+            n = app.graph._nodes[n];
+            for (let w in n.widgets) {
+                let wid = n.widgets[w];
+                if (Object.hasOwn(wid, "canvas")) {
+                    wid.canvas.style.left = -8000 + "px";
+                    wid.canvas.style.position = "absolute";
+                }
+            }
+        }
+    };
+
+    node.onResize = function (size) {
+        computeCanvasSize(node, size, 200, 200);
+    };
+
+    return {minWidth: 200, minHeight: 200, widget};
+}
+
+app.registerExtension({
+    name: 'drltdata.MaskRectArea',
+    async beforeRegisterNodeDef(nodeType, nodeData, app) {
+        if (nodeData.name !== "MaskRectArea") {
+            return;
+        }
+
+        const onNodeCreated = nodeType.prototype.onNodeCreated;
+        nodeType.prototype.onNodeCreated = function () {
+            const r = onNodeCreated ? onNodeCreated.apply(this, arguments) : undefined;
+
+            this.setProperty("width", 512);
+            this.setProperty("height", 512);
+            this.setProperty("x", 0);
+            this.setProperty("y", 0);
+            this.setProperty("w", 50);
+            this.setProperty("h", 50);
+            this.setProperty("blur_radius", 0);
+
+            this.selected = false;
+            this.index = 3;
+            this.serialize_widgets = true;
+
+            // If Python/ComfyUI already created typed widgets, do not recreate them (avoid duplicates).
+            const hasExisting = Array.isArray(this.widgets) && this.widgets.some(w => w && w.name === "x");
+
+            // Hook existing widgets to keep node.properties in sync (canvas uses properties).
+            const hookWidget = (node, widgetName, propName, opts) => {
+                if (!Array.isArray(node.widgets)) {
+                    return;
+                }
+                const w = node.widgets.find(ww => ww && ww.name === widgetName);
+                if (!w) {
+                    return;
+                }
+
+                const min = (opts && typeof opts.min === "number") ? opts.min : undefined;
+                const max = (opts && typeof opts.max === "number") ?
opts.max : undefined; + + if (node.properties && Object.prototype.hasOwnProperty.call(node.properties, propName)) { + w.value = node.properties[propName]; + } else { + node.properties[propName] = w.value; + } + + const prevCb = w.callback; + w.callback = function (v, ...args) { + let val = v; + + if (typeof val === "number") { + val = Math.round(val); + + if (typeof min === "number") { + val = Math.max(min, val); + } + if (typeof max === "number") { + val = Math.min(max, val); + } + } + + this.value = val; + node.properties[propName] = val; + + if (prevCb) { + return prevCb.call(this, val, ...args); + } + }; + }; + + if (hasExisting) { + // Note: "width"/"height" widgets map to "w"/"h" properties (percent-based). + hookWidget(this, "x", "x", {"min": 0, "max": 100}); + hookWidget(this, "y", "y", {"min": 0, "max": 100}); + hookWidget(this, "width", "w", {"min": 0, "max": 100}); + hookWidget(this, "height", "h", {"min": 0, "max": 100}); + hookWidget(this, "blur_radius", "blur_radius", {"min": 0, "max": 255}); + } else { + CUSTOM_INT(this, "x", 0, function (v, _, node) { + this.value = Math.max(0, Math.min(100, Math.round(v))); + node.properties["x"] = this.value; + }); + CUSTOM_INT(this, "y", 0, function (v, _, node) { + this.value = Math.max(0, Math.min(100, Math.round(v))); + node.properties["y"] = this.value; + }); + CUSTOM_INT(this, "w", 50, function (v, _, node) { + this.value = Math.max(0, Math.min(100, Math.round(v))); + node.properties["w"] = this.value; + }); + CUSTOM_INT(this, "h", 50, function (v, _, node) { + this.value = Math.max(0, Math.min(100, Math.round(v))); + node.properties["h"] = this.value; + }); + CUSTOM_INT(this, "blur_radius", 0, function (v, _, node) { + this.value = Math.round(v) || 0; + node.properties["blur_radius"] = this.value; + }, {"min": 0, "max": 255, "step": 10}); + + // If Python widgets exist, they will be used instead; this is back-compat only. + } + + showPreviewCanvas(this, app); + + // Sync linked input values -> node.properties so the preview updates when driven by connections. + const prevOnExecute = this.onExecute; + this.onExecute = function () { + const rr = prevOnExecute ? prevOnExecute.apply(this, arguments) : undefined; + + const readLinkedInt = (inputName) => { + if (!Array.isArray(this.inputs)) { + return null; + } + const inp = this.inputs.find(i => i && i.name === inputName); + if (!inp || !inp.link) { + return null; + } + try { + const v = this.getInputData(inputName); + return (typeof v === "number") ? 
v : null; + } catch (e) { + return null; + } + }; + + let changed = false; + + const vx = readLinkedInt("x"); + if (vx != null) { + const nv = Math.max(0, Math.min(100, Math.round(vx))); + if (this.properties["x"] !== nv) { + this.properties["x"] = nv; + changed = true; + } + } + + const vy = readLinkedInt("y"); + if (vy != null) { + const nv = Math.max(0, Math.min(100, Math.round(vy))); + if (this.properties["y"] !== nv) { + this.properties["y"] = nv; + changed = true; + } + } + + const vw = readLinkedInt("width"); + if (vw != null) { + const nv = Math.max(0, Math.min(100, Math.round(vw))); + if (this.properties["w"] !== nv) { + this.properties["w"] = nv; + changed = true; + } + } + + const vh = readLinkedInt("height"); + if (vh != null) { + const nv = Math.max(0, Math.min(100, Math.round(vh))); + if (this.properties["h"] !== nv) { + this.properties["h"] = nv; + changed = true; + } + } + + const vbr = readLinkedInt("blur_radius"); + if (vbr != null) { + const nv = Math.max(0, Math.min(255, Math.round(vbr))); + if (this.properties["blur_radius"] !== nv) { + this.properties["blur_radius"] = nv; + changed = true; + } + } + + if (changed) { + this.setDirtyCanvas(true, true); + if (this.graph) { + this.graph.setDirtyCanvas(true, true); + } + } + + return rr; + }; + + this.onSelected = function () { + this.selected = true; + }; + this.onDeselected = function () { + this.selected = false; + }; + + return r; + }; + } +}); + + +// Calculate the drawing area using percentage-based properties. +function getDrawArea(node, backgroundWidth, backgroundHeight) { + // Convert percentages to actual pixel values based on the background dimensions + let x = (node.properties["x"] / 100) * backgroundWidth; + let y = (node.properties["y"] / 100) * backgroundHeight; + let w = (node.properties["w"] / 100) * backgroundWidth; + let h = (node.properties["h"] / 100) * backgroundHeight; + + // Ensure the values do not exceed the background boundaries + if (x > backgroundWidth) { + x = backgroundWidth; + } + if (y > backgroundHeight) { + y = backgroundHeight; + } + + // Adjust width and height to fit within the background dimensions + if (x + w > backgroundWidth) { + w = Math.max(0, backgroundWidth - x); + } + if (y + h > backgroundHeight) { + h = Math.max(0, backgroundHeight - y); + } + + return [x, y, w, h]; +} + +function CUSTOM_INT(node, inputName, val, func, config = {}) { + return { + widget: node.addWidget( + "number", + inputName, + val, + func, + Object.assign({}, {min: 0, max: 100, step: 10, precision: 0}, config) + ) + }; +} + +function syncLinkedInputsToProperties(node) { + let changed = false; + + const vx = readLinkedNumber(node, "x"); + if (vx != null) { + const nv = Math.max(0, Math.min(100, Math.round(vx))); + if (node.properties["x"] !== nv) { + node.properties["x"] = nv; + changed = true; + } + } + + const vy = readLinkedNumber(node, "y"); + if (vy != null) { + const nv = Math.max(0, Math.min(100, Math.round(vy))); + if (node.properties["y"] !== nv) { + node.properties["y"] = nv; + changed = true; + } + } + + const vw = readLinkedNumber(node, "width"); + if (vw != null) { + const nv = Math.max(0, Math.min(100, Math.round(vw))); + if (node.properties["w"] !== nv) { + node.properties["w"] = nv; + changed = true; + } + } + + const vh = readLinkedNumber(node, "height"); + if (vh != null) { + const nv = Math.max(0, Math.min(100, Math.round(vh))); + if (node.properties["h"] !== nv) { + node.properties["h"] = nv; + changed = true; + } + } + + const vbr = readLinkedNumber(node, "blur_radius"); + if (vbr != 
null) { + const nv = Math.max(0, Math.min(255, Math.round(vbr))); + if (node.properties["blur_radius"] !== nv) { + node.properties["blur_radius"] = nv; + changed = true; + } + } + + return changed; +} diff --git a/zavodik/nodes/ComfyUI-Impact-Pack/latent.png b/zavodik/nodes/ComfyUI-Impact-Pack/latent.png new file mode 100644 index 0000000000000000000000000000000000000000..a961c8dbc4a8f237ddd13eea00a1ee4c8055b6de --- /dev/null +++ b/zavodik/nodes/ComfyUI-Impact-Pack/latent.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:1faf0cc926b0d65c8ab93e7485aea816283eb22fdeefe9682c206948c1de2043 +size 2814 diff --git a/zavodik/nodes/ComfyUI-Impact-Pack/locales/ko/nodeDefs.json b/zavodik/nodes/ComfyUI-Impact-Pack/locales/ko/nodeDefs.json new file mode 100644 index 0000000000000000000000000000000000000000..eab19927e42ccbeed5f72cf5b044c0e147a6cb7e --- /dev/null +++ b/zavodik/nodes/ComfyUI-Impact-Pack/locales/ko/nodeDefs.json @@ -0,0 +1,1241 @@ +{ + "FaceDetailer": { + "description": "감지 모델(bbox, segm, sam) 모델을 이용해서 입력 이미지에서 자동으로 특정 객체를 감지하고, 감지 영역을 가이드 크기를 기반으로 확대해서 인페이트하는 방법으로 디테일을 강화합니다.\n사용자들이 자주 사용하는 얼굴 디테일 강화 워크플로를 단순화하기 위해 특화 시킨 노드이긴 하지만, 감지 모델에 따라서 다양한 자동 인페인트 용도로 사용 가능합니다.", + "display_name": "얼굴 디테일러", + "inputs": { + "image": { + "name": "이미지" + }, + "model": { + "name": "모델", + "tooltip": "만약 `ImpactDummyInput` 을 연결 하면, 인페인트 단계를 건너 뜁니다." + }, + "guide_size": { + "name": "가이드 크기", + "tooltip": "'가이드 크기 대상'으로 지정된 크기의 가장 짧은면을 이 크기까지 확대합니다." + }, + "guide_size_for": { + "name": "가이드 크기 대상", + "tooltip": "bbox: 감지된 사각 영역(bbox)\ncrop_region: 잘라낸 영역" + }, + "max_size": { + "name": "최대 크기", + "tooltip": "'가이드 크기'로 확대 할 때, 가장 긴 면의 길이를 이 크기로 제한합니다. 너무 크게 확대 되는 것을 막아줍니다." + }, + "seed": { + "name": "시드" + }, + "steps": { + "name": "스텝수" + }, + "sampler_name": { + "name": "샘플러 이름" + }, + "scheduler": { + "name": "스케쥴러" + }, + "positive": { + "name": "긍정 조건" + }, + "negative": { + "name": "부정 조건" + }, + "denoise": { + "name": "노이즈 제거양" + }, + "feather": { + "name": "가장자리 흐림", + "tooltip": "확대 해서 인페인트 된 이미지를 원본 이미지에 붙여넣을 때, 이 수치로 마스크의 가장 자리를 흐리게 해서 붙여넣어 이음매의 위화감을 줄여줍니다." + }, + "noise_mask": { + "name": "노이즈 마스크 사용", + "tooltip": "인페인트 할 때, 마스크를 적용해서 마스크 영역만 인페인트합니다. 이 옵션을 적용하지 않으면, 잘라낸 이미지 전체가 재생성되어 노이즈 제거양이 클 때 위화감이 나타나게 됩니다." + }, + "force_inpaint": { + "name": "인페인트 강제 적용", + "tooltip": "가이드 크기와 상관 없이 인페인팅을 무조건 적용 합니다. 이 옵션이 꺼져있는 경우 가이드 크기보다 이미 큰 감지 영역은 인페인팅을 건너 뜁니다." + }, + "bbox_threshold": { + "name": "bbox 감지 임계치", + "tooltip": "사각 영역(bbox) 감지 모델의 최소 감지 임계치를 설정합니다. 임계치가 높을수록 확실한 객체만 감지하지만, 객체를 감지하지 못할 확률이 증가합니다." + }, + "bbox_dilation": { + "name": "bbox 확장", + "tooltip": "감지된 사각 영역(bbox)을 확장 합니다. 이 옵션은 감지된 영역보다 더 넓은 영역을 인페인트 할 경우 사용합니다.\n주의: sam 모델을 사용할 경우 잘라낸 영역내에서 bbox 확장을 하더라도, sam 감지 영역이 작으면 여전히 제한됩니다." + }, + "bbox_crop_factor": { + "name": "bbox 자르기 배율", + "tooltip": "감지된 사각 영역(bbox)의 몇배 크기의 영역을 잘라낼 것 인지를 설정합니다. 이 크기가 너무 작으면, 인페인트 할 이미지의 주변 정보가 부족해서 위화감이 강한 이미지가 생성됩니다. 이 크기가 너무 크면, 인페인팅에 너무 오랜 시간이 걸릴 수 있으며, 모델의 역량을 초과할 정도로 클 경우 올바르지 않은 이미지를 생성하게 됩니다." + }, + "sam_detection_hint": { + "name": "sam 감지 힌트", + "tooltip": "[실험기능] 오래된 실험 기능으로, sam 모델의 감지 힌트를 제공하는 방식입니다. cetner-1 (중앙점 1개) 외에는 사용하지 않을 것을 권장합니다." + }, + "sam_dilation": { + "name": "sam 마스크 확장", + "tooltip": "sam 모델로 감지된 실루엣 마스크를 확장합니다." + }, + "sam_threshold": { + "name": "sam 감지 임계치", + "tooltip": "sam 모델의 최소 감지 임계치를 설정합니다. 임계치가 높을수록 확실한 객체만 감지하지만, 객체를 감지하지 못할 확률이 증가합니다." + }, + "sam_bbox_expansion": { + "name": "sam 영역 확장", + "tooltip": "sam 의 감지 영역을 확장합니다. 
감지 영역은 마스크를 포함하는 전체 사각 영역입니다.\n주의1:sam 마스크를 확장하더라도, 감지 영역을 벗어날 수 없습니다.\n주의2: bbox 확장을 하더라도, sam 감지 영역이 작으면 여전히 제한됩니다." + }, + "sam_mask_hint_threshold": { + "name": "sam 마스크 힌트 임계치", + "tooltip": "[실험기능] 오래된 실험 기능으로, mask-hint 모드에서만 사용되는 옵션으로, 마스크에서 이 크기 이상의 점 마스크를 sam의 힌트로 사용합니다." + }, + "sam_mask_hint_use_negative": { + "name": "sam 마스크 힌트에 제외 힌트 사용", + "tooltip": "[실험기능] 오래된 실험 기능으로, mask-hitn 모드에서만 사용되는 옵션으로, sam 마스크 힌트 임계치보다 작은 점 마스크를 sam의 제외 힌트로 사용합니다." + }, + "drop_size": { + "name": "감지 최소 크기", + "tooltip": "사각 영역(bbox) 감지기로 감지한 크기가 이 설정값 보다 작을 경우 무시합니다." + }, + "bbox_detector": { + "name": "bbox 감지기", + "tooltip": "디테일 개선 대상을 자동으로 감지해주는 사각 영역(bbox) 감지기 입력.\n이 감지기로 감지된 감지 정보가 기준 정보입니다." + }, + "wildcard": { + "name": "와일드카드 프롬프트", + "tooltip": "'와일드카드 인코더 (Impact)'와 유사한 기능을 수행하여, 와일드카드 기능과 로라 로딩 기능을 제공합니다. 또한, 감지된 영역별로 다른 프롬프트를 적용하는 기능들을 제공합니다.\n더 자세한 정보는 튜토리얼 페이지를 참고하세요.\n주의:이 입력을 비워두면, 이 입력은 무시됩니다." + }, + "cycle": { + "name": "반복수", + "tooltip": "설정된 값만큼 인페인팅을 반복 적용합니다. 인코딩/디코딩 없이 확대된 잠재 이미지 단계에서 반복됩니다." + }, + "sam_model_opt": { + "name": "sam 모델", + "tooltip": "이 모델을 제공할 경우 sam 모델을 감지 보조 모델로 사용합니다. bbox 감지기로 감지된 사각 영역에 sam 모델을 적용해서 정교한 실루엣 마스크를 생성합니다.\n주의: 이 입력이 연결될 경우 segm 감지기는 무시됩니다." + }, + "segm_detector_opt": { + "name": "segm 감지기", + "tooltip": "이 모델을 제공할 경우 segm 모델을 감지 보조 모델로 사용합니다. bbox 감지기로 감지된 사각 영역에 segm 감지기로 감지된 실루엣 마스크를 생성합니다.\n주의: 이 입력은 sam 모델이 연결될 경우 무시됩니다." + }, + "detailer_hook": { + "name": "디테일러 후크", + "tooltip": "이 노드의 실행 중간단계에서 여러가지 기능을 수행할 수 있는 후크를 연결합니다." + }, + "inpaint_model": { + "name": "인페인트 모델 모드", + "tooltip": "인페인트 전용 모델을 사용할 경우 이 옵션을 켜면, 인페인팅시에 '인페인트 모델 조건 설정'이 적용되어 수행됩니다." + }, + "noise_mask_feather": { + "name": "노이즈 마스크 가장자리 흐림", + "tooltip": "인페인트시에 적용되는 노이즈 마스크의 가장자리를 흐리게 합니다. 이 설정값이 0을 초과할 경우, 내부적으로 자동으로 '차등 확산' 노드를 적용합니다." + }, + "scheduler_func_opt": { + "name": "스케쥴러 함수", + "tooltip": "GITS 스케쥴러 처럼 기본 스케쥴러 리스트에서 선택할 수 없는 특수 스케쥴러를 사용할 수 있게 해줍니다. 이 입력이 연결되면, 기본 스케쥴러 선택은 무시됩니다." + }, + "tiled_encode": { + "name": "타일 인코드 사용", + "tooltip": "이 옵션을 켜면, 내부적으로 'VAE 인코드'를 사용할 경우, 기본 'VAE 인코드' 대신 'VAE 인코드 (타일)' 을 적용합니다." + }, + "tiled_decode": { + "name": "타일 디코드 사용", + "tooltip": "이 옵션을 켜면, 내부적으로 'VAE 다코드'를 사용할 경우, 기본 'VAE 디코드' 대신 'VAE 디코드 (타일)' 을 적용합니다." + } + }, + "outputs": { + "0": { + "name": "개선 이미" + }, + "1": { + "name": "잘라낸 이미지" + }, + "2": { + "name": "잘라낸 투명 이미지" + }, + "3": { + "name": "마스크" + }, + "4": { + "name": "디테일러 파이프" + }, + "5": { + "name": "컨트롤넷 이미지" + } + } + }, + "FaceDetailerPipe": { + "description": "감지 모델(bbox, segm, sam) 모델을 이용해서 입력 이미지에서 자동으로 특정 객체를 감지하고, 감지 영역을 가이드 크기를 기반으로 확대해서 인페이트하는 방법으로 디테일을 강화합니다.\n사용자들이 자주 사용하는 얼굴 디테일 강화 워크플로를 단순화하기 위해 특화 시킨 노드이긴 하지만, 감지 모델에 따라서 다양한 자동 인페인트 용도로 사용 가능합니다.", + "display_name": "얼굴 디테일러 (파이프)", + "inputs": { + "image": { + "name": "이미지" + }, + "detailer_pipe": { + "name": "디테일러 파이프", + "tooltip": "만약 디테일러 파이프 내의 모델에 `ImpactDummyInput` 가 설정된 경우, 인페인트 단계를 건너 뜁니다." + }, + "guide_size": { + "name": "가이드 크기", + "tooltip": "'가이드 크기 대상'으로 지정된 크기의 가장 짧은면을 이 크기까지 확대합니다." + }, + "guide_size_for": { + "name": "가이드 크기 대상", + "tooltip": "bbox: 감지된 사각 영역(bbox)\ncrop_region: 잘라낸 영역" + }, + "max_size": { + "name": "최대 크기", + "tooltip": "'가이드 크기'로 확대 할 때, 가장 긴 면의 길이를 이 크기로 제한합니다. 너무 크게 확대 되는 것을 막아줍니다." 
+ }, + "seed": { + "name": "시드" + }, + "steps": { + "name": "스텝수" + }, + "sampler_name": { + "name": "샘플러 이름" + }, + "scheduler": { + "name": "스케쥴러" + }, + "denoise": { + "name": "노이즈 제거양" + }, + "feather": { + "name": "가장자리 흐림", + "tooltip": "확대 해서 인페인트 된 이미지를 원본 이미지에 붙여넣을 때, 이 수치로 마스크의 가장 자리를 흐리게 해서 붙여넣어 이음매의 위화감을 줄여줍니다." + }, + "noise_mask": { + "name": "노이즈 마스크 사용", + "tooltip": "인페인트 할 때, 마스크를 적용해서 마스크 영역만 인페인트합니다. 이 옵션을 적용하지 않으면, 잘라낸 이미지 전체가 재생성되어 노이즈 제거양이 클 때 위화감이 나타나게 됩니다." + }, + "force_inpaint": { + "name": "인페인트 강제 적용", + "tooltip": "가이드 크기와 상관 없이 인페인팅을 무조건 적용 합니다. 이 옵션이 꺼져있는 경우 가이드 크기보다 이미 큰 감지 영역은 인페인팅을 건너 뜁니다." + }, + "bbox_threshold": { + "name": "bbox 감지 임계치", + "tooltip": "사각 영역(bbox) 감지 모델의 최소 감지 임계치를 설정합니다. 임계치가 높을수록 확실한 객체만 감지하지만, 객체를 감지하지 못할 확률이 증가합니다." + }, + "bbox_dilation": { + "name": "bbox 확장", + "tooltip": "감지된 사각 영역(bbox)을 확장 합니다. 이 옵션은 감지된 영역보다 더 넓은 영역을 인페인트 할 경우 사용합니다.\n주의: sam 모델을 사용할 경우 잘라낸 영역내에서 bbox 확장을 하더라도, sam 감지 영역이 작으면 여전히 제한됩니다." + }, + "bbox_crop_factor": { + "name": "bbox 자르기 배율", + "tooltip": "감지된 사각 영역(bbox)의 몇배 크기의 영역을 잘라낼 것 인지를 설정합니다. 이 크기가 너무 작으면, 인페인트 할 이미지의 주변 정보가 부족해서 위화감이 강한 이미지가 생성됩니다. 이 크기가 너무 크면, 인페인팅에 너무 오랜 시간이 걸릴 수 있으며, 모델의 역량을 초과할 정도로 클 경우 올바르지 않은 이미지를 생성하게 됩니다." + }, + "sam_detection_hint": { + "name": "sam 감지 힌트", + "tooltip": "[실험기능] 오래된 실험 기능으로, sam 모델의 감지 힌트를 제공하는 방식입니다. cetner-1 (중앙점 1개) 외에는 사용하지 않을 것을 권장합니다." + }, + "sam_dilation": { + "name": "sam 마스크 확장", + "tooltip": "sam 모델로 감지된 실루엣 마스크를 확장합니다." + }, + "sam_threshold": { + "name": "sam 감지 임계치", + "tooltip": "sam 모델의 최소 감지 임계치를 설정합니다. 임계치가 높을수록 확실한 객체만 감지하지만, 객체를 감지하지 못할 확률이 증가합니다." + }, + "sam_bbox_expansion": { + "name": "sam 영역 확장", + "tooltip": "sam 의 감지 영역을 확장합니다. 감지 영역은 마스크를 포함하는 전체 사각 영역입니다.\n주의1:sam 마스크를 확장하더라도, 감지 영역을 벗어날 수 없습니다.\n주의2: bbox 확장을 하더라도, sam 감지 영역이 작으면 여전히 제한됩니다." + }, + "sam_mask_hint_threshold": { + "name": "sam 마스크 힌트 임계치", + "tooltip": "[실험기능] 오래된 실험 기능으로, mask-hint 모드에서만 사용되는 옵션으로, 마스크에서 이 크기 이상의 점 마스크를 sam의 힌트로 사용합니다." + }, + "sam_mask_hint_use_negative": { + "name": "sam 마스크 힌트에 제외 힌트 사용", + "tooltip": "[실험기능] 오래된 실험 기능으로, mask-hitn 모드에서만 사용되는 옵션으로, sam 마스크 힌트 임계치보다 작은 점 마스크를 sam의 제외 힌트로 사용합니다." + }, + "drop_size": { + "name": "감지 최소 크기", + "tooltip": "사각 영역(bbox) 감지기로 감지한 크기가 이 설정값 보다 작을 경우 무시합니다." + }, + "refiner_ratio": { + "name": "라파이너 적용 비율", + "tooltip": "SDXL 리파이너 모델을 사용할 경우 적용될 후반 스텝수 비율을 설정합니다." + }, + "cycle": { + "name": "반복수", + "tooltip": "설정된 값만큼 인페인팅을 반복 적용합니다. 인코딩/디코딩 없이 확대된 잠재 이미지 단계에서 반복됩니다." + }, + "inpaint_model": { + "name": "인페인트 모델 모드", + "tooltip": "인페인트 전용 모델을 사용할 경우 이 옵션을 켜면, 인페인팅시에 '인페인트 모델 조건 설정'이 적용되어 수행됩니다." + }, + "noise_mask_feather": { + "name": "노이즈 마스크 가장자리 흐림", + "tooltip": "인페인트시에 적용되는 노이즈 마스크의 가장자리를 흐리게 합니다. 이 설정값이 0을 초과할 경우, 내부적으로 자동으로 '차등 확산' 노드를 적용합니다." + }, + "scheduler_func_opt": { + "name": "스케쥴러 함수", + "tooltip": "GITS 스케쥴러 처럼 기본 스케쥴러 리스트에서 선택할 수 없는 특수 스케쥴러를 사용할 수 있게 해줍니다. 이 입력이 연결되면, 기본 스케쥴러 선택은 무시됩니다." + }, + "tiled_encode": { + "name": "타일 인코드 사용", + "tooltip": "이 옵션을 켜면, 내부적으로 'VAE 인코드'를 사용할 경우, 기본 'VAE 인코드' 대신 'VAE 인코드 (타일)' 을 적용합니다." + }, + "tiled_decode": { + "name": "타일 디코드 사용", + "tooltip": "이 옵션을 켜면, 내부적으로 'VAE 다코드'를 사용할 경우, 기본 'VAE 디코드' 대신 'VAE 디코드 (타일)' 을 적용합니다." 
+ } + }, + "outputs": { + "0": { + "name": "개선 이미" + }, + "1": { + "name": "잘라낸 이미지" + }, + "2": { + "name": "잘라낸 투명 이미지" + }, + "3": { + "name": "마스크" + }, + "4": { + "name": "디테일러 파이프" + }, + "5": { + "name": "컨트롤넷 이미지" + } + } + }, + "DetailerForEach": { + "description": "감지 영역 정보 묶음(SEGS)내의 각 영역들에 대해 가이드 크기를 기반으로 확대해서 인페이트하는 방법으로 디테일을 강화합니다.", + "display_name": "디테일러 (SEGS)", + "inputs": { + "image": { + "name": "이미지" + }, + "segs": { + "name": "segs", + "tooltip": "감지 영역 정보를 담고 있는 묶음.\n이 영역들을 대상으로 인페인트가 적용됩니다." + }, + "model": { + "name": "모델", + "tooltip": "만약 `ImpactDummyInput` 을 연결 하면, 인페인트 단계를 건너 뜁니다." + }, + "guide_size": { + "name": "가이드 크기", + "tooltip": "'가이드 크기 대상'으로 지정된 크기의 가장 짧은면을 이 크기까지 확대합니다." + }, + "guide_size_for": { + "name": "가이드 크기 대상", + "tooltip": "bbox: 감지된 사각 영역(bbox)\ncrop_region: 잘라낸 영역" + }, + "max_size": { + "name": "최대 크기", + "tooltip": "'가이드 크기'로 확대 할 때, 가장 긴 면의 길이를 이 크기로 제한합니다. 너무 크게 확대 되는 것을 막아줍니다." + }, + "seed": { + "name": "시드" + }, + "steps": { + "name": "스텝수" + }, + "sampler_name": { + "name": "샘플러 이름" + }, + "scheduler": { + "name": "스케쥴러" + }, + "positive": { + "name": "긍정 조건" + }, + "negative": { + "name": "부정 조건" + }, + "denoise": { + "name": "노이즈 제거양" + }, + "feather": { + "name": "가장자리 흐림", + "tooltip": "확대 해서 인페인트 된 이미지를 원본 이미지에 붙여넣을 때, 이 수치로 마스크의 가장 자리를 흐리게 해서 붙여넣어 이음매의 위화감을 줄여줍니다." + }, + "noise_mask": { + "name": "노이즈 마스크 사용", + "tooltip": "인페인트 할 때, 마스크를 적용해서 마스크 영역만 인페인트합니다. 이 옵션을 적용하지 않으면, 잘라낸 이미지 전체가 재생성되어 노이즈 제거양이 클 때 위화감이 나타나게 됩니다." + }, + "force_inpaint": { + "name": "인페인트 강제 적용", + "tooltip": "가이드 크기와 상관 없이 인페인팅을 무조건 적용 합니다. 이 옵션이 꺼져있는 경우 가이드 크기보다 이미 큰 감지 영역은 인페인팅을 건너 뜁니다." + }, + "wildcard": { + "name": "와일드카드 프롬프트", + "tooltip": "'와일드카드 인코더 (Impact)'와 유사한 기능을 수행하여, 와일드카드 기능과 로라 로딩 기능을 제공합니다. 또한, 감지된 영역별로 다른 프롬프트를 적용하는 기능들을 제공합니다.\n더 자세한 정보는 튜토리얼 페이지를 참고하세요.\n주의:이 입력을 비워두면, 이 입력은 무시됩니다." + }, + "cycle": { + "name": "반복수", + "tooltip": "설정된 값만큼 인페인팅을 반복 적용합니다. 인코딩/디코딩 없이 확대된 잠재 이미지 단계에서 반복됩니다." + }, + "detailer_hook": { + "name": "디테일러 후크", + "tooltip": "이 노드의 실행 중간단계에서 여러가지 기능을 수행할 수 있는 후크를 연결합니다." + }, + "inpaint_model": { + "name": "인페인트 모델 모드", + "tooltip": "인페인트 전용 모델을 사용할 경우 이 옵션을 켜면, 인페인팅시에 '인페인트 모델 조건 설정'이 적용되어 수행됩니다." + }, + "noise_mask_feather": { + "name": "노이즈 마스크 가장자리 흐림", + "tooltip": "인페인트시에 적용되는 노이즈 마스크의 가장자리를 흐리게 합니다. 이 설정값이 0을 초과할 경우, 내부적으로 자동으로 '차등 확산' 노드를 적용합니다." + }, + "scheduler_func_opt": { + "name": "스케쥴러 함수", + "tooltip": "GITS 스케쥴러 처럼 기본 스케쥴러 리스트에서 선택할 수 없는 특수 스케쥴러를 사용할 수 있게 해줍니다. 이 입력이 연결되면, 기본 스케쥴러 선택은 무시됩니다." + }, + "tiled_encode": { + "name": "타일 인코드 사용", + "tooltip": "이 옵션을 켜면, 내부적으로 'VAE 인코드'를 사용할 경우, 기본 'VAE 인코드' 대신 'VAE 인코드 (타일)' 을 적용합니다." + }, + "tiled_decode": { + "name": "타일 디코드 사용", + "tooltip": "이 옵션을 켜면, 내부적으로 'VAE 다코드'를 사용할 경우, 기본 'VAE 디코드' 대신 'VAE 디코드 (타일)' 을 적용합니다." + } + }, + "outputs": { + "0": { + "name": "개선 이미지" + } + } + }, + "DetailerForEachPipe": { + "description": "감지 영역 정보 묶음(SEGS)내의 각 영역들에 대해 가이드 크기를 기반으로 확대해서 인페이트하는 방법으로 디테일을 강화합니다.", + "display_name": "디테일러 (상세/SEGS/파이프)", + "inputs": { + "image": { + "name": "이미지" + }, + "segs": { + "name": "segs", + "tooltip": "감지 영역 정보를 담고 있는 묶음.\n이 영역들을 대상으로 인페인트가 적용됩니다." + }, + "guide_size": { + "name": "가이드 크기", + "tooltip": "'가이드 크기 대상'으로 지정된 크기의 가장 짧은면을 이 크기까지 확대합니다." + }, + "guide_size_for": { + "name": "가이드 크기 대상", + "tooltip": "bbox: 감지된 사각 영역(bbox)\ncrop_region: 잘라낸 영역" + }, + "max_size": { + "name": "최대 크기", + "tooltip": "'가이드 크기'로 확대 할 때, 가장 긴 면의 길이를 이 크기로 제한합니다. 너무 크게 확대 되는 것을 막아줍니다." 
+ }, + "seed": { + "name": "시드" + }, + "steps": { + "name": "스텝수" + }, + "sampler_name": { + "name": "샘플러 이름" + }, + "scheduler": { + "name": "스케쥴러" + }, + "denoise": { + "name": "노이즈 제거양" + }, + "feather": { + "name": "가장자리 흐림", + "tooltip": "확대 해서 인페인트 된 이미지를 원본 이미지에 붙여넣을 때, 이 수치로 마스크의 가장 자리를 흐리게 해서 붙여넣어 이음매의 위화감을 줄여줍니다." + }, + "noise_mask": { + "name": "노이즈 마스크 사용", + "tooltip": "인페인트 할 때, 마스크를 적용해서 마스크 영역만 인페인트합니다. 이 옵션을 적용하지 않으면, 잘라낸 이미지 전체가 재생성되어 노이즈 제거양이 클 때 위화감이 나타나게 됩니다." + }, + "force_inpaint": { + "name": "인페인트 강제 적용", + "tooltip": "가이드 크기와 상관 없이 인페인팅을 무조건 적용 합니다. 이 옵션이 꺼져있는 경우 가이드 크기보다 이미 큰 감지 영역은 인페인팅을 건너 뜁니다." + }, + "basic_pipe": { + "name": "기본 파이프", + "tooltip": "만약 기본 파이프 내의 모델에 `ImpactDummyInput` 가 설정된 경우, 인페인트 단계를 건너 뜁니다." + }, + "refiner_ratio": { + "name": "라파이너 적용 비율", + "tooltip": "SDXL 리파이너 모델을 사용할 경우 적용될 후반 스텝수 비율을 설정합니다." + }, + "cycle": { + "name": "반복수", + "tooltip": "설정된 값만큼 인페인팅을 반복 적용합니다. 인코딩/디코딩 없이 확대된 잠재 이미지 단계에서 반복됩니다." + }, + "detailer_hook": { + "name": "디테일러 후크", + "tooltip": "이 노드의 실행 중간단계에서 여러가지 기능을 수행할 수 있는 후크를 연결합니다." + }, + "refiner_basic_pipe_opt": { + "name": "리파이너 기본 파이프", + "tooltip": "SDXL 리파이너 단계에 적용할 기본 파이프를 연결합니다." + }, + "inpaint_model": { + "name": "인페인트 모델 모드", + "tooltip": "인페인트 전용 모델을 사용할 경우 이 옵션을 켜면, 인페인팅시에 '인페인트 모델 조건 설정'이 적용되어 수행됩니다." + }, + "noise_mask_feather": { + "name": "노이즈 마스크 가장자리 흐림", + "tooltip": "인페인트시에 적용되는 노이즈 마스크의 가장자리를 흐리게 합니다. 이 설정값이 0을 초과할 경우, 내부적으로 자동으로 '차등 확산' 노드를 적용합니다." + }, + "scheduler_func_opt": { + "name": "스케쥴러 함수", + "tooltip": "GITS 스케쥴러 처럼 기본 스케쥴러 리스트에서 선택할 수 없는 특수 스케쥴러를 사용할 수 있게 해줍니다. 이 입력이 연결되면, 기본 스케쥴러 선택은 무시됩니다." + }, + "tiled_encode": { + "name": "타일 인코드 사용", + "tooltip": "이 옵션을 켜면, 내부적으로 'VAE 인코드'를 사용할 경우, 기본 'VAE 인코드' 대신 'VAE 인코드 (타일)' 을 적용합니다." + }, + "tiled_decode": { + "name": "타일 디코드 사용", + "tooltip": "이 옵션을 켜면, 내부적으로 'VAE 다코드'를 사용할 경우, 기본 'VAE 디코드' 대신 'VAE 디코드 (타일)' 을 적용합니다." + } + }, + "outputs": { + "0": { + "name": "개선 이미지" + }, + "1": { + "name": "segs" + }, + "2": { + "name": "기본 파이프" + }, + "3": { + "name": "컨트롤넷 이미지" + } + } + }, + "DetailerForEachDebug": { + "description": "감지 영역 정보 묶음(SEGS)내의 각 영역들에 대해 가이드 크기를 기반으로 확대해서 인페이트하는 방법으로 디테일을 강화합니다.", + "display_name": "디테일러 (상세/SEGS)", + "inputs": { + "image": { + "name": "이미지" + }, + "segs": { + "name": "segs", + "tooltip": "감지 영역 정보를 담고 있는 묶음.\n이 영역들을 대상으로 인페인트가 적용됩니다." + }, + "model": { + "name": "모델", + "tooltip": "만약 `ImpactDummyInput` 을 연결 하면, 인페인트 단계를 건너 뜁니다." + }, + "guide_size": { + "name": "가이드 크기", + "tooltip": "'가이드 크기 대상'으로 지정된 크기의 가장 짧은면을 이 크기까지 확대합니다." + }, + "guide_size_for": { + "name": "가이드 크기 대상", + "tooltip": "bbox: 감지된 사각 영역(bbox)\ncrop_region: 잘라낸 영역" + }, + "max_size": { + "name": "최대 크기", + "tooltip": "'가이드 크기'로 확대 할 때, 가장 긴 면의 길이를 이 크기로 제한합니다. 너무 크게 확대 되는 것을 막아줍니다." + }, + "seed": { + "name": "시드" + }, + "steps": { + "name": "스텝수" + }, + "sampler_name": { + "name": "샘플러 이름" + }, + "scheduler": { + "name": "스케쥴러" + }, + "positive": { + "name": "긍정 조건" + }, + "negative": { + "name": "부정 조건" + }, + "denoise": { + "name": "노이즈 제거양" + }, + "feather": { + "name": "가장자리 흐림", + "tooltip": "확대 해서 인페인트 된 이미지를 원본 이미지에 붙여넣을 때, 이 수치로 마스크의 가장 자리를 흐리게 해서 붙여넣어 이음매의 위화감을 줄여줍니다." + }, + "noise_mask": { + "name": "노이즈 마스크 사용", + "tooltip": "인페인트 할 때, 마스크를 적용해서 마스크 영역만 인페인트합니다. 이 옵션을 적용하지 않으면, 잘라낸 이미지 전체가 재생성되어 노이즈 제거양이 클 때 위화감이 나타나게 됩니다." + }, + "force_inpaint": { + "name": "인페인트 강제 적용", + "tooltip": "가이드 크기와 상관 없이 인페인팅을 무조건 적용 합니다. 이 옵션이 꺼져있는 경우 가이드 크기보다 이미 큰 감지 영역은 인페인팅을 건너 뜁니다." 
+ }, + "wildcard": { + "name": "와일드카드 프롬프트", + "tooltip": "'와일드카드 인코더 (Impact)'와 유사한 기능을 수행하여, 와일드카드 기능과 로라 로딩 기능을 제공합니다. 또한, 감지된 영역별로 다른 프롬프트를 적용하는 기능들을 제공합니다.\n더 자세한 정보는 튜토리얼 페이지를 참고하세요.\n주의:이 입력을 비워두면, 이 입력은 무시됩니다." + }, + "cycle": { + "name": "반복수", + "tooltip": "설정된 값만큼 인페인팅을 반복 적용합니다. 인코딩/디코딩 없이 확대된 잠재 이미지 단계에서 반복됩니다." + }, + "detailer_hook": { + "name": "디테일러 후크", + "tooltip": "이 노드의 실행 중간단계에서 여러가지 기능을 수행할 수 있는 후크를 연결합니다." + }, + "inpaint_model": { + "name": "인페인트 모델 모드", + "tooltip": "인페인트 전용 모델을 사용할 경우 이 옵션을 켜면, 인페인팅시에 '인페인트 모델 조건 설정'이 적용되어 수행됩니다." + }, + "noise_mask_feather": { + "name": "노이즈 마스크 가장자리 흐림", + "tooltip": "인페인트시에 적용되는 노이즈 마스크의 가장자리를 흐리게 합니다. 이 설정값이 0을 초과할 경우, 내부적으로 자동으로 '차등 확산' 노드를 적용합니다." + }, + "scheduler_func_opt": { + "name": "스케쥴러 함수", + "tooltip": "GITS 스케쥴러 처럼 기본 스케쥴러 리스트에서 선택할 수 없는 특수 스케쥴러를 사용할 수 있게 해줍니다. 이 입력이 연결되면, 기본 스케쥴러 선택은 무시됩니다." + }, + "tiled_encode": { + "name": "타일 인코드 사용", + "tooltip": "이 옵션을 켜면, 내부적으로 'VAE 인코드'를 사용할 경우, 기본 'VAE 인코드' 대신 'VAE 인코드 (타일)' 을 적용합니다." + }, + "tiled_decode": { + "name": "타일 디코드 사용", + "tooltip": "이 옵션을 켜면, 내부적으로 'VAE 다코드'를 사용할 경우, 기본 'VAE 디코드' 대신 'VAE 디코드 (타일)' 을 적용합니다." + } + }, + "outputs": { + "0": { + "name": "개선 이미지" + }, + "1": { + "name": "잘라낸 이미지" + }, + "2": { + "name": "잘라낸 개선 이미지" + }, + "3": { + "name": "잘라낸 투명 개선 이미지" + }, + "4": { + "name": "컨트롤넷 이미지" + } + } + }, + "DetailerForEachDebugPipe": { + "description": "감지 영역 정보 묶음(SEGS)내의 각 영역들에 대해 가이드 크기를 기반으로 확대해서 인페이트하는 방법으로 디테일을 강화합니다.", + "display_name": "디테일러 (상세/SEGS/파이프)", + "inputs": { + "image": { + "name": "이미지" + }, + "segs": { + "name": "segs", + "tooltip": "감지 영역 정보를 담고 있는 묶음.\n이 영역들을 대상으로 인페인트가 적용됩니다." + }, + "guide_size": { + "name": "가이드 크기", + "tooltip": "'가이드 크기 대상'으로 지정된 크기의 가장 짧은면을 이 크기까지 확대합니다." + }, + "guide_size_for": { + "name": "가이드 크기 대상", + "tooltip": "bbox: 감지된 사각 영역(bbox)\ncrop_region: 잘라낸 영역" + }, + "max_size": { + "name": "최대 크기", + "tooltip": "'가이드 크기'로 확대 할 때, 가장 긴 면의 길이를 이 크기로 제한합니다. 너무 크게 확대 되는 것을 막아줍니다." + }, + "seed": { + "name": "시드" + }, + "steps": { + "name": "스텝수" + }, + "sampler_name": { + "name": "샘플러 이름" + }, + "scheduler": { + "name": "스케쥴러" + }, + "denoise": { + "name": "노이즈 제거양" + }, + "feather": { + "name": "가장자리 흐림", + "tooltip": "확대 해서 인페인트 된 이미지를 원본 이미지에 붙여넣을 때, 이 수치로 마스크의 가장 자리를 흐리게 해서 붙여넣어 이음매의 위화감을 줄여줍니다." + }, + "noise_mask": { + "name": "노이즈 마스크 사용", + "tooltip": "인페인트 할 때, 마스크를 적용해서 마스크 영역만 인페인트합니다. 이 옵션을 적용하지 않으면, 잘라낸 이미지 전체가 재생성되어 노이즈 제거양이 클 때 위화감이 나타나게 됩니다." + }, + "force_inpaint": { + "name": "인페인트 강제 적용", + "tooltip": "가이드 크기와 상관 없이 인페인팅을 무조건 적용 합니다. 이 옵션이 꺼져있는 경우 가이드 크기보다 이미 큰 감지 영역은 인페인팅을 건너 뜁니다." + }, + "basic_pipe": { + "name": "기본 파이프", + "tooltip": "만약 기본 파이프 내의 모델에 `ImpactDummyInput` 가 설정된 경우, 인페인트 단계를 건너 뜁니다." + }, + "refiner_ratio": { + "name": "라파이너 적용 비율", + "tooltip": "SDXL 리파이너 모델을 사용할 경우 적용될 후반 스텝수 비율을 설정합니다." + }, + "cycle": { + "name": "반복수", + "tooltip": "설정된 값만큼 인페인팅을 반복 적용합니다. 인코딩/디코딩 없이 확대된 잠재 이미지 단계에서 반복됩니다." + }, + "detailer_hook": { + "name": "디테일러 후크", + "tooltip": "이 노드의 실행 중간단계에서 여러가지 기능을 수행할 수 있는 후크를 연결합니다." + }, + "refiner_basic_pipe_opt": { + "name": "리파이너 기본 파이프", + "tooltip": "SDXL 리파이너 단계에 적용할 기본 파이프를 연결합니다." + }, + "inpaint_model": { + "name": "인페인트 모델 모드", + "tooltip": "인페인트 전용 모델을 사용할 경우 이 옵션을 켜면, 인페인팅시에 '인페인트 모델 조건 설정'이 적용되어 수행됩니다." + }, + "noise_mask_feather": { + "name": "노이즈 마스크 가장자리 흐림", + "tooltip": "인페인트시에 적용되는 노이즈 마스크의 가장자리를 흐리게 합니다. 이 설정값이 0을 초과할 경우, 내부적으로 자동으로 '차등 확산' 노드를 적용합니다." 
+ }, + "scheduler_func_opt": { + "name": "스케쥴러 함수", + "tooltip": "GITS 스케쥴러 처럼 기본 스케쥴러 리스트에서 선택할 수 없는 특수 스케쥴러를 사용할 수 있게 해줍니다. 이 입력이 연결되면, 기본 스케쥴러 선택은 무시됩니다." + }, + "tiled_encode": { + "name": "타일 인코드 사용", + "tooltip": "이 옵션을 켜면, 내부적으로 'VAE 인코드'를 사용할 경우, 기본 'VAE 인코드' 대신 'VAE 인코드 (타일)' 을 적용합니다." + }, + "tiled_decode": { + "name": "타일 디코드 사용", + "tooltip": "이 옵션을 켜면, 내부적으로 'VAE 다코드'를 사용할 경우, 기본 'VAE 디코드' 대신 'VAE 디코드 (타일)' 을 적용합니다." + } + }, + "outputs": { + "0": { + "name": "개선 이미지" + }, + "1": { + "name": "개선 SEGS" + }, + "2": { + "name": "기본 파이프" + }, + "3": { + "name": "잘라낸 이미지" + }, + "4": { + "name": "잘라낸 개선 이미지" + }, + "5": { + "name": "잘라낸 투명 개선 이미지" + }, + "6": { + "name": "컨트롤넷 이미지" + } + } + }, + "DetailerForEachPipeForAnimateDiff": { + "description": "감지 영역 정보 묶음(SEGS)내의 각 영역들에 대해 가이드 크기를 기반으로 확대해서 인페이트하는 방법으로 디테일을 강화합니다.\n이 노드는 AnimateDiff와 같은 동영상의 디테일 개선을 위한 특수 디테일러 노드로써, SEGS가 담고 있는 마스크가 여러 프레임에 걸친 배치 마스크가 되는 경우를 처리할 수 있습니다.", + "display_name": "디테일러 (AnimateDiff/파이프)", + "inputs": { + "image_frames": { + "name": "이미지 프레임 묶음" + }, + "segs": { + "name": "segs", + "tooltip": "감지 영역 정보를 담고 있는 묶음.\n이 영역들을 대상으로 인페인트가 적용됩니다." + }, + "guide_size": { + "name": "가이드 크기", + "tooltip": "'가이드 크기 대상'으로 지정된 크기의 가장 짧은면을 이 크기까지 확대합니다." + }, + "guide_size_for": { + "name": "가이드 크기 대상", + "tooltip": "bbox: 감지된 사각 영역(bbox)\ncrop_region: 잘라낸 영역" + }, + "max_size": { + "name": "최대 크기", + "tooltip": "'가이드 크기'로 확대 할 때, 가장 긴 면의 길이를 이 크기로 제한합니다. 너무 크게 확대 되는 것을 막아줍니다." + }, + "seed": { + "name": "시드" + }, + "steps": { + "name": "스텝수" + }, + "sampler_name": { + "name": "샘플러 이름" + }, + "scheduler": { + "name": "스케쥴러" + }, + "denoise": { + "name": "노이즈 제거양" + }, + "feather": { + "name": "가장자리 흐림", + "tooltip": "확대 해서 인페인트 된 이미지를 원본 이미지에 붙여넣을 때, 이 수치로 마스크의 가장 자리를 흐리게 해서 붙여넣어 이음매의 위화감을 줄여줍니다." + }, + "basic_pipe": { + "name": "기본 파이프", + "tooltip": "만약 기본 파이프 내의 모델에 `ImpactDummyInput` 가 설정된 경우, 인페인트 단계를 건너 뜁니다." + }, + "refiner_ratio": { + "name": "라파이너 적용 비율", + "tooltip": "SDXL 리파이너 모델을 사용할 경우 적용될 후반 스텝수 비율을 설정합니다." + }, + "detailer_hook": { + "name": "디테일러 후크", + "tooltip": "이 노드의 실행 중간단계에서 여러가지 기능을 수행할 수 있는 후크를 연결합니다." + }, + "refiner_basic_pipe_opt": { + "name": "리파이너 기본 파이프", + "tooltip": "SDXL 리파이너 단계에 적용할 기본 파이프를 연결합니다." + }, + "noise_mask_feather": { + "name": "노이즈 마스크 가장자리 흐림", + "tooltip": "인페인트시에 적용되는 노이즈 마스크의 가장자리를 흐리게 합니다. 이 설정값이 0을 초과할 경우, 내부적으로 자동으로 '차등 확산' 노드를 적용합니다." + }, + "scheduler_func_opt": { + "name": "스케쥴러 함수", + "tooltip": "GITS 스케쥴러 처럼 기본 스케쥴러 리스트에서 선택할 수 없는 특수 스케쥴러를 사용할 수 있게 해줍니다. 이 입력이 연결되면, 기본 스케쥴러 선택은 무시됩니다." + } + }, + "outputs": { + "0": { + "name": "개선 SEGS" + }, + "1": { + "name": "개선 이미지" + }, + "2": { + "name": "기본 파이프" + }, + "3": { + "name": "컨트롤넷 이미지" + } + } + }, + "SEGSDetailerForAnimateDiff": { + "description": "감지 영역 정보 묶음(SEGS)내의 각 영역들에 대해 가이드 크기를 기반으로 확대해서 인페이트하는 방법으로 디테일을 강화합니다.\n이 노드는 원본 이미지가 아닌 SEGS를 대상으로 적용되는 노드로 원본 이미지에 적용하려면 'SEGS 붙여넣기' 노드를 사용하세요.\n이 노드는 AnimateDiff와 같은 동영상의 디테일 개선을 위한 특수 디테일러 노드로써, SEGS가 담고 있는 마스크가 여러 프레임에 걸친 배치 마스크가 되는 경우를 처리할 수 있습니다.", + "display_name": "SEGS 디테일러 (AnimateDiff/파이프)", + "inputs": { + "image_frames": { + "name": "이미지 프레임 묶음" + }, + "segs": { + "name": "segs", + "tooltip": "감지 영역 정보를 담고 있는 묶음.\n이 영역들을 대상으로 인페인트가 적용됩니다." + }, + "guide_size": { + "name": "가이드 크기", + "tooltip": "'가이드 크기 대상'으로 지정된 크기의 가장 짧은면을 이 크기까지 확대합니다." + }, + "guide_size_for": { + "name": "가이드 크기 대상", + "tooltip": "bbox: 감지된 사각 영역(bbox)\ncrop_region: 잘라낸 영역" + }, + "max_size": { + "name": "최대 크기", + "tooltip": "'가이드 크기'로 확대 할 때, 가장 긴 면의 길이를 이 크기로 제한합니다. 
너무 크게 확대 되는 것을 막아줍니다." + }, + "seed": { + "name": "시드" + }, + "steps": { + "name": "스텝수" + }, + "sampler_name": { + "name": "샘플러 이름" + }, + "scheduler": { + "name": "스케쥴러" + }, + "denoise": { + "name": "노이즈 제거양" + }, + "basic_pipe": { + "name": "기본 파이프", + "tooltip": "만약 기본 파이프 내의 모델에 `ImpactDummyInput` 가 설정된 경우, 인페인트 단계를 건너 뜁니다." + }, + "refiner_ratio": { + "name": "라파이너 적용 비율", + "tooltip": "SDXL 리파이너 모델을 사용할 경우 적용될 후반 스텝수 비율을 설정합니다." + }, + "refiner_basic_pipe_opt": { + "name": "리파이너 기본 파이프", + "tooltip": "SDXL 리파이너 단계에 적용할 기본 파이프를 연결합니다." + }, + "noise_mask_feather": { + "name": "노이즈 마스크 가장자리 흐림", + "tooltip": "인페인트시에 적용되는 노이즈 마스크의 가장자리를 흐리게 합니다. 이 설정값이 0을 초과할 경우, 내부적으로 자동으로 '차등 확산' 노드를 적용합니다." + }, + "scheduler_func_opt": { + "name": "스케쥴러 함수", + "tooltip": "GITS 스케쥴러 처럼 기본 스케쥴러 리스트에서 선택할 수 없는 특수 스케쥴러를 사용할 수 있게 해줍니다. 이 입력이 연결되면, 기본 스케쥴러 선택은 무시됩니다." + } + }, + "outputs": { + "0": { + "name": "개선 SEGS" + }, + "1": { + "name": "개선 이미지" + } + } + }, + "SEGSDetailer": { + "description": "감지 영역 정보 묶음(SEGS)내의 각 영역들에 대해 가이드 크기를 기반으로 확대해서 인페이트하는 방법으로 디테일을 강화합니다.\n이 노드는 원본 이미지가 아닌 SEGS를 대상으로 적용되는 노드로 원본 이미지에 적용하려면 'SEGS 붙여넣기' 노드를 사용하세요.", + "display_name": "SEGS 디테일러 (파이프)", + "inputs": { + "image": { + "name": "이미지" + }, + "segs": { + "name": "segs", + "tooltip": "감지 영역 정보를 담고 있는 묶음.\n이 영역들을 대상으로 인페인트가 적용됩니다." + }, + "guide_size": { + "name": "가이드 크기", + "tooltip": "'가이드 크기 대상'으로 지정된 크기의 가장 짧은면을 이 크기까지 확대합니다." + }, + "guide_size_for": { + "name": "가이드 크기 대상", + "tooltip": "bbox: 감지된 사각 영역(bbox)\ncrop_region: 잘라낸 영역" + }, + "max_size": { + "name": "최대 크기", + "tooltip": "'가이드 크기'로 확대 할 때, 가장 긴 면의 길이를 이 크기로 제한합니다. 너무 크게 확대 되는 것을 막아줍니다." + }, + "seed": { + "name": "시드" + }, + "steps": { + "name": "스텝수" + }, + "sampler_name": { + "name": "샘플러 이름" + }, + "scheduler": { + "name": "스케쥴러" + }, + "denoise": { + "name": "노이즈 제거양" + }, + "noise_mask": { + "name": "노이즈 마스크 사용", + "tooltip": "인페인트 할 때, 마스크를 적용해서 마스크 영역만 인페인트합니다. 이 옵션을 적용하지 않으면, 잘라낸 이미지 전체가 재생성되어 노이즈 제거양이 클 때 위화감이 나타나게 됩니다." + }, + "force_inpaint": { + "name": "인페인트 강제 적용", + "tooltip": "가이드 크기와 상관 없이 인페인팅을 무조건 적용 합니다. 이 옵션이 꺼져있는 경우 가이드 크기보다 이미 큰 감지 영역은 인페인팅을 건너 뜁니다." + }, + "basic_pipe": { + "name": "기본 파이프", + "tooltip": "만약 기본 파이프 내의 모델에 `ImpactDummyInput` 가 설정된 경우, 인페인트 단계를 건너 뜁니다." + }, + "refiner_ratio": { + "name": "라파이너 적용 비율", + "tooltip": "SDXL 리파이너 모델을 사용할 경우 적용될 후반 스텝수 비율을 설정합니다." + }, + "batch_size": { + "name": "배치 갯수", + "tooltip": "대상 SEGS 에 대해서 배치 갯수만큼 여러개의 후보를 생성합니다. 여러개를 생성할 경우 '고르기 (SEGS)'와 함께 사용하세요." + }, + "cycle": { + "name": "반복수", + "tooltip": "설정된 값만큼 인페인팅을 반복 적용합니다. 인코딩/디코딩 없이 확대된 잠재 이미지 단계에서 반복됩니다." + }, + "refiner_basic_pipe_opt": { + "name": "리파이너 기본 파이프", + "tooltip": "SDXL 리파이너 단계에 적용할 기본 파이프를 연결합니다." + }, + "inpaint_model": { + "name": "인페인트 모델 모드", + "tooltip": "인페인트 전용 모델을 사용할 경우 이 옵션을 켜면, 인페인팅시에 '인페인트 모델 조건 설정'이 적용되어 수행됩니다." + }, + "noise_mask_feather": { + "name": "노이즈 마스크 가장자리 흐림", + "tooltip": "인페인트시에 적용되는 노이즈 마스크의 가장자리를 흐리게 합니다. 이 설정값이 0을 초과할 경우, 내부적으로 자동으로 '차등 확산' 노드를 적용합니다." + }, + "scheduler_func_opt": { + "name": "스케쥴러 함수", + "tooltip": "GITS 스케쥴러 처럼 기본 스케쥴러 리스트에서 선택할 수 없는 특수 스케쥴러를 사용할 수 있게 해줍니다. 이 입력이 연결되면, 기본 스케쥴러 선택은 무시됩니다." + } + }, + "outputs": { + "0": { + "name": "개선 SEGS" + }, + "1": { + "name": "컨트롤넷 이미지" + } + } + }, + "MaskDetailerPipe": { + "description": "이 디테일러 노드는 마스크로 설정된 영역을 확대해서 가이드 크기를 기반으로 확대해서 인페인트하는 방법으로 디테일을 강화합니다.", + "display_name": "마스크 디테일러 (파이프)", + "inputs": { + "image": { + "name": "이미지" + }, + "mask": { + "name": "마스크", + "tooltip": "디테일을 강화하고 싶은 대상 영역이 설정된 마스크. 
분리된 마스크 영역은 개별적으로 디테일 강화가 이루어집니다." + }, + "basic_pipe": { + "name": "기본 파이프", + "tooltip": "만약 기본 파이프 내의 모델에 `ImpactDummyInput` 가 설정된 경우, 인페인트 단계를 건너 뜁니다." + }, + "guide_size": { + "name": "가이드 크기", + "tooltip": "'가이드 크기 대상'으로 지정된 크기의 가장 짧은면을 이 크기까지 확대합니다." + }, + "guide_size_for": { + "name": "가이드 크기 대상", + "tooltip": "bbox: 감지된 사각 영역(bbox)\ncrop_region: 잘라낸 영역" + }, + "max_size": { + "name": "최대 크기", + "tooltip": "'가이드 크기'로 확대 할 때, 가장 긴 면의 길이를 이 크기로 제한합니다. 너무 크게 확대 되는 것을 막아줍니다." + }, + "mask_mode": { + "name": "마스크 모드", + "tooltip": "마스크로 설정된 영역만을 인페인트 할지, 잘라낸 영역 전체를 인페인트 할 것인지를 설정합니다." + }, + "seed": { + "name": "시드" + }, + "steps": { + "name": "스텝수" + }, + "sampler_name": { + "name": "샘플러 이름" + }, + "scheduler": { + "name": "스케쥴러" + }, + "denoise": { + "name": "노이즈 제거양" + }, + "feather": { + "name": "가장자리 흐림", + "tooltip": "확대 해서 인페인트 된 이미지를 원본 이미지에 붙여넣을 때, 이 수치로 마스크의 가장 자리를 흐리게 해서 붙여넣어 이음매의 위화감을 줄여줍니다." + }, + "crop_factor": { + "name": "자르기 배율", + "tooltip": "각 마스크로 설정된 영역에 대해서 몇배 크기를 잘라내어서 인페인트에 사용할지를 설정합니다. 이 크기가 너무 작으면, 인페인트 할 이미지의 주변 정보가 부족해서 위화감이 강한 이미지가 생성됩니다. 이 크기가 너무 크면, 인페인팅에 너무 오랜 시간이 걸릴 수 있으며, 모델의 역량을 초과할 정도로 클 경우 올바르지 않은 이미지를 생성하게 됩니다." + }, + "drop_size": { + "name": "감지 최소 크기", + "tooltip": "사각 영역(bbox) 감지기로 감지한 크기가 이 설정값 보다 작을 경우 무시합니다." + }, + "refiner_ratio": { + "name": "라파이너 적용 비율", + "tooltip": "SDXL 리파이너 모델을 사용할 경우 적용될 후반 스텝수 비율을 설정합니다." + }, + "batch_size": { + "name": "배치 갯수", + "tooltip": "대상 SEGS 에 대해서 배치 갯수만큼 여러개의 후보를 생성합니다. 여러개를 생성할 경우 '고르기 (SEGS)'와 함께 사용하세요." + }, + "cycle": { + "name": "반복수", + "tooltip": "설정된 값만큼 인페인팅을 반복 적용합니다. 인코딩/디코딩 없이 확대된 잠재 이미지 단계에서 반복됩니다." + }, + "refiner_basic_pipe_opt": { + "name": "리파이너 기본 파이프", + "tooltip": "SDXL 리파이너 단계에 적용할 기본 파이프를 연결합니다." + }, + "detailer_hook": { + "name": "디테일러 후크", + "tooltip": "이 노드의 실행 중간단계에서 여러가지 기능을 수행할 수 있는 후크를 연결합니다." + }, + "inpaint_model": { + "name": "인페인트 모델 모드", + "tooltip": "인페인트 전용 모델을 사용할 경우 이 옵션을 켜면, 인페인팅시에 '인페인트 모델 조건 설정'이 적용되어 수행됩니다." + }, + "noise_mask_feather": { + "name": "노이즈 마스크 가장자리 흐림", + "tooltip": "인페인트시에 적용되는 노이즈 마스크의 가장자리를 흐리게 합니다. 이 설정값이 0을 초과할 경우, 내부적으로 자동으로 '차등 확산' 노드를 적용합니다." + }, + "bbox_fill": { + "name": "bbox 채우기", + "tooltip": "각 마스크 조각들을 해당 마스크를 포함하는 가장 작은 사각 영역의 마스크로 간주합니다." + }, + "contour_fill": { + "name": "윤곽 내부 채우기", + "tooltip": "윤곽선 형태의 마스크 조각들의 경우 마스크 내부가 모두 채워진 것으로 간주합니다." + }, + "scheduler_func_opt": { + "name": "스케쥴러 함수", + "tooltip": "GITS 스케쥴러 처럼 기본 스케쥴러 리스트에서 선택할 수 없는 특수 스케쥴러를 사용할 수 있게 해줍니다. 이 입력이 연결되면, 기본 스케쥴러 선택은 무시됩니다." + } + }, + "outputs": { + "0": { + "name": "개선 이미지" + }, + "1": { + "name": "잘라낸 개선 이미지" + }, + "2": { + "name": "잘라낸 투명 개선 이미지" + }, + "3": { + "name": "기본 파이프" + }, + "4": { + "name": "리파이너 기본 파이프" + } + } + }, + + "SEGSPaste": { + "description": "SEGS 디테일러를 통해 개선된 SEGS를 원본 이미지에 붙여넣는 기능을 제공하기 위한 노드입니다.", + "display_name": "SEGS 붙여넣기", + "inputs": { + "image": { + "name": "원본 이미지" + }, + "segs": { + "name": "segs" + }, + "feather": { + "name": "가장자리 흐림", + "tooltip": "개선된 SEGS의 이미지를 원본 이미지에 붙여넣을 때, 이 수치로 마스크의 가장 자리를 흐리게 해서 붙여넣어 이음매의 위화감을 줄여줍니다." + }, + "alpha": { + "name": "투명도", + "tooltip": "원본에 붙여넣는 이미지에 투명도를 설정합니다." + }, + "ref_image_opt": { + "name": "참조 이미지", + "tooltip": "디테일러를 통과시키거나 'SEGS에 기본 이미지 설정'을 한 경우가 아니라면, SEGS는 이미지가 없이 감지 영역 정보만 있습니다. 이 때 감지영역이 참조할 원본 이미지를 설정합니다." 
+ } + }, + "outputs": { + "0": { + "name": "개선 SEGS" + } + } + }, + + "ImpactSEGSPicker": { + "description": "입력된 SEGS 중에서 선택된 SEGS만을 고를 수 있는 있는 기능을 제공합니다.", + "display_name": "고르기 (SEGS)", + "inputs": { + "picks": { + "name": "선택 목록", + "tooltip": "출력할 SEGS 번호 목록을 나열합니다. 'pick' 버튼을 눌러서 선택하세요." + }, + "segs": { + "name": "segs" + }, + "fallback_image_opt": { + "name": "참조 이미지", + "tooltip": "디테일러를 통과시키거나 'SEGS에 기본 이미지 설정'을 한 경우가 아니라면, SEGS는 이미지가 없이 감지 영역 정보만 있습니다. 이 때 감지영역이 참조할 원본 이미지를 설정합니다." + } + }, + "outputs": { + "0": { + "name": "선택된 SEGS" + } + } + }, + + "SetDefaultImageForSEGS": { + "description": "디테일러를 통과시킨 경우가 아니라면, SEGS는 이미지가 없이 감지 영역 정보만 있습니다. 이 노드는 SEGS에 기본 이미지를 설정해 줍니다.", + "display_name": "SEGS에 기본 이미지 설정", + "inputs": { + "segs": { + "name": "segs" + }, + "image": { + "name": "이미지" + }, + "override": { + "name": "덮어쓰기", + "tooltip": "이미 설정된 이미지가 있는 경우 덮어쓸지 여부를 설정합니다." + } + }, + "outputs": { + "0": { + "name": "segs" + } + } + }, + + "ImpactWildcardProcessor": { + "description": "이 노드는 와일드카드 구문으로 작성된 텍스트 프롬프트를 처리하고, 처리된 텍스트 프롬프트를 출력합니다.\n\nTIP: 워크플로가 실행되기 전에 '와일드카드 텍스트'의 처리 결과가 '채워진(populated) 텍스트'에 표시되며, 이 값은 워크플로와 함께 저장됩니다. 입력으로 변환된 시드를 사용하려면 '와일드카드 텍스트' 대신 '채워진(populated) 텍스트'에 직접 프롬프트를 작성하고, 모드를 '고정(fixed)'로 설정하세요.", + "display_name": "와일드카드 처리기 (Impact)", + "inputs": { + "wildcard_text": { + "name": "와일드카드 텍스트", + "tooltip": "와일드카드 문법으로 작성된 텍스트 프롬프트를 입력하세요." + }, + "populated_text": { + "name": "채워진 텍스트", + "tooltip": "이 노드에 실행 중에 전달되는 실제 값은 여기 표시된 값입니다. 동작은 모드에 따라 약간 다를 수 있으며, '채워진 텍스트'에서도 와일드카드 구문을 사용할 수 있습니다." + }, + "mode": { + "name": "모드", + "tooltip": "채우기(populate): 워크플로를 실행하기 전에 '와일드카드 텍스트'에서 처리된 프롬프트로 '채워진 텍스트'의 기존 값을 덮어씁니다. 이 모드에서는 '채워진 텍스트'를 수정할 수 없습니다.\n\n고정(fixed): '와일드카드 텍스트'를 무시하고 '채워진 텍스트'의 값을 그대로 유지합니다. 이 모드에서는 '채워진 텍스트'를 수정할 수 있습니다.\n\n재현(reproduce): 이 모드는 한 번만 '고정(fixed)' 모드로 작동하여 재현한 후, 이후에는 '채우기(populate)' 모드로 전환됩니다." + }, + "seed": { + "name": "시드", + "tooltip": "와일드카드의 무작위 선택에 사용할 시드 입니다" + }, + "Select to add Wildcard": { + "name": "추가할 와일드카드 선택" + } + }, + "outputs": { + "0": { + "name": "처리된 텍스트" + } + } + }, + + "ImpactWildcardEncode": { + "description": "이 노드는 와일드카드 구문으로 작성된 텍스트 프롬프트를 처리하고 이를 조건으로 출력합니다. 또한 LoRA 구문을 지원하며, 적용된 LoRA는 모델 출력에 반영됩니다.\n\nTIP1: 워크플로가 실행되기 전에 '와일드카드 텍스트'의 처리 결과가 '채워진 텍스트'에 표시되며, 이 값은 워크플로와 함께 저장됩니다. 입력으로 변환된 시드를 사용하려면 '와일드카드 텍스트' 대신 '채워진 텍스트'에 직접 프롬프트를 작성하고, 모드를 '고정(fixed)'로 설정하세요.\nTIP2: 'Inspire Pack'이 설치되어 있으면 LBW(로라 블록 웨이트) 구문도 적용할 수 있습니다.", + "display_name": "와일드카드 인코딩 (Impact)", + "inputs": { + "wildcard_text": { + "name": "와일드카드 텍스트", + "tooltip": "와일드카드 문법으로 작성된 텍스트 프롬프트를 입력하세요." + }, + "populated_text": { + "name": "채워진 텍스트", + "tooltip": "이 노드에 실행 중에 전달되는 실제 값은 여기 표시된 값입니다. 동작은 모드에 따라 약간 다를 수 있으며, '채워진 텍스트'에서도 와일드카드 구문을 사용할 수 있습니다." + }, + "mode": { + "name": "모드", + "tooltip": "채우기(populate): 워크플로를 실행하기 전에 '와일드카드 텍스트'에서 처리된 프롬프트로 '채워진 텍스트'의 기존 값을 덮어씁니다. 이 모드에서는 '채워진 텍스트'를 수정할 수 없습니다.\n\n고정(fixed): '와일드카드 텍스트'를 무시하고 '채워진 텍스트'의 값을 그대로 유지합니다. 이 모드에서는 '채워진 텍스트'를 수정할 수 있습니다.\n\n재현(reproduce): 이 모드는 한 번만 '고정(fixed)' 모드로 작동하여 재현한 후, 이후에는 '채우기(populate)' 모드로 전환됩니다." + }, + "Select to add LoRA": { + "name": "추가할 LoRA 선택" + }, + "Select to add Wildcard": { + "name": "추가할 와일드카드 선택" + }, + "seed": { + "name": "시드", + "tooltip": "와일드카드의 무작위 선택에 사용할 시드 입니다" + } + }, + "outputs": { + "0": { + "name": "model", + "tooltip": "LoRA 적용 문법이 사용된 경우, LoRA 가 적용된 model이 출력됩니다." + }, + "1": { + "name": "clip", + "tooltip": "LoRA 적용 문법이 사용된 경우, LoRA 가 적용된 clip이 출력됩니다." 
+ }, + "2": { + "name": "조건" + }, + "3": { + "name": "채워진 텍스트" + } + } + } +} \ No newline at end of file diff --git a/zavodik/nodes/ComfyUI-Impact-Pack/modules/impact/__pycache__/animatediff_nodes.cpython-313.pyc b/zavodik/nodes/ComfyUI-Impact-Pack/modules/impact/__pycache__/animatediff_nodes.cpython-313.pyc new file mode 100644 index 0000000000000000000000000000000000000000..89f24102d56e9b583d8d7c4b994a1ab8d65080ab Binary files /dev/null and b/zavodik/nodes/ComfyUI-Impact-Pack/modules/impact/__pycache__/animatediff_nodes.cpython-313.pyc differ diff --git a/zavodik/nodes/ComfyUI-Impact-Pack/modules/impact/__pycache__/bridge_nodes.cpython-313.pyc b/zavodik/nodes/ComfyUI-Impact-Pack/modules/impact/__pycache__/bridge_nodes.cpython-313.pyc new file mode 100644 index 0000000000000000000000000000000000000000..f0dd5a122a03fdd4d1bcb383291baa53adc7472b Binary files /dev/null and b/zavodik/nodes/ComfyUI-Impact-Pack/modules/impact/__pycache__/bridge_nodes.cpython-313.pyc differ diff --git a/zavodik/nodes/ComfyUI-Impact-Pack/modules/impact/__pycache__/config.cpython-313.pyc b/zavodik/nodes/ComfyUI-Impact-Pack/modules/impact/__pycache__/config.cpython-313.pyc new file mode 100644 index 0000000000000000000000000000000000000000..f23f483989e657a550fbccfa055339a913c0de65 Binary files /dev/null and b/zavodik/nodes/ComfyUI-Impact-Pack/modules/impact/__pycache__/config.cpython-313.pyc differ diff --git a/zavodik/nodes/ComfyUI-Impact-Pack/modules/impact/__pycache__/core.cpython-313.pyc b/zavodik/nodes/ComfyUI-Impact-Pack/modules/impact/__pycache__/core.cpython-313.pyc new file mode 100644 index 0000000000000000000000000000000000000000..18cca9c4683a4a253ea4b506fb4b4acdc56e95aa --- /dev/null +++ b/zavodik/nodes/ComfyUI-Impact-Pack/modules/impact/__pycache__/core.cpython-313.pyc @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:9345264e19ea457ace45ad44b703b97c0fc9160b85a1a01358e475ec9b678c98 +size 113008 diff --git a/zavodik/nodes/ComfyUI-Impact-Pack/modules/impact/__pycache__/defs.cpython-313.pyc b/zavodik/nodes/ComfyUI-Impact-Pack/modules/impact/__pycache__/defs.cpython-313.pyc new file mode 100644 index 0000000000000000000000000000000000000000..8a242a79f462d79c4957cc270b9f840182c87edc Binary files /dev/null and b/zavodik/nodes/ComfyUI-Impact-Pack/modules/impact/__pycache__/defs.cpython-313.pyc differ diff --git a/zavodik/nodes/ComfyUI-Impact-Pack/modules/impact/__pycache__/detectors.cpython-313.pyc b/zavodik/nodes/ComfyUI-Impact-Pack/modules/impact/__pycache__/detectors.cpython-313.pyc new file mode 100644 index 0000000000000000000000000000000000000000..bdaea6af4f8390552d30dcb0ff23f81433c7e4bf Binary files /dev/null and b/zavodik/nodes/ComfyUI-Impact-Pack/modules/impact/__pycache__/detectors.cpython-313.pyc differ diff --git a/zavodik/nodes/ComfyUI-Impact-Pack/modules/impact/__pycache__/hf_nodes.cpython-313.pyc b/zavodik/nodes/ComfyUI-Impact-Pack/modules/impact/__pycache__/hf_nodes.cpython-313.pyc new file mode 100644 index 0000000000000000000000000000000000000000..8499a26fb6f24ce4f38ac6eea3ede476a2e307a8 Binary files /dev/null and b/zavodik/nodes/ComfyUI-Impact-Pack/modules/impact/__pycache__/hf_nodes.cpython-313.pyc differ diff --git a/zavodik/nodes/ComfyUI-Impact-Pack/modules/impact/__pycache__/hook_nodes.cpython-313.pyc b/zavodik/nodes/ComfyUI-Impact-Pack/modules/impact/__pycache__/hook_nodes.cpython-313.pyc new file mode 100644 index 0000000000000000000000000000000000000000..838038e6fc3b3f3468bbe69b9dd1ec13a10df56e Binary files /dev/null and 
b/zavodik/nodes/ComfyUI-Impact-Pack/modules/impact/__pycache__/hook_nodes.cpython-313.pyc differ diff --git a/zavodik/nodes/ComfyUI-Impact-Pack/modules/impact/__pycache__/hooks.cpython-313.pyc b/zavodik/nodes/ComfyUI-Impact-Pack/modules/impact/__pycache__/hooks.cpython-313.pyc new file mode 100644 index 0000000000000000000000000000000000000000..d7c206e58d8ada512c79cfa4488336c7ce58f63d Binary files /dev/null and b/zavodik/nodes/ComfyUI-Impact-Pack/modules/impact/__pycache__/hooks.cpython-313.pyc differ diff --git a/zavodik/nodes/ComfyUI-Impact-Pack/modules/impact/__pycache__/impact_pack.cpython-313.pyc b/zavodik/nodes/ComfyUI-Impact-Pack/modules/impact/__pycache__/impact_pack.cpython-313.pyc new file mode 100644 index 0000000000000000000000000000000000000000..5b3d7c20c62625f9b423e67bc1de1f316890bcdf --- /dev/null +++ b/zavodik/nodes/ComfyUI-Impact-Pack/modules/impact/__pycache__/impact_pack.cpython-313.pyc @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:25ac3814d1d7e1ab061e52e0ec9fb0d0e0806068d4dd2ed2b10f663e35662b30 +size 121127 diff --git a/zavodik/nodes/ComfyUI-Impact-Pack/modules/impact/__pycache__/impact_sampling.cpython-313.pyc b/zavodik/nodes/ComfyUI-Impact-Pack/modules/impact/__pycache__/impact_sampling.cpython-313.pyc new file mode 100644 index 0000000000000000000000000000000000000000..d9958a3da6a4a0b61b9e38a584a1f770e0d2a046 Binary files /dev/null and b/zavodik/nodes/ComfyUI-Impact-Pack/modules/impact/__pycache__/impact_sampling.cpython-313.pyc differ diff --git a/zavodik/nodes/ComfyUI-Impact-Pack/modules/impact/__pycache__/impact_server.cpython-313.pyc b/zavodik/nodes/ComfyUI-Impact-Pack/modules/impact/__pycache__/impact_server.cpython-313.pyc new file mode 100644 index 0000000000000000000000000000000000000000..fa40b26c007416c68b20f8cb3aa09d8d1bad3cb3 Binary files /dev/null and b/zavodik/nodes/ComfyUI-Impact-Pack/modules/impact/__pycache__/impact_server.cpython-313.pyc differ diff --git a/zavodik/nodes/ComfyUI-Impact-Pack/modules/impact/__pycache__/logics.cpython-313.pyc b/zavodik/nodes/ComfyUI-Impact-Pack/modules/impact/__pycache__/logics.cpython-313.pyc new file mode 100644 index 0000000000000000000000000000000000000000..0e314a89b2257e711a05baecfcf60451895f9a4a Binary files /dev/null and b/zavodik/nodes/ComfyUI-Impact-Pack/modules/impact/__pycache__/logics.cpython-313.pyc differ diff --git a/zavodik/nodes/ComfyUI-Impact-Pack/modules/impact/__pycache__/pipe.cpython-313.pyc b/zavodik/nodes/ComfyUI-Impact-Pack/modules/impact/__pycache__/pipe.cpython-313.pyc new file mode 100644 index 0000000000000000000000000000000000000000..dc215464f0694845f4c5b5a92ab60c2dce2923ae Binary files /dev/null and b/zavodik/nodes/ComfyUI-Impact-Pack/modules/impact/__pycache__/pipe.cpython-313.pyc differ diff --git a/zavodik/nodes/ComfyUI-Impact-Pack/modules/impact/__pycache__/segs_nodes.cpython-313.pyc b/zavodik/nodes/ComfyUI-Impact-Pack/modules/impact/__pycache__/segs_nodes.cpython-313.pyc new file mode 100644 index 0000000000000000000000000000000000000000..011de45bbeee894b83a5f64e7a66b6168d4970b2 Binary files /dev/null and b/zavodik/nodes/ComfyUI-Impact-Pack/modules/impact/__pycache__/segs_nodes.cpython-313.pyc differ diff --git a/zavodik/nodes/ComfyUI-Impact-Pack/modules/impact/__pycache__/segs_upscaler.cpython-313.pyc b/zavodik/nodes/ComfyUI-Impact-Pack/modules/impact/__pycache__/segs_upscaler.cpython-313.pyc new file mode 100644 index 0000000000000000000000000000000000000000..27dd9864b5009ed550c599bcb8ff28306708ea13 Binary files /dev/null and 
b/zavodik/nodes/ComfyUI-Impact-Pack/modules/impact/__pycache__/segs_upscaler.cpython-313.pyc differ
diff --git a/zavodik/nodes/ComfyUI-Impact-Pack/modules/impact/__pycache__/special_samplers.cpython-313.pyc b/zavodik/nodes/ComfyUI-Impact-Pack/modules/impact/__pycache__/special_samplers.cpython-313.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..26b3511b4c8e9558f2468a5f33a11026a40ba197
Binary files /dev/null and b/zavodik/nodes/ComfyUI-Impact-Pack/modules/impact/__pycache__/special_samplers.cpython-313.pyc differ
diff --git a/zavodik/nodes/ComfyUI-Impact-Pack/modules/impact/__pycache__/util_nodes.cpython-313.pyc b/zavodik/nodes/ComfyUI-Impact-Pack/modules/impact/__pycache__/util_nodes.cpython-313.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..a715ca98aecb521165ee119769f1038e8cc00b6d
Binary files /dev/null and b/zavodik/nodes/ComfyUI-Impact-Pack/modules/impact/__pycache__/util_nodes.cpython-313.pyc differ
diff --git a/zavodik/nodes/ComfyUI-Impact-Pack/modules/impact/__pycache__/utils.cpython-313.pyc b/zavodik/nodes/ComfyUI-Impact-Pack/modules/impact/__pycache__/utils.cpython-313.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..ad36eff8487e8c3c4d9b4c6ad53b6dc5505293a0
Binary files /dev/null and b/zavodik/nodes/ComfyUI-Impact-Pack/modules/impact/__pycache__/utils.cpython-313.pyc differ
diff --git a/zavodik/nodes/ComfyUI-Impact-Pack/modules/impact/__pycache__/wildcards.cpython-313.pyc b/zavodik/nodes/ComfyUI-Impact-Pack/modules/impact/__pycache__/wildcards.cpython-313.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..25903fc9cdb49d5444a4095a5230791433545a0f
Binary files /dev/null and b/zavodik/nodes/ComfyUI-Impact-Pack/modules/impact/__pycache__/wildcards.cpython-313.pyc differ
diff --git a/zavodik/nodes/ComfyUI-Impact-Pack/modules/impact/additional_dependencies.py b/zavodik/nodes/ComfyUI-Impact-Pack/modules/impact/additional_dependencies.py
new file mode 100644
index 0000000000000000000000000000000000000000..fe00f1c785977fd5ed23ad7c8a5bc1bdad9afc09
--- /dev/null
+++ b/zavodik/nodes/ComfyUI-Impact-Pack/modules/impact/additional_dependencies.py
@@ -0,0 +1,10 @@
+import sys
+import subprocess
+
+
+def ensure_onnx_package():
+    try:
+        import onnxruntime  # noqa: F401
+    except Exception:
+        # The same install command works for both the embedded ("python_embeded") and the system Python.
+        subprocess.check_call([sys.executable, '-s', '-m', 'pip', 'install', 'onnxruntime'])
diff --git a/zavodik/nodes/ComfyUI-Impact-Pack/modules/impact/animatediff_nodes.py b/zavodik/nodes/ComfyUI-Impact-Pack/modules/impact/animatediff_nodes.py
new file mode 100644
index 0000000000000000000000000000000000000000..396dece743ee63d052f57ca7e340572e3590d134
--- /dev/null
+++ b/zavodik/nodes/ComfyUI-Impact-Pack/modules/impact/animatediff_nodes.py
@@ -0,0 +1,200 @@
+from nodes import MAX_RESOLUTION
+import impact.core as core
+from impact.core import SEG
+from impact.segs_nodes import SEGSPaste
+import comfy
+from impact import utils
+import torch
+import nodes
+import logging
+
+try:
+    from comfy_extras import nodes_differential_diffusion
+except Exception:
+    logging.warning("\n#############################################\n[Impact Pack] ComfyUI is an outdated version.\n#############################################\n")
+    raise Exception("[Impact Pack] ComfyUI is an outdated version.")
+
+
+class SEGSDetailerForAnimateDiff:
+    @classmethod
+    def
INPUT_TYPES(cls): + return {"required": { + "image_frames": ("IMAGE", ), + "segs": ("SEGS", ), + "guide_size": ("FLOAT", {"default": 512, "min": 64, "max": MAX_RESOLUTION, "step": 8}), + "guide_size_for": ("BOOLEAN", {"default": True, "label_on": "bbox", "label_off": "crop_region"}), + "max_size": ("FLOAT", {"default": 768, "min": 64, "max": MAX_RESOLUTION, "step": 8}), + "seed": ("INT", {"default": 0, "min": 0, "max": 0xffffffffffffffff}), + "steps": ("INT", {"default": 20, "min": 1, "max": 10000}), + "cfg": ("FLOAT", {"default": 8.0, "min": 0.0, "max": 100.0}), + "sampler_name": (comfy.samplers.KSampler.SAMPLERS,), + "scheduler": (core.get_schedulers(),), + "denoise": ("FLOAT", {"default": 0.5, "min": 0.0001, "max": 1.0, "step": 0.01}), + "basic_pipe": ("BASIC_PIPE", {"tooltip": "If the `ImpactDummyInput` is connected to the model in the basic_pipe, the inference stage is skipped."}), + "refiner_ratio": ("FLOAT", {"default": 0.2, "min": 0.0, "max": 1.0}), + }, + "optional": { + "refiner_basic_pipe_opt": ("BASIC_PIPE",), + "noise_mask_feather": ("INT", {"default": 20, "min": 0, "max": 100, "step": 1}), + "scheduler_func_opt": ("SCHEDULER_FUNC",), + } + } + + RETURN_TYPES = ("SEGS", "IMAGE") + RETURN_NAMES = ("segs", "cnet_images") + OUTPUT_IS_LIST = (False, True) + + FUNCTION = "doit" + + CATEGORY = "ImpactPack/Detailer" + + DESCRIPTION = "This node enhances details by inpainting each region within the detected area bundle (SEGS) after enlarging them based on the guide size.\nThis node is applied specifically to SEGS rather than the entire image. To apply it to the entire image, use the 'SEGS Paste' node.\nAs a specialized detailer node for improving video details, such as in AnimateDiff, this node can handle cases where the masks contained in SEGS serve as batch masks spanning multiple frames." + + @staticmethod + def do_detail(image_frames, segs, guide_size, guide_size_for, max_size, seed, steps, cfg, sampler_name, scheduler, + denoise, basic_pipe, refiner_ratio=None, refiner_basic_pipe_opt=None, noise_mask_feather=0, scheduler_func_opt=None): + + model, clip, vae, positive, negative = basic_pipe + if refiner_basic_pipe_opt is None: + refiner_model, refiner_clip, refiner_positive, refiner_negative = None, None, None, None + else: + refiner_model, refiner_clip, _, refiner_positive, refiner_negative = refiner_basic_pipe_opt + + segs = core.segs_scale_match(segs, image_frames.shape) + + new_segs = [] + cnet_image_list = [] + + if not (isinstance(model, str) and model == "DUMMY") and noise_mask_feather > 0 and 'denoise_mask_function' not in model.model_options: + model = nodes_differential_diffusion.DifferentialDiffusion().execute(model)[0] + + for seg in segs[1]: + cropped_image_frames = None + + for image in image_frames: + image = image.unsqueeze(0) + cropped_image = seg.cropped_image if seg.cropped_image is not None else utils.crop_tensor4(image, seg.crop_region) + cropped_image = utils.to_tensor(cropped_image) + if cropped_image_frames is None: + cropped_image_frames = cropped_image + else: + cropped_image_frames = torch.concat((cropped_image_frames, cropped_image), dim=0) + + cropped_image_frames = cropped_image_frames.cpu().numpy() + + # It is assumed that AnimateDiff does not support conditioning masks based on test results, but it will be added for future consideration. 
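+            # Illustrative sketch (hypothetical values; structure taken from the comprehensions below):
+            # each entry of `positive`/`negative` is a [cond, options] pair, and only the "mask"
+            # option, when present, is cropped to the segment's crop_region, e.g.
+            #   [cond, {"mask": full_mask, ...other options...}]
+            #     -> [cond, {"mask": cropped_mask, ...other options...}]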
+ cropped_positive = [ + [condition, { + k: core.crop_condition_mask(v, cropped_image_frames, seg.crop_region) if k == "mask" else v + for k, v in details.items() + }] + for condition, details in positive + ] + + cropped_negative = [ + [condition, { + k: core.crop_condition_mask(v, cropped_image_frames, seg.crop_region) if k == "mask" else v + for k, v in details.items() + }] + for condition, details in negative + ] + + if not (isinstance(model, str) and model == "DUMMY"): + enhanced_image_tensor, cnet_images = core.enhance_detail_for_animatediff(cropped_image_frames, model, clip, vae, guide_size, guide_size_for, max_size, + seg.bbox, seed, steps, cfg, sampler_name, scheduler, + cropped_positive, cropped_negative, denoise, seg.cropped_mask, + refiner_ratio=refiner_ratio, refiner_model=refiner_model, + refiner_clip=refiner_clip, refiner_positive=refiner_positive, + refiner_negative=refiner_negative, control_net_wrapper=seg.control_net_wrapper, + noise_mask_feather=noise_mask_feather, scheduler_func=scheduler_func_opt) + else: + enhanced_image_tensor = cropped_image_frames + cnet_images = None + + if cnet_images is not None: + cnet_image_list.extend(cnet_images) + + if enhanced_image_tensor is None: + new_cropped_image = cropped_image_frames + else: + new_cropped_image = enhanced_image_tensor.cpu().numpy() + + new_seg = SEG(new_cropped_image, seg.cropped_mask, seg.confidence, seg.crop_region, seg.bbox, seg.label, None) + new_segs.append(new_seg) + + return (segs[0], new_segs), cnet_image_list + + def doit(self, image_frames, segs, guide_size, guide_size_for, max_size, seed, steps, cfg, sampler_name, scheduler, + denoise, basic_pipe, refiner_ratio=None, refiner_basic_pipe_opt=None, inpaint_model=False, noise_mask_feather=0, scheduler_func_opt=None): + + segs, cnet_images = SEGSDetailerForAnimateDiff.do_detail(image_frames, segs, guide_size, guide_size_for, max_size, seed, steps, cfg, sampler_name, + scheduler, denoise, basic_pipe, refiner_ratio, refiner_basic_pipe_opt, + noise_mask_feather=noise_mask_feather, scheduler_func_opt=scheduler_func_opt) + + if len(cnet_images) == 0: + cnet_images = [utils.empty_pil_tensor()] + + return (segs, cnet_images) + + +class DetailerForEachPipeForAnimateDiff: + @classmethod + def INPUT_TYPES(cls): + return {"required": { + "image_frames": ("IMAGE", ), + "segs": ("SEGS", ), + "guide_size": ("FLOAT", {"default": 512, "min": 64, "max": nodes.MAX_RESOLUTION, "step": 8}), + "guide_size_for": ("BOOLEAN", {"default": True, "label_on": "bbox", "label_off": "crop_region"}), + "max_size": ("FLOAT", {"default": 1024, "min": 64, "max": nodes.MAX_RESOLUTION, "step": 8}), + "seed": ("INT", {"default": 0, "min": 0, "max": 0xffffffffffffffff}), + "steps": ("INT", {"default": 20, "min": 1, "max": 10000}), + "cfg": ("FLOAT", {"default": 8.0, "min": 0.0, "max": 100.0}), + "sampler_name": (comfy.samplers.KSampler.SAMPLERS,), + "scheduler": (core.get_schedulers(),), + "denoise": ("FLOAT", {"default": 0.5, "min": 0.0001, "max": 1.0, "step": 0.01}), + "feather": ("INT", {"default": 5, "min": 0, "max": 100, "step": 1}), + "basic_pipe": ("BASIC_PIPE", {"tooltip": "If the `ImpactDummyInput` is connected to the model in the basic_pipe, the inference stage is skipped."}), + "refiner_ratio": ("FLOAT", {"default": 0.2, "min": 0.0, "max": 1.0}), + }, + "optional": { + "detailer_hook": ("DETAILER_HOOK",), + "refiner_basic_pipe_opt": ("BASIC_PIPE",), + "noise_mask_feather": ("INT", {"default": 20, "min": 0, "max": 100, "step": 1}), + "scheduler_func_opt": ("SCHEDULER_FUNC",), + } + } + + 
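+    # A BASIC_PIPE bundles the five components that doit() forwards to
+    # SEGSDetailerForAnimateDiff.do_detail above, which unpacks it as:
+    #   model, clip, vae, positive, negative = basic_pipe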
RETURN_TYPES = ("IMAGE", "SEGS", "BASIC_PIPE", "IMAGE") + RETURN_NAMES = ("image", "segs", "basic_pipe", "cnet_images") + OUTPUT_IS_LIST = (False, False, False, True) + FUNCTION = "doit" + + CATEGORY = "ImpactPack/Detailer" + + DESCRIPTION = "This node enhances details by inpainting each region within the detected area bundle (SEGS) after enlarging them based on the guide size.\nThis node is a specialized detailer node for enhancing video details, such as in AnimateDiff. It can handle cases where the masks contained in SEGS serve as batch masks spanning multiple frames." + + @staticmethod + def doit(image_frames, segs, guide_size, guide_size_for, max_size, seed, steps, cfg, sampler_name, scheduler, + denoise, feather, basic_pipe, refiner_ratio=None, detailer_hook=None, refiner_basic_pipe_opt=None, + noise_mask_feather=0, scheduler_func_opt=None): + + enhanced_segs = [] + cnet_image_list = [] + + for sub_seg in segs[1]: + single_seg = segs[0], [sub_seg] + enhanced_seg, cnet_images = SEGSDetailerForAnimateDiff().do_detail(image_frames, single_seg, guide_size, guide_size_for, max_size, seed, steps, cfg, sampler_name, scheduler, + denoise, basic_pipe, refiner_ratio, refiner_basic_pipe_opt, noise_mask_feather, scheduler_func_opt=scheduler_func_opt) + + image_frames = SEGSPaste.doit(image_frames, enhanced_seg, feather, alpha=255)[0] + + if cnet_images is not None: + cnet_image_list.extend(cnet_images) + + if detailer_hook is not None: + image_frames = detailer_hook.post_paste(image_frames) + + enhanced_segs += enhanced_seg[1] + + new_segs = segs[0], enhanced_segs + return image_frames, new_segs, basic_pipe, cnet_image_list diff --git a/zavodik/nodes/ComfyUI-Impact-Pack/modules/impact/bridge_nodes.py b/zavodik/nodes/ComfyUI-Impact-Pack/modules/impact/bridge_nodes.py new file mode 100644 index 0000000000000000000000000000000000000000..15a96959ef8cb59f30dfa0a131562e9d9f892289 --- /dev/null +++ b/zavodik/nodes/ComfyUI-Impact-Pack/modules/impact/bridge_nodes.py @@ -0,0 +1,490 @@ +import os +from PIL import ImageOps +import logging +import folder_paths +import torch +import nodes +from PIL import Image +import numpy as np +from impact import utils + +# NOTE: this should not be `from . import core`. +# I don't know why but... 'from .' and 'from impact' refer to different core modules. +# This separates global variables of the core module and breaks the preview bridge. 
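+# For example (illustrative; the outcome depends on how ComfyUI puts this package
+# on sys.path): Python caches modules in sys.modules keyed by import name, so the
+# two spellings may resolve to two different entries and therefore two separate
+# module objects, each with its own preview_bridge_* globals:
+#   import impact.core as a
+#   from . import core as b   # inside this package
+#   a is b                    # may be False, so a.preview_bridge_cache is not b.preview_bridge_cache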
+from impact import core
+# <--
+import random
+
+
+class PreviewBridge:
+    @classmethod
+    def INPUT_TYPES(s):
+        return {"required": {
+                    "images": ("IMAGE",),
+                    "image": ("STRING", {"default": ""}),
+                },
+                "optional": {
+                    "block": ("BOOLEAN", {"default": False, "label_on": "if_empty_mask", "label_off": "never", "tooltip": "if_empty_mask: If the mask is empty, the execution is stopped.\nnever: The execution is never stopped."}),
+                    "restore_mask": (["never", "always", "if_same_size"], {"tooltip": "if_same_size: If the changed input image is the same size as the previous image, restore using the last saved mask\nalways: Whenever the input image changes, always restore using the last saved mask\nnever: Do not restore the mask.\n`restore_mask` has higher priority than `block`"}),
+                },
+                "hidden": {"unique_id": "UNIQUE_ID", "extra_pnginfo": "EXTRA_PNGINFO"},
+                }
+
+    RETURN_TYPES = ("IMAGE", "MASK", )
+
+    FUNCTION = "doit"
+
+    OUTPUT_NODE = True
+
+    CATEGORY = "ImpactPack/Util"
+
+    DESCRIPTION = "This is a feature that allows you to edit and send a Mask over an image.\nIf the block is set to 'if_empty_mask', the execution is stopped when the mask is empty."
+
+    def __init__(self):
+        super().__init__()
+        self.output_dir = folder_paths.get_temp_directory()
+        self.type = "temp"
+        self.prev_hash = None
+
+    @staticmethod
+    def load_image(pb_id):
+        is_fail = False
+        if pb_id not in core.preview_bridge_image_id_map:
+            is_fail = True
+
+        if not is_fail:
+            image_path, ui_item = core.preview_bridge_image_id_map[pb_id]
+            if not os.path.isfile(image_path):
+                is_fail = True
+
+        if not is_fail:
+            i = Image.open(image_path)
+            i = ImageOps.exif_transpose(i)
+            image = i.convert("RGB")
+            image = np.array(image).astype(np.float32) / 255.0
+            image = torch.from_numpy(image)[None,]
+
+            if 'A' in i.getbands():
+                mask = np.array(i.getchannel('A')).astype(np.float32) / 255.0
+                mask = 1. - torch.from_numpy(mask)
+            else:
+                mask = torch.zeros((64, 64), dtype=torch.float32, device="cpu")
+        else:
+            image = utils.empty_pil_tensor()
+            mask = torch.zeros((64, 64), dtype=torch.float32, device="cpu")
+            ui_item = {
+                "filename": 'empty.png',
+                "subfolder": '',
+                "type": 'temp'
+            }
+
+        return image, mask.unsqueeze(0), ui_item
+
+    @staticmethod
+    def register_clipspace_image(clipspace_path, node_id):
+        """Register a clipspace image file in the preview bridge system.
+
+        This handles the case where ComfyUI's mask editor creates clipspace files
+        that need to be integrated with the preview bridge system.
+        """
+        # Remove [input] suffix if present
+        clean_path = clipspace_path.replace(" [input]", "").replace("[input]", "")
+
+        # Try to find the actual clipspace file
+        input_dir = folder_paths.get_input_directory()
+        potential_paths = [
+            clean_path,
+            os.path.join(input_dir, clean_path),
+            os.path.join(input_dir, "clipspace", os.path.basename(clean_path)),
+            os.path.abspath(clean_path),
+        ]
+
+        actual_file = None
+        for path in potential_paths:
+            if os.path.isfile(path):
+                actual_file = path
+                break
+
+        if not actual_file:
+            return False
+
+        # Create ui_item for the clipspace file
+        ui_item = {
+            'filename': os.path.basename(actual_file),
+            'subfolder': 'clipspace',
+            'type': 'input'
+        }
+
+        # Register it using the preview bridge system
+        core.set_previewbridge_image(node_id, actual_file, ui_item)
+        # Also register under the original clipspace path for compatibility
+        core.preview_bridge_image_id_map[clipspace_path] = (actual_file, ui_item)
+
+        return True
+
+    def doit(self, images, image, unique_id, block=False, restore_mask="never", prompt=None, extra_pnginfo=None):
+        need_refresh = False
+        images_changed = False
+
+        # Check if images have changed (this determines if we start fresh)
+        if unique_id not in core.preview_bridge_cache:
+            need_refresh = True
+            images_changed = True
+        elif core.preview_bridge_cache[unique_id][0] is not images:
+            need_refresh = True
+            images_changed = True
+
+        # If images changed, clear the mask cache to ensure fresh start behavior
+        # This restores the original behavior where new images start with empty masks
+        # unless restore_mask is set to "always" or "if_same_size"
+        if images_changed and restore_mask not in ["always", "if_same_size"] and unique_id in core.preview_bridge_last_mask_cache:
+            del core.preview_bridge_last_mask_cache[unique_id]
+
+        # Handle clipspace files that aren't registered in the preview bridge system
+        # This only applies when images haven't changed (same image, new mask scenario)
+        if not need_refresh and image not in core.preview_bridge_image_id_map:
+            # Check if this is a clipspace file that needs to be registered
+            is_clipspace = image and ("clipspace" in image.lower() or "[input]" in image)
+            if is_clipspace:
+                if not PreviewBridge.register_clipspace_image(image, unique_id):
+                    need_refresh = True
+            else:
+                need_refresh = True
+
+        if not need_refresh:
+            pixels, mask, path_item = PreviewBridge.load_image(image)
+            image = [path_item]
+        else:
+            # For new images (images_changed=True), we want to start fresh regardless of restore_mask
+            # For same image with refresh needed, respect the restore_mask setting
+            # Exception: when restore_mask is "always", restore even with new images
+            # Exception: when restore_mask is "if_same_size", allow restoration to check size compatibility
+            if restore_mask != "never" and (not images_changed or restore_mask in ["always", "if_same_size"]):
+                mask = core.preview_bridge_last_mask_cache.get(unique_id)
+                if mask is not None and restore_mask == "if_same_size" and mask.shape[1:] != images.shape[1:3]:
+                    # For if_same_size, clear mask if dimensions don't match
+                    mask = None
+                # For "always", keep the mask regardless of size
+            else:
+                mask = None
+
+            if mask is None:
+                mask = torch.zeros((64, 64), dtype=torch.float32, device="cpu")
+                res = nodes.PreviewImage().save_images(images, filename_prefix="PreviewBridge/PB-", prompt=prompt, extra_pnginfo=extra_pnginfo)
+            else:
+                masked_images = utils.tensor_convert_rgba(images)
+                resized_mask = utils.resize_mask(mask, (images.shape[1],
images.shape[2])).unsqueeze(3) + resized_mask = 1 - resized_mask + utils.tensor_putalpha(masked_images, resized_mask) + res = nodes.PreviewImage().save_images(masked_images, filename_prefix="PreviewBridge/PB-", prompt=prompt, extra_pnginfo=extra_pnginfo) + + image2 = res['ui']['images'] + pixels = images + + path = os.path.join(folder_paths.get_temp_directory(), 'PreviewBridge', image2[0]['filename']) + core.set_previewbridge_image(unique_id, path, image2[0]) + core.preview_bridge_image_id_map[image] = (path, image2[0]) + core.preview_bridge_image_name_map[unique_id, path] = (image, image2[0]) + core.preview_bridge_cache[unique_id] = (images, image2) + + image = image2 + + is_empty_mask = torch.all(mask == 0) + + if block and is_empty_mask and core.is_execution_model_version_supported(): + from comfy_execution.graph import ExecutionBlocker + result = ExecutionBlocker(None), ExecutionBlocker(None) + elif block and is_empty_mask: + logging.warning("[Impact Pack] PreviewBridge: ComfyUI is outdated - blocking feature is disabled.") + result = pixels, mask + else: + result = pixels, mask + + if not is_empty_mask: + core.preview_bridge_last_mask_cache[unique_id] = mask + + return { + "ui": {"images": image}, + "result": result, + } + + +def decode_latent(latent, preview_method, vae_opt=None): + if vae_opt is not None: + image = nodes.VAEDecode().decode(vae_opt, latent)[0] + return image + + from comfy.cli_args import LatentPreviewMethod + import comfy.latent_formats as latent_formats + + if preview_method.startswith("TAE"): + decoder_name = None + + if preview_method == "TAESD15": + decoder_name = "taesd" + elif preview_method == 'TAESDXL': + decoder_name = "taesdxl" + elif preview_method == 'TAESD3': + decoder_name = "taesd3" + elif preview_method == 'TAEF1': + decoder_name = "taef1" + + if decoder_name: + vae = nodes.VAELoader().load_vae(decoder_name)[0] + image = nodes.VAEDecode().decode(vae, latent)[0] + return image + + if preview_method == "Latent2RGB-SD15": + latent_format = latent_formats.SD15() + method = LatentPreviewMethod.Latent2RGB + elif preview_method == "Latent2RGB-SDXL": + latent_format = latent_formats.SDXL() + method = LatentPreviewMethod.Latent2RGB + elif preview_method == "Latent2RGB-SD3": + latent_format = latent_formats.SD3() + method = LatentPreviewMethod.Latent2RGB + elif preview_method == "Latent2RGB-SD-X4": + latent_format = latent_formats.SD_X4() + method = LatentPreviewMethod.Latent2RGB + elif preview_method == "Latent2RGB-Playground-2.5": + latent_format = latent_formats.SDXL_Playground_2_5() + method = LatentPreviewMethod.Latent2RGB + elif preview_method == "Latent2RGB-SC-Prior": + latent_format = latent_formats.SC_Prior() + method = LatentPreviewMethod.Latent2RGB + elif preview_method == "Latent2RGB-SC-B": + latent_format = latent_formats.SC_B() + method = LatentPreviewMethod.Latent2RGB + elif preview_method == "Latent2RGB-FLUX.1": + latent_format = latent_formats.Flux() + method = LatentPreviewMethod.Latent2RGB + elif preview_method == "Latent2RGB-LTXV": + latent_format = latent_formats.LTXV() + method = LatentPreviewMethod.Latent2RGB + else: + logging.warning(f"[Impact Pack] PreviewBridgeLatent: '{preview_method}' is unsupported preview method.") + latent_format = latent_formats.SD15() + method = LatentPreviewMethod.Latent2RGB + + previewer = core.get_previewer("cpu", latent_format=latent_format, force=True, method=method) + samples = latent_format.process_in(latent['samples']) + + pil_image = previewer.decode_latent_to_preview(samples) + pixels_size = 
pil_image.size[0]*8, pil_image.size[1]*8
+    resized_image = pil_image.resize(pixels_size, resample=utils.LANCZOS)
+
+    return utils.to_tensor(resized_image).unsqueeze(0)
+
+
+class PreviewBridgeLatent:
+    @classmethod
+    def INPUT_TYPES(s):
+        return {"required": {
+                    "latent": ("LATENT",),
+                    "image": ("STRING", {"default": ""}),
+                    "preview_method": (["Latent2RGB-FLUX.1",
+                                        "Latent2RGB-SDXL", "Latent2RGB-SD15", "Latent2RGB-SD3",
+                                        "Latent2RGB-SD-X4", "Latent2RGB-Playground-2.5",
+                                        "Latent2RGB-SC-Prior", "Latent2RGB-SC-B",
+                                        "Latent2RGB-LTXV",
+                                        "TAEF1", "TAESDXL", "TAESD15", "TAESD3"],),
+                },
+                "optional": {
+                    "vae_opt": ("VAE", ),
+                    "block": ("BOOLEAN", {"default": False, "label_on": "if_empty_mask", "label_off": "never", "tooltip": "if_empty_mask: If the mask is empty, the execution is stopped.\nnever: The execution is never stopped. Instead, it returns a white mask."}),
+                    "restore_mask": (["never", "always", "if_same_size"], {"tooltip": "if_same_size: If the changed input latent is the same size as the previous latent, restore using the last saved mask\nalways: Whenever the input latent changes, always restore using the last saved mask\nnever: Do not restore the mask.\n`restore_mask` has higher priority than `block`\nIf the input latent already has a mask, do not restore mask."}),
+                },
+                "hidden": {"unique_id": "UNIQUE_ID", "prompt": "PROMPT", "extra_pnginfo": "EXTRA_PNGINFO"},
+                }
+
+    RETURN_TYPES = ("LATENT", "MASK", )
+
+    FUNCTION = "doit"
+
+    OUTPUT_NODE = True
+
+    CATEGORY = "ImpactPack/Util"
+
+    DESCRIPTION = "This is a feature that allows you to edit and send a Mask over a latent image.\nIf the block is set to 'if_empty_mask', the execution is stopped when the mask is empty."
+
+    def __init__(self):
+        super().__init__()
+        self.output_dir = folder_paths.get_temp_directory()
+        self.type = "temp"
+        self.prev_hash = None
+        self.prefix_append = "_temp_" + ''.join(random.choice("abcdefghijklmnopqrstuvwxyz") for x in range(5))
+
+    @staticmethod
+    def load_image(pb_id):
+        is_fail = False
+        if pb_id not in core.preview_bridge_image_id_map:
+            is_fail = True
+
+        if not is_fail:
+            image_path, ui_item = core.preview_bridge_image_id_map[pb_id]
+            if not os.path.isfile(image_path):
+                is_fail = True
+
+        if not is_fail:
+            i = Image.open(image_path)
+            i = ImageOps.exif_transpose(i)
+            image = i.convert("RGB")
+            image = np.array(image).astype(np.float32) / 255.0
+            image = torch.from_numpy(image)[None,]
+
+            if 'A' in i.getbands():
+                mask = np.array(i.getchannel('A')).astype(np.float32) / 255.0
+                mask = 1.
- torch.from_numpy(mask) + else: + mask = None + else: + image = utils.empty_pil_tensor() + mask = None + ui_item = { + "filename": 'empty.png', + "subfolder": '', + "type": 'temp' + } + + return image, mask, ui_item + + def doit(self, latent, image, preview_method, vae_opt=None, block=False, unique_id=None, restore_mask='never', prompt=None, extra_pnginfo=None): + latent_channels = latent['samples'].shape[1] + + if 'SD3' in preview_method or 'SC-Prior' in preview_method or 'FLUX.1' in preview_method or 'TAEF1' == preview_method: + preview_method_channels = 16 + elif 'LTXV' in preview_method: + preview_method_channels = 128 + else: + preview_method_channels = 4 + + if vae_opt is None and latent_channels != preview_method_channels: + logging.warning("[PreviewBridgeLatent] The version of latent is not compatible with preview_method.\nSD3, SD1/SD2, SDXL, SC-Prior, SC-B and FLUX.1 are not compatible with each other.") + raise Exception("The version of latent is not compatible with preview_method.SD3, SD1/SD2, SDXL, SC-Prior, SC-B and FLUX.1 are not compatible with each other.") + + need_refresh = False + latent_changed = False + + # Check if latent has changed + if unique_id not in core.preview_bridge_cache: + need_refresh = True + latent_changed = True + elif (core.preview_bridge_cache[unique_id][0] is not latent + or (vae_opt is None and core.preview_bridge_cache[unique_id][2] is not None) + or (vae_opt is None and core.preview_bridge_cache[unique_id][1] != preview_method) + or (vae_opt is not None and core.preview_bridge_cache[unique_id][2] is not vae_opt)): + need_refresh = True + latent_changed = True + + # If latent changed, clear the mask cache to ensure fresh start behavior + # unless restore_mask is set to "always" or "if_same_size" + if latent_changed and restore_mask not in ["always", "if_same_size"] and unique_id in core.preview_bridge_last_mask_cache: + del core.preview_bridge_last_mask_cache[unique_id] + + # Handle clipspace files that aren't registered in the preview bridge system + # This only applies when latent hasn't changed (same latent, new mask scenario) + if not need_refresh and image not in core.preview_bridge_image_id_map: + is_clipspace = image and ("clipspace" in image.lower() or "[input]" in image) + if is_clipspace: + if not PreviewBridge.register_clipspace_image(image, unique_id): + need_refresh = True + else: + need_refresh = True + + if not need_refresh: + pixels, mask, path_item = PreviewBridge.load_image(image) + + if mask is None: + mask = torch.ones(latent['samples'].shape[2:], dtype=torch.float32, device="cpu").unsqueeze(0) + if 'noise_mask' in latent: + res_latent = latent.copy() + del res_latent['noise_mask'] + else: + res_latent = latent + + is_empty_mask = True + else: + res_latent = latent.copy() + res_latent['noise_mask'] = mask + + is_empty_mask = torch.all(mask == 1) + + res_image = [path_item] + else: + decoded_image = decode_latent(latent, preview_method, vae_opt) + + if 'noise_mask' in latent: + mask = latent['noise_mask'].squeeze(0) # 4D mask -> 3D mask + + decoded_pil = utils.to_pil(decoded_image) + + inverted_mask = 1 - mask # invert + resized_mask = utils.resize_mask(inverted_mask, (decoded_image.shape[1], decoded_image.shape[2])) + result_pil = utils.apply_mask_alpha_to_pil(decoded_pil, resized_mask) + + full_output_folder, filename, counter, _, _ = folder_paths.get_save_image_path("PreviewBridge/PBL-"+self.prefix_append, folder_paths.get_temp_directory(), result_pil.size[0], result_pil.size[1]) + file = f"{filename}_{counter}.png" + 
result_pil.save(os.path.join(full_output_folder, file), compress_level=4)
+                res_image = [{
+                    'filename': file,
+                    'subfolder': 'PreviewBridge',
+                    'type': 'temp',
+                }]
+
+                is_empty_mask = False
+            else:
+                # For new latents (latent_changed=True), start fresh regardless of restore_mask
+                # For same latent with refresh needed, respect the restore_mask setting
+                # Exception: when restore_mask is "always", restore even with new latents
+                # Exception: when restore_mask is "if_same_size", allow restoration to check size compatibility
+                if restore_mask != "never" and (not latent_changed or restore_mask in ["always", "if_same_size"]):
+                    mask = core.preview_bridge_last_mask_cache.get(unique_id)
+                    if mask is not None and restore_mask == "if_same_size" and mask.shape[1:] != decoded_image.shape[1:3]:
+                        # For if_same_size, clear mask if dimensions don't match
+                        mask = None
+                    # For "always", keep the mask regardless of size
+                else:
+                    mask = None
+
+                if mask is None:
+                    mask = torch.ones(latent['samples'].shape[2:], dtype=torch.float32, device="cpu").unsqueeze(0)
+                    res = nodes.PreviewImage().save_images(decoded_image, filename_prefix="PreviewBridge/PBL-", prompt=prompt, extra_pnginfo=extra_pnginfo)
+                else:
+                    masked_images = utils.tensor_convert_rgba(decoded_image)
+                    resized_mask = utils.resize_mask(mask, (decoded_image.shape[1], decoded_image.shape[2])).unsqueeze(3)
+                    resized_mask = 1 - resized_mask
+                    utils.tensor_putalpha(masked_images, resized_mask)
+                    res = nodes.PreviewImage().save_images(masked_images, filename_prefix="PreviewBridge/PBL-", prompt=prompt, extra_pnginfo=extra_pnginfo)
+
+                res_image = res['ui']['images']
+
+                is_empty_mask = torch.all(mask == 1)
+
+            path = os.path.join(folder_paths.get_temp_directory(), 'PreviewBridge', res_image[0]['filename'])
+            core.set_previewbridge_image(unique_id, path, res_image[0])
+            core.preview_bridge_image_id_map[image] = (path, res_image[0])
+            core.preview_bridge_image_name_map[unique_id, path] = (image, res_image[0])
+            core.preview_bridge_cache[unique_id] = (latent, preview_method, vae_opt, res_image)
+
+            res_latent = latent
+
+        if block and is_empty_mask and core.is_execution_model_version_supported():
+            from comfy_execution.graph import ExecutionBlocker
+            result = ExecutionBlocker(None), ExecutionBlocker(None)
+        elif block and is_empty_mask:
+            logging.warning("[Impact Pack] PreviewBridgeLatent: ComfyUI is outdated - blocking feature is disabled.")
+            result = res_latent, mask
+        else:
+            result = res_latent, mask
+
+        if not is_empty_mask:
+            core.preview_bridge_last_mask_cache[unique_id] = mask
+
+        return {
+            "ui": {"images": res_image},
+            "result": result,
+        }
\ No newline at end of file
diff --git a/zavodik/nodes/ComfyUI-Impact-Pack/modules/impact/config.py b/zavodik/nodes/ComfyUI-Impact-Pack/modules/impact/config.py
new file mode 100644
index 0000000000000000000000000000000000000000..50be0bcdd4ec53f0e6284dfc7e8c7674418c4ca1
--- /dev/null
+++ b/zavodik/nodes/ComfyUI-Impact-Pack/modules/impact/config.py
@@ -0,0 +1,78 @@
+import configparser
+import logging
+import os
+
+version_code = [8, 28, 2]
+version = f"V{version_code[0]}.{version_code[1]}" + (f'.{version_code[2]}' if len(version_code) > 2 else '')
+
+my_path = os.path.dirname(__file__)
+old_config_path = os.path.join(my_path, "impact-pack.ini")
+config_path = os.path.join(my_path, "..", "..", "impact-pack.ini")
+latent_letter_path = os.path.join(my_path, "..", "..", "latent.png")
+
+
+def write_config():
+    config = configparser.ConfigParser()
+    config['default'] = {
+        'sam_editor_cpu': str(get_config()['sam_editor_cpu']),
+        'sam_editor_model': get_config()['sam_editor_model'],
+        'custom_wildcards': get_config()['custom_wildcards'],
+        'disable_gpu_opencv': get_config()['disable_gpu_opencv'],
+        'wildcard_cache_limit_mb': str(get_config()['wildcard_cache_limit_mb']),
+    }
+    with open(config_path, 'w') as configfile:
+        config.write(configfile)
+
+
+def read_config():
+    try:
+        config = configparser.ConfigParser()
+        config.read(config_path)
+        default_conf = config['default']
+
+        # Strip quotes from custom_wildcards path if present
+        custom_wildcards_path = default_conf.get('custom_wildcards', '').strip('\'"')
+
+        if not os.path.exists(custom_wildcards_path):
+            logging.warning(f"[Impact Pack] custom_wildcards path not found: {custom_wildcards_path}. Using default path.")
+            custom_wildcards_path = os.path.join(my_path, "..", "..", "custom_wildcards")
+
+        default_conf['custom_wildcards'] = custom_wildcards_path
+
+        # Parse wildcard_cache_limit_mb with default value of 50MB
+        cache_limit_mb = 50
+        if 'wildcard_cache_limit_mb' in default_conf:
+            try:
+                cache_limit_mb = float(default_conf['wildcard_cache_limit_mb'])
+            except ValueError:
+                logging.warning(f"[Impact Pack] Invalid wildcard_cache_limit_mb value: {default_conf['wildcard_cache_limit_mb']}. Using default: 50")
+                cache_limit_mb = 50
+
+        return {
+            'sam_editor_cpu': default_conf['sam_editor_cpu'].lower() == 'true' if 'sam_editor_cpu' in default_conf else False,
+            'sam_editor_model': default_conf['sam_editor_model'].lower() if 'sam_editor_model' in default_conf else 'sam_vit_b_01ec64.pth',
+            'custom_wildcards': default_conf['custom_wildcards'] if 'custom_wildcards' in default_conf else os.path.abspath(os.path.join(os.path.dirname(__file__), "..", "..", "custom_wildcards")),
+            'disable_gpu_opencv': default_conf['disable_gpu_opencv'].lower() == 'true' if 'disable_gpu_opencv' in default_conf else True,
+            'wildcard_cache_limit_mb': cache_limit_mb
+        }
+
+    except Exception:
+        return {
+            'sam_editor_cpu': False,
+            'sam_editor_model': 'sam_vit_b_01ec64.pth',
+            'custom_wildcards': os.path.abspath(os.path.join(os.path.dirname(__file__), "..", "..", "custom_wildcards")),
+            'disable_gpu_opencv': True,
+            'wildcard_cache_limit_mb': 50
+        }
+
+
+cached_config = None
+
+
+def get_config():
+    global cached_config
+
+    if cached_config is None:
+        cached_config = read_config()
+
+    return cached_config
diff --git a/zavodik/nodes/ComfyUI-Impact-Pack/modules/impact/core.py b/zavodik/nodes/ComfyUI-Impact-Pack/modules/impact/core.py
new file mode 100644
index 0000000000000000000000000000000000000000..8722d1fe9dc2af5e01d085af54677299d275fabb
--- /dev/null
+++ b/zavodik/nodes/ComfyUI-Impact-Pack/modules/impact/core.py
@@ -0,0 +1,2417 @@
+import os
+import warnings
+
+import torch
+from segment_anything import SamPredictor
+
+from comfy_extras.nodes_custom_sampler import Noise_RandomNoise
+from collections import namedtuple
+import numpy as np
+from PIL import ImageOps, Image
+
+import nodes
+from server import PromptServer
+import comfy
+import impact.wildcards as wildcards
+import math
+import cv2
+import time
+from comfy import model_management
+from impact import utils
+from impact import impact_sampling
+from concurrent.futures import ThreadPoolExecutor
+import inspect
+from collections import OrderedDict
+import torch.nn.functional as F
+import logging
+import sys
+import importlib.util
+
+
+is_sam2_available = importlib.util.find_spec("sam2")
+sam2_unavailable_message = f"\n----------------------------------------------------------------------------\n[Impact Pack] The SAM2 functionality is unavailable because the `facebook/sam2` dependency is not installed.\n\nInstallation command:\n{sys.executable} -m pip install git+https://github.com/facebookresearch/sam2\n----------------------------------------------------------------------------\n"
+if is_sam2_available:
+    from sam2.sam2_image_predictor import SAM2ImagePredictor
+    from sam2.build_sam import build_sam2, build_sam2_video_predictor
+else:
+    logging.warning(sam2_unavailable_message)
+
+try:
+    from comfy_extras import nodes_differential_diffusion
+except Exception:
+    logging.warning("\n#############################################\n[Impact Pack] ComfyUI is an outdated version.\n#############################################\n")
+    raise Exception("[Impact Pack] ComfyUI is an outdated version.")
+
+
+SEG = namedtuple("SEG",
+                 ['cropped_image', 'cropped_mask', 'confidence', 'crop_region', 'bbox', 'label', 'control_net_wrapper'],
+                 defaults=[None])
+
+pb_id_cnt = time.time()
+preview_bridge_image_id_map = {}
+preview_bridge_image_name_map = {}
+
+preview_bridge_cache = {}
+preview_bridge_last_mask_cache = {}
+
+current_prompt = None
+
+ADDITIONAL_SCHEDULERS = ['AYS SDXL', 'AYS SD1', 'AYS SVD', 'GITS[coeff=1.2]', 'LTXV[default]', 'OSS FLUX', 'OSS Wan', 'OSS Chroma']
+
+def get_schedulers():
+    return list(comfy.samplers.SCHEDULER_HANDLERS) + ADDITIONAL_SCHEDULERS
+
+def is_execution_model_version_supported():
+    try:
+        import comfy_execution  # noqa: F401
+        return True
+    except Exception:
+        return False
+
+
+def set_previewbridge_image(node_id, file, item):
+    global pb_id_cnt
+
+    if (node_id, file) in preview_bridge_image_name_map:
+        pb_id, _ = preview_bridge_image_name_map[node_id, file]
+        if pb_id.startswith(f"${node_id}"):
+            return pb_id
+
+    pb_id = f"${node_id}-{pb_id_cnt}"
+    preview_bridge_image_id_map[pb_id] = (file, item)
+    preview_bridge_image_name_map[node_id, file] = (pb_id, item)
+    if os.path.isfile(file):
+        i = Image.open(file)
+        i = ImageOps.exif_transpose(i)
+        if 'A' in i.getbands():
+            mask = np.array(i.getchannel('A')).astype(np.float32) / 255.0
+            mask = 1. - torch.from_numpy(mask)
+            preview_bridge_last_mask_cache[node_id] = mask.unsqueeze(0)
+    pb_id_cnt += 1
+
+    return pb_id
+
+
+def erosion_mask(mask, grow_mask_by):
+    mask = utils.make_2d_mask(mask)
+
+    w = mask.shape[1]
+    h = mask.shape[0]
+
+    device = comfy.model_management.get_torch_device()
+    mask = mask.clone().to(device)
+    mask2 = torch.nn.functional.interpolate(mask.reshape((-1, 1, mask.shape[-2], mask.shape[-1])), size=(h, w), mode="bilinear").to(device)
+    if grow_mask_by == 0:
+        mask_erosion = mask2
+    else:
+        kernel_tensor = torch.ones((1, 1, grow_mask_by, grow_mask_by)).to(device)
+        padding = math.ceil((grow_mask_by - 1) / 2)
+
+        mask_erosion = torch.clamp(torch.nn.functional.conv2d(mask2.round(), kernel_tensor, padding=padding), 0, 1)
+
+    return mask_erosion[:, :, :h, :w].round().cpu()
+
+
+# CREDIT: https://github.com/BlenderNeko/ComfyUI_Noise/blob/afb14757216257b12268c91845eac248727a55e2/nodes.py#L68
+# https://discuss.pytorch.org/t/help-regarding-slerp-function-for-generative-model-sampling/32475/3
+def slerp(val, low, high):
+    dims = low.shape
+
+    low = low.reshape(dims[0], -1)
+    high = high.reshape(dims[0], -1)
+
+    low_norm = low/torch.norm(low, dim=1, keepdim=True)
+    high_norm = high/torch.norm(high, dim=1, keepdim=True)
+
+    low_norm[low_norm != low_norm] = 0.0
+    high_norm[high_norm != high_norm] = 0.0
+
+    omega = torch.acos((low_norm*high_norm).sum(1))
+    so = torch.sin(omega)
+    res = (torch.sin((1.0-val)*omega)/so).unsqueeze(1)*low + (torch.sin(val*omega)/so).unsqueeze(1) * high
+
+    return res.reshape(dims)
+
+
+def mix_noise(from_noise, to_noise, strength, variation_method):
+    if variation_method == 'slerp':
+        mixed_noise = slerp(strength, from_noise, to_noise)
+    else:
+        # linear
+        mixed_noise = (1 - strength) * from_noise + strength * to_noise
+
+    # NOTE: Since the variance of the Gaussian noise in mixed_noise has changed, it must be corrected through scaling.
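+    # For independent unit-variance noises X and Y, the linear mix
+    # (1 - s) * X + s * Y has variance (1 - s)**2 + s**2, so dividing by
+    # sqrt((1 - s)**2 + s**2) restores unit variance; e.g. s = 0.5 gives
+    # variance 0.5, and dividing by sqrt(0.5) ~= 0.707 brings it back to 1.
+    # (The same factor is applied after the slerp branch as an approximation.)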
+    scale_factor = math.sqrt((1 - strength) ** 2 + strength ** 2)
+    mixed_noise /= scale_factor
+
+    return mixed_noise
+
+
+class REGIONAL_PROMPT:
+    def __init__(self, mask, sampler, variation_seed=0, variation_strength=0.0, variation_method='linear'):
+        mask = utils.make_2d_mask(mask)
+
+        self.mask = mask
+        self.sampler = sampler
+        self.mask_erosion = None
+        self.erosion_factor = None
+        self.variation_seed = variation_seed
+        self.variation_strength = variation_strength
+        self.variation_method = variation_method
+
+    def clone_with_sampler(self, sampler):
+        rp = REGIONAL_PROMPT(self.mask, sampler)
+        rp.mask_erosion = self.mask_erosion
+        rp.erosion_factor = self.erosion_factor
+        rp.variation_seed = self.variation_seed
+        rp.variation_strength = self.variation_strength
+        rp.variation_method = self.variation_method
+        return rp
+
+    def get_mask_erosion(self, factor):
+        if self.mask_erosion is None or self.erosion_factor != factor:
+            self.mask_erosion = erosion_mask(self.mask, factor)
+            self.erosion_factor = factor
+
+        return self.mask_erosion
+
+    def touch_noise(self, noise):
+        if self.variation_strength > 0.0:
+            mask = utils.make_3d_mask(self.mask)
+            mask = utils.resize_mask(mask, (noise.shape[2], noise.shape[3])).unsqueeze(0)
+
+            regional_noise = Noise_RandomNoise(self.variation_seed).generate_noise({'samples': noise})
+            mixed_noise = mix_noise(noise, regional_noise, self.variation_strength, variation_method=self.variation_method)
+
+            return (mask == 1).float() * mixed_noise + (mask == 0).float() * noise
+
+        return noise
+
+
+class NO_BBOX_DETECTOR:
+    pass
+
+
+class NO_SEGM_DETECTOR:
+    pass
+
+
+def create_segmasks(results):
+    bboxs = results[1]
+    segms = results[2]
+    confidence = results[3]
+
+    results = []
+    for i in range(len(segms)):
+        item = (bboxs[i], segms[i].astype(np.float32), confidence[i])
+        results.append(item)
+    return results
+
+
+def gen_detection_hints_from_mask_area(x, y, mask, threshold, use_negative):
+    mask = utils.make_2d_mask(mask)
+
+    points = []
+    plabs = []
+
+    # minimum sampling step >= 3
+    y_step = max(3, int(mask.shape[0] / 20))
+    x_step = max(3, int(mask.shape[1] / 20))
+
+    for i in range(0, len(mask), y_step):
+        for j in range(0, len(mask[i]), x_step):
+            if mask[i][j] > threshold:
+                points.append((x + j, y + i))
+                plabs.append(1)
+            elif use_negative and mask[i][j] == 0:
+                points.append((x + j, y + i))
+                plabs.append(0)
+
+    return points, plabs
+
+
+def gen_negative_hints(w, h, x1, y1, x2, y2):
+    npoints = []
+    nplabs = []
+
+    # minimum sampling step >= 3
+    y_step = max(3, int(h / 20))
+    x_step = max(3, int(w / 20))
+
+    for i in range(10, h - 10, y_step):
+        for j in range(10, w - 10, x_step):
+            if not (x1 - 10 <= j <= x2 + 10 and y1 - 10 <= i <= y2 + 10):
+                npoints.append((j, i))
+                nplabs.append(0)
+
+    return npoints, nplabs
+
+
+def enhance_detail(image, model, clip, vae, guide_size, guide_size_for_bbox, max_size, bbox, seed, steps, cfg,
+                   sampler_name,
+                   scheduler, positive, negative, denoise, noise_mask, force_inpaint,
+                   wildcard_opt=None, wildcard_opt_concat_mode=None,
+                   detailer_hook=None,
+                   refiner_ratio=None, refiner_model=None, refiner_clip=None, refiner_positive=None,
+                   refiner_negative=None, control_net_wrapper=None, cycle=1,
+                   inpaint_model=False, noise_mask_feather=0, scheduler_func=None,
+                   vae_tiled_encode=False, vae_tiled_decode=False):
+
+    if noise_mask is not None:
+        noise_mask = utils.tensor_gaussian_blur_mask(noise_mask, noise_mask_feather)
+        noise_mask = noise_mask.squeeze(3)
+
+    if noise_mask_feather > 0 and 'denoise_mask_function'
not in model.model_options: + model = nodes_differential_diffusion.DifferentialDiffusion().execute(model)[0] + + if wildcard_opt is not None and wildcard_opt != "": + model, _, wildcard_positive = wildcards.process_with_loras(wildcard_opt, model, clip) + + if wildcard_opt_concat_mode == "concat": + positive = nodes.ConditioningConcat().concat(positive, wildcard_positive)[0] + else: + positive = wildcard_positive + positive = [positive[0].copy()] + if 'pooled_output' in wildcard_positive[0][1]: + positive[0][1]['pooled_output'] = wildcard_positive[0][1]['pooled_output'] + elif 'pooled_output' in positive[0][1]: + del positive[0][1]['pooled_output'] + + h = image.shape[1] + w = image.shape[2] + + bbox_h = bbox[3] - bbox[1] + bbox_w = bbox[2] - bbox[0] + + # Skip processing if the detected bbox is already larger than the guide_size + if not force_inpaint and bbox_h >= guide_size and bbox_w >= guide_size: + logging.info("Detailer: segment skip (enough big)") + return None, None + + if guide_size_for_bbox: # == "bbox" + # Scale up based on the smaller dimension between width and height. + upscale = guide_size / min(bbox_w, bbox_h) + else: + # for cropped_size + upscale = guide_size / min(w, h) + + new_w = int(w * upscale) + new_h = int(h * upscale) + + # safeguard + if 'aitemplate_keep_loaded' in model.model_options: + max_size = min(4096, max_size) + + if new_w > max_size or new_h > max_size: + upscale *= max_size / max(new_w, new_h) + new_w = int(w * upscale) + new_h = int(h * upscale) + + if not force_inpaint: + if upscale <= 1.0: + logging.info(f"Detailer: segment skip [determined upscale factor={upscale}]") + return None, None + + if new_w == 0 or new_h == 0: + logging.info(f"Detailer: segment skip [zero size={new_w, new_h}]") + return None, None + else: + if upscale <= 1.0 or new_w == 0 or new_h == 0: + logging.info("Detailer: force inpaint") + upscale = 1.0 + new_w = w + new_h = h + + if detailer_hook is not None: + new_w, new_h = detailer_hook.touch_scaled_size(new_w, new_h) + + logging.info(f"Detailer: segment upscale for ({bbox_w, bbox_h}) | crop region {w, h} x {upscale} -> {new_w, new_h}") + + # upscale + upscaled_image = utils.tensor_resize(image, new_w, new_h) + + if detailer_hook is not None: + upscaled_image = detailer_hook.post_upscale(upscaled_image, noise_mask) + + cnet_pils = None + if control_net_wrapper is not None: + positive, negative, cnet_pils = control_net_wrapper.apply(positive, negative, upscaled_image, noise_mask) + model, cnet_pils2 = control_net_wrapper.doit_ipadapter(model) + cnet_pils.extend(cnet_pils2) + + # prepare mask + if detailer_hook is None or not detailer_hook.get_skip_sampling(): + if noise_mask is not None and inpaint_model: + imc_encode = nodes.InpaintModelConditioning().encode + if 'noise_mask' in inspect.signature(imc_encode).parameters: + positive, negative, latent_image = imc_encode(positive, negative, upscaled_image, vae, mask=noise_mask, noise_mask=True) + else: + logging.warning("[Impact Pack] ComfyUI is an outdated version.") + positive, negative, latent_image = imc_encode(positive, negative, upscaled_image, vae, noise_mask) + else: + latent_image = utils.to_latent_image(upscaled_image, vae, vae_tiled_encode=vae_tiled_encode) + if noise_mask is not None: + latent_image['noise_mask'] = noise_mask + + if detailer_hook is not None: + latent_image = detailer_hook.post_encode(latent_image) + + refined_latent = latent_image + + sampler_opt=None + if detailer_hook is not None: + sampler_opt = detailer_hook.get_custom_sampler() + + # ksampler + for 
i in range(0, cycle):
+            if detailer_hook is not None:
+                detailer_hook.set_steps((i, cycle))
+
+                refined_latent = detailer_hook.cycle_latent(refined_latent)
+
+                model2, seed2, steps2, cfg2, sampler_name2, scheduler2, positive2, negative2, upscaled_latent2, denoise2 = \
+                    detailer_hook.pre_ksample(model, seed+i, steps, cfg, sampler_name, scheduler, positive, negative, latent_image, denoise)
+                noise, is_touched = detailer_hook.get_custom_noise(seed+i, torch.zeros(latent_image['samples'].size()), is_touched=False)
+                if not is_touched:
+                    noise = None
+            else:
+                model2, seed2, steps2, cfg2, sampler_name2, scheduler2, positive2, negative2, _, denoise2 = \
+                    model, seed + i, steps, cfg, sampler_name, scheduler, positive, negative, latent_image, denoise
+                noise = None
+
+            refined_latent = impact_sampling.ksampler_wrapper(model2, seed2, steps2, cfg2, sampler_name2, scheduler2, positive2, negative2,
+                                                              refined_latent, denoise2, refiner_ratio, refiner_model, refiner_clip, refiner_positive, refiner_negative,
+                                                              noise=noise, scheduler_func=scheduler_func, sampler_opt=sampler_opt)
+
+        if detailer_hook is not None:
+            refined_latent = detailer_hook.pre_decode(refined_latent)
+
+        # non-latent downscale - latent downscale causes bad quality
+        start = time.time()
+        if vae_tiled_decode:
+            (refined_image,) = nodes.VAEDecodeTiled().decode(vae, refined_latent, 512)  # using default settings
+            logging.info(f"[Impact Pack] vae decoded (tiled) in {time.time() - start:.1f}s")
+        else:
+            try:
+                refined_image = vae.decode(refined_latent['samples'])
+            except Exception:
+                # usually an out-of-memory exception from the decode, so try a tiled approach
+                logging.warning(f"[Impact Pack] failed after {time.time() - start:.1f}s, doing vae.decode_tiled 64...")
+                refined_image = vae.decode_tiled(refined_latent["samples"], tile_x=64, tile_y=64, )
+            logging.info(f"[Impact Pack] vae decoded in {time.time() - start:.1f}s")
+    else:
+        # skipped
+        refined_image = upscaled_image
+
+    if detailer_hook is not None:
+        refined_image = detailer_hook.post_decode(refined_image)
+
+    # downscale
+
+    # workaround: support WAN as an i2i model
+    if len(refined_image.shape) == 5:
+        refined_image = refined_image.squeeze(0)
+
+    refined_image = utils.tensor_resize(refined_image, w, h)
+
+    # prevent mixing of device
+    refined_image = refined_image.cpu()
+
+    # don't convert to latent - latent breaks the image
+    # preserving pil is much better
+    return refined_image, cnet_pils
+
+
+def enhance_detail_for_animatediff(image_frames, model, clip, vae, guide_size, guide_size_for_bbox, max_size, bbox, seed, steps, cfg,
+                                   sampler_name,
+                                   scheduler, positive, negative, denoise, noise_mask,
+                                   wildcard_opt=None, wildcard_opt_concat_mode=None,
+                                   detailer_hook=None,
+                                   refiner_ratio=None, refiner_model=None, refiner_clip=None, refiner_positive=None,
+                                   refiner_negative=None, control_net_wrapper=None, noise_mask_feather=0, scheduler_func=None):
+    if noise_mask is not None:
+        noise_mask = utils.tensor_gaussian_blur_mask(noise_mask, noise_mask_feather)
+        noise_mask = noise_mask.squeeze(3)
+
+    if noise_mask_feather > 0 and 'denoise_mask_function' not in model.model_options:
+        model = nodes_differential_diffusion.DifferentialDiffusion().execute(model)[0]
+
+    if wildcard_opt is not None and wildcard_opt != "":
+        model, _, wildcard_positive = wildcards.process_with_loras(wildcard_opt, model, clip)
+
+        if wildcard_opt_concat_mode == "concat":
+            positive = nodes.ConditioningConcat().concat(positive, wildcard_positive)[0]
+        else:
+            positive = wildcard_positive
+
+    h = image_frames.shape[1]
+    w = image_frames.shape[2]
+
+    bbox_h = bbox[3] - bbox[1]
+    bbox_w = bbox[2] - bbox[0]
+
+    if guide_size_for_bbox:  # == "bbox"
+        # Scale up based on the smaller dimension between width and height.
+        upscale = guide_size / min(bbox_w, bbox_h)
+    else:
+        # for cropped_size
+        upscale = guide_size / min(w, h)
+
+    new_w = int(w * upscale)
+    new_h = int(h * upscale)
+
+    # safeguard
+    if 'aitemplate_keep_loaded' in model.model_options:
+        max_size = min(4096, max_size)
+
+    if new_w > max_size or new_h > max_size:
+        upscale *= max_size / max(new_w, new_h)
+        new_w = int(w * upscale)
+        new_h = int(h * upscale)
+
+    if upscale <= 1.0 or new_w == 0 or new_h == 0:
+        logging.info("Detailer: force inpaint")
+        upscale = 1.0
+        new_w = w
+        new_h = h
+
+    if detailer_hook is not None:
+        new_w, new_h = detailer_hook.touch_scaled_size(new_w, new_h)
+
+    logging.info(f"Detailer: segment upscale for ({bbox_w, bbox_h}) | crop region {w, h} x {upscale} -> {new_w, new_h}")
+
+    # upscale the mask tensor to the target size using bilinear interpolation
+    if isinstance(noise_mask, np.ndarray):
+        noise_mask = torch.from_numpy(noise_mask)
+
+    if len(noise_mask.shape) == 2:
+        noise_mask = noise_mask.unsqueeze(0)
+    # otherwise the mask is already 3D
+
+    upscaled_mask = None
+
+    for single_mask in noise_mask:
+        single_mask = single_mask.unsqueeze(0).unsqueeze(0)
+        upscaled_single_mask = torch.nn.functional.interpolate(single_mask, size=(new_h, new_w), mode='bilinear', align_corners=False)
+        upscaled_single_mask = upscaled_single_mask.squeeze(0)
+
+        if upscaled_mask is None:
+            upscaled_mask = upscaled_single_mask
+        else:
+            upscaled_mask = torch.cat((upscaled_mask, upscaled_single_mask), dim=0)
+
+    latent_frames = None
+    for image in image_frames:
+        image = torch.from_numpy(image).unsqueeze(0)
+
+        # upscale
+        upscaled_image = utils.tensor_resize(image, new_w, new_h)
+
+        # ksampler
+        samples = utils.to_latent_image(upscaled_image, vae)['samples']
+
+        if latent_frames is None:
+            latent_frames = samples
+        else:
+            latent_frames = torch.concat((latent_frames, samples), dim=0)
+
+    cnet_images = None
+    if control_net_wrapper is not None:
+        positive, negative, cnet_images = control_net_wrapper.apply(positive, negative, torch.from_numpy(image_frames), noise_mask, use_acn=True)
+
+    if len(upscaled_mask) != len(image_frames) and len(upscaled_mask) > 1:
+        logging.warning(f"[Impact Pack] DetailerForAnimateDiff: The number of the mask frames({len(upscaled_mask)}) and the image frames({len(image_frames)}) are different. Combining the mask frames into a single mask and applying it.")
+        combined_mask = (upscaled_mask[0] * 255).to(torch.uint8)
+
+        for frame_mask in upscaled_mask[1:]:
+            combined_mask |= (frame_mask * 255).to(torch.uint8)
+
+        combined_mask = (combined_mask/255.0).to(torch.float32)
+
+        upscaled_mask = combined_mask.expand(len(image_frames), -1, -1)
+        upscaled_mask = utils.to_binary_mask(upscaled_mask, 0.1)
+
+    latent = {
+        'noise_mask': upscaled_mask,
+        'samples': latent_frames
+    }
+
+    sampler_opt = None
+    if detailer_hook is not None:
+        sampler_opt = detailer_hook.get_custom_sampler()
+
+    if detailer_hook is not None:
+        latent = detailer_hook.post_encode(latent)
+
+    refined_latent = impact_sampling.ksampler_wrapper(model, seed, steps, cfg, sampler_name, scheduler, positive, negative,
+                                                      latent, denoise, refiner_ratio, refiner_model, refiner_clip, refiner_positive, refiner_negative, scheduler_func=scheduler_func, sampler_opt=sampler_opt)
+
+    if detailer_hook is not None:
+        refined_latent = detailer_hook.pre_decode(refined_latent)
+
+    refined_image_frames = None
+    for refined_sample in refined_latent['samples']:
+        refined_sample = refined_sample.unsqueeze(0)
+
+        # non-latent downscale - latent downscale causes bad quality
+        refined_image = vae.decode(refined_sample)
+
+        if refined_image_frames is None:
+            refined_image_frames = refined_image
+        else:
+            refined_image_frames = torch.concat((refined_image_frames, refined_image), dim=0)
+
+    if detailer_hook is not None:
+        refined_image_frames = detailer_hook.post_decode(refined_image_frames)
+
+    refined_image_frames = nodes.ImageScale().upscale(image=refined_image_frames, upscale_method='lanczos', width=w, height=h, crop='disabled')[0]
+
+    return refined_image_frames, cnet_images
+
+
+def composite_to(dest_latent, crop_region, src_latent):
+    x1 = crop_region[0]
+    y1 = crop_region[1]
+
+    # composite to original latent
+    lc = nodes.LatentComposite()
+    orig_image = lc.composite(dest_latent, src_latent, x1, y1)
+
+    return orig_image[0]
+
+
+def sam_predict(predictor, points, plabs, bbox, threshold):
+    point_coords = None if not points else np.array(points)
+    point_labels = None if not plabs else np.array(plabs)
+
+    box = np.array([bbox]) if bbox is not None else None
+
+    cur_masks, scores, _ = predictor.predict(point_coords=point_coords, point_labels=point_labels, box=box)
+
+    total_masks = []
+
+    selected = False
+    max_score = 0
+    max_mask = None
+    for idx in range(len(scores)):
+        if scores[idx] > max_score:
+            max_score = scores[idx]
+            max_mask = cur_masks[idx]
+
+        if scores[idx] >= threshold:
+            selected = True
+            total_masks.append(cur_masks[idx])
+
+    if not selected and max_mask is not None:
+        total_masks.append(max_mask)
+
+    return total_masks
+
+
+class SAMWrapper:
+    def __init__(self, model, is_auto_mode, safe_to_gpu=None):
+        self.model = model
+        self.safe_to_gpu = safe_to_gpu if safe_to_gpu is not None else SafeToGPU_stub()
+        self.is_auto_mode = is_auto_mode
+
+    def prepare_device(self):
+        if self.is_auto_mode:
+            device = comfy.model_management.get_torch_device()
+            self.safe_to_gpu.to_device(self.model, device=device)
+
+    def release_device(self):
+        if self.is_auto_mode:
+            self.model.to(device="cpu")
+
+    def predict(self, image, points, plabs, bbox, threshold):
+        predictor = SamPredictor(self.model)
+        predictor.set_image(image, "RGB")
+
+        return sam_predict(predictor, points, plabs, bbox, threshold)
+
+
+class SAM2Wrapper:
+    def __init__(self, config, modelname, is_auto_mode, safe_to_gpu=None, device_mode="AUTO"):
+        self.config = config
+        self.modelname = modelname +
self.image_predictor = None + self.video_predictor = None + self.device_mode = device_mode + self.safe_to_gpu = safe_to_gpu if safe_to_gpu is not None else SafeToGPU_stub() + self.is_auto_mode = is_auto_mode + + def prepare_device(self): + pass + + def prepare_image_device(self): + if self.is_auto_mode: + device = comfy.model_management.get_torch_device() + self.safe_to_gpu.to_device(self.image_predictor.model, device=device) + + def prepare_video_device(self): + if self.is_auto_mode: + device = comfy.model_management.get_torch_device() + self.safe_to_gpu.to_device(self.video_predictor, device=device) + + def release_device(self): + if self.is_auto_mode: + if self.image_predictor: + self.image_predictor.model.to(device="cpu") + if self.video_predictor: + self.video_predictor.to(device="cpu") + + def predict(self, image, points, plabs, bbox, threshold): + if not is_sam2_available: + raise Exception(sam2_unavailable_message) + + if self.image_predictor is None: + self.image_predictor = SAM2ImagePredictor(build_sam2(self.config, self.modelname)) + + self.prepare_image_device() + + self.image_predictor.set_image(image) + + return sam_predict(self.image_predictor, points, plabs, bbox, threshold) + + def predict_video_segs(self, image_frames, segs): + if not is_sam2_available: + raise Exception(sam2_unavailable_message) + + if self.video_predictor is None: + self.video_predictor = build_sam2_video_predictor(self.config, self.modelname) + + self.prepare_video_device() + + orig_video_height = image_frames.shape[1] + orig_video_width = image_frames.shape[2] + + image_frames, padding = utils.resize_with_padding(image_frames, self.video_predictor.image_size, self.video_predictor.image_size) + image_frames = image_frames.permute(0, 3, 1, 2) + + inference_state = {} + inference_state["images"] = image_frames + inference_state["num_frames"] = len(image_frames) + inference_state["video_height"] = self.video_predictor.image_size + inference_state["video_width"] = self.video_predictor.image_size + inference_state["offload_video_to_cpu"] = True + inference_state["offload_state_to_cpu"] = self.device_mode == "CPU" + inference_state["device"] = self.video_predictor.device + + if inference_state["offload_state_to_cpu"]: + inference_state["storage_device"] = torch.device("cpu") + else: + inference_state["storage_device"] = self.video_predictor.device + + inference_state["point_inputs_per_obj"] = {} + inference_state["mask_inputs_per_obj"] = {} + inference_state["cached_features"] = {} + inference_state["constants"] = {} + + inference_state["obj_id_to_idx"] = OrderedDict() + inference_state["obj_idx_to_id"] = OrderedDict() + inference_state["obj_ids"] = [] + + inference_state["output_dict_per_obj"] = {} + inference_state["temp_output_dict_per_obj"] = {} + inference_state["frames_tracked_per_obj"] = {} + self.video_predictor._get_image_feature(inference_state, frame_idx=0, batch_size=1) + + temp_masks = {} + for i in range(0, len(segs[1])): + bbox = segs[1][i].bbox + + adjusted_bbox = utils.adjust_bbox_after_resize( + bbox, + (orig_video_height, orig_video_width), + (self.video_predictor.image_size, self.video_predictor.image_size), + padding + ) + + points = [utils.center_of_bbox(adjusted_bbox)] + plabs = [1] + self.video_predictor.add_new_points_or_box(inference_state=inference_state, frame_idx=0, obj_id=i, points=points, labels=plabs, box=adjusted_bbox) + temp_masks[i] = [] + + for frame_idx, object_ids, masks in self.video_predictor.propagate_in_video(inference_state): + for i in object_ids: + m = masks[i] + 
m = m.permute(1, 2, 0) + temp_masks[i].append(m) + + result = {} + for k, v in temp_masks.items(): + m = torch.stack(v, dim=0) + m = utils.remove_padding(m, padding) + result[k] = utils.resize_with_padding(m, orig_video_width, orig_video_height)[0] + + return result + +class ESAMWrapper: + def __init__(self, model, device): + self.model = model + self.func_inference = nodes.NODE_CLASS_MAPPINGS['Yoloworld_ESAM_Zho'] + self.device = device + + def prepare_device(self): + pass + + def release_device(self): + pass + + def predict(self, image, points, plabs, bbox, threshold): + if self.device == 'CPU': + self.device = 'cpu' + else: + self.device = 'cuda' + + detected_masks = self.func_inference.inference_sam_with_boxes(image=image, xyxy=[bbox], model=self.model, device=self.device) + return [detected_masks.squeeze(0)] + + +def make_sam_mask(sam, segs, image, detection_hint, dilation, + threshold, bbox_expansion, mask_hint_threshold, mask_hint_use_negative): + + if not hasattr(sam, 'sam_wrapper') and not isinstance(sam, SAM2Wrapper): + raise Exception("[Impact Pack] Invalid SAMLoader is connected. Make sure 'SAMLoader (Impact)'.\nKnown issue: The ComfyUI-YOLO node overrides the SAMLoader (Impact), making it unusable. You need to uninstall ComfyUI-YOLO.\n\n\n") + + + if isinstance(sam, SAM2Wrapper): + sam_obj = sam + else: + sam_obj = sam.sam_wrapper + + sam_obj.prepare_device() + + try: + image = np.clip(255. * image.cpu().numpy().squeeze(), 0, 255).astype(np.uint8) + + total_masks = [] + + use_small_negative = mask_hint_use_negative == "Small" + + # seg_shape = segs[0] + segs = segs[1] + if detection_hint == "mask-points": + points = [] + plabs = [] + + for i in range(len(segs)): + bbox = segs[i].bbox + center = utils.center_of_bbox(segs[i].bbox) + points.append(center) + + # small point is background, big point is foreground + if use_small_negative and bbox[2] - bbox[0] < 10: + plabs.append(0) + else: + plabs.append(1) + + detected_masks = sam_obj.predict(image, points, plabs, None, threshold) + total_masks += detected_masks + + else: + for i in range(len(segs)): + bbox = segs[i].bbox + center = utils.center_of_bbox(bbox) + + x1 = max(bbox[0] - bbox_expansion, 0) + y1 = max(bbox[1] - bbox_expansion, 0) + x2 = min(bbox[2] + bbox_expansion, image.shape[1]) + y2 = min(bbox[3] + bbox_expansion, image.shape[0]) + + dilated_bbox = [x1, y1, x2, y2] + + points = [] + plabs = [] + if detection_hint == "center-1": + points.append(center) + plabs = [1] # 1 = foreground point, 0 = background point + + elif detection_hint == "horizontal-2": + gap = (x2 - x1) / 3 + points.append((x1 + gap, center[1])) + points.append((x1 + gap * 2, center[1])) + plabs = [1, 1] + + elif detection_hint == "vertical-2": + gap = (y2 - y1) / 3 + points.append((center[0], y1 + gap)) + points.append((center[0], y1 + gap * 2)) + plabs = [1, 1] + + elif detection_hint == "rect-4": + x_gap = (x2 - x1) / 3 + y_gap = (y2 - y1) / 3 + points.append((x1 + x_gap, center[1])) + points.append((x1 + x_gap * 2, center[1])) + points.append((center[0], y1 + y_gap)) + points.append((center[0], y1 + y_gap * 2)) + plabs = [1, 1, 1, 1] + + elif detection_hint == "diamond-4": + x_gap = (x2 - x1) / 3 + y_gap = (y2 - y1) / 3 + points.append((x1 + x_gap, y1 + y_gap)) + points.append((x1 + x_gap * 2, y1 + y_gap)) + points.append((x1 + x_gap, y1 + y_gap * 2)) + points.append((x1 + x_gap * 2, y1 + y_gap * 2)) + plabs = [1, 1, 1, 1] + + elif detection_hint == "mask-point-bbox": + center = utils.center_of_bbox(segs[i].bbox) + points.append(center) + plabs = 
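# Condensed from the branches above (and assuming utils.center_of_bbox returns
# the bbox midpoint): every geometric hint mode is a simple fraction of the
# (expanded) detection bbox, and all of its labels are 1 (foreground);
# 0-labelled background hints come only from the mask-based modes and the
# "Outter" negative sampling.
def hint_points(bbox, mode):
    x1, y1, x2, y2 = bbox
    cx, cy = (x1 + x2) / 2, (y1 + y2) / 2
    xg, yg = (x2 - x1) / 3, (y2 - y1) / 3
    return {
        "center-1": [(cx, cy)],
        "horizontal-2": [(x1 + xg, cy), (x1 + 2 * xg, cy)],
        "vertical-2": [(cx, y1 + yg), (cx, y1 + 2 * yg)],
        "rect-4": [(x1 + xg, cy), (x1 + 2 * xg, cy), (cx, y1 + yg), (cx, y1 + 2 * yg)],
        "diamond-4": [(x1 + xg, y1 + yg), (x1 + 2 * xg, y1 + yg),
                      (x1 + xg, y1 + 2 * yg), (x1 + 2 * xg, y1 + 2 * yg)],
    }[mode]

# hint_points((100, 100, 220, 160), "horizontal-2") -> [(140.0, 130.0), (180.0, 130.0)]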
[1] + + elif detection_hint == "mask-area": + points, plabs = gen_detection_hints_from_mask_area(segs[i].crop_region[0], segs[i].crop_region[1], + segs[i].cropped_mask, + mask_hint_threshold, use_small_negative) + + if mask_hint_use_negative == "Outter": + npoints, nplabs = gen_negative_hints(image.shape[0], image.shape[1], + segs[i].crop_region[0], segs[i].crop_region[1], + segs[i].crop_region[2], segs[i].crop_region[3]) + + points += npoints + plabs += nplabs + + detected_masks = sam_obj.predict(image, points, plabs, dilated_bbox, threshold) + total_masks += detected_masks + + # merge every collected masks + mask = utils.combine_masks2(total_masks) + + finally: + sam_obj.release_device() + + if mask is not None: + mask = mask.float() + mask = utils.dilate_mask(mask.cpu().numpy(), dilation) + mask = torch.from_numpy(mask) + else: + size = image.shape[0], image.shape[1] + mask = torch.zeros(size, dtype=torch.float32, device="cpu") # empty mask + + mask = utils.make_3d_mask(mask) + return mask + + +def generate_detection_hints(image, seg, center, detection_hint, dilated_bbox, mask_hint_threshold, use_small_negative, + mask_hint_use_negative): + [x1, y1, x2, y2] = dilated_bbox + + points = [] + plabs = [] + if detection_hint == "center-1": + points.append(center) + plabs = [1] # 1 = foreground point, 0 = background point + + elif detection_hint == "horizontal-2": + gap = (x2 - x1) / 3 + points.append((x1 + gap, center[1])) + points.append((x1 + gap * 2, center[1])) + plabs = [1, 1] + + elif detection_hint == "vertical-2": + gap = (y2 - y1) / 3 + points.append((center[0], y1 + gap)) + points.append((center[0], y1 + gap * 2)) + plabs = [1, 1] + + elif detection_hint == "rect-4": + x_gap = (x2 - x1) / 3 + y_gap = (y2 - y1) / 3 + points.append((x1 + x_gap, center[1])) + points.append((x1 + x_gap * 2, center[1])) + points.append((center[0], y1 + y_gap)) + points.append((center[0], y1 + y_gap * 2)) + plabs = [1, 1, 1, 1] + + elif detection_hint == "diamond-4": + x_gap = (x2 - x1) / 3 + y_gap = (y2 - y1) / 3 + points.append((x1 + x_gap, y1 + y_gap)) + points.append((x1 + x_gap * 2, y1 + y_gap)) + points.append((x1 + x_gap, y1 + y_gap * 2)) + points.append((x1 + x_gap * 2, y1 + y_gap * 2)) + plabs = [1, 1, 1, 1] + + elif detection_hint == "mask-point-bbox": + center = utils.center_of_bbox(seg.bbox) + points.append(center) + plabs = [1] + + elif detection_hint == "mask-area": + points, plabs = gen_detection_hints_from_mask_area(seg.crop_region[0], seg.crop_region[1], + seg.cropped_mask, + mask_hint_threshold, use_small_negative) + + if mask_hint_use_negative == "Outter": + npoints, nplabs = gen_negative_hints(image.shape[0], image.shape[1], + seg.crop_region[0], seg.crop_region[1], + seg.crop_region[2], seg.crop_region[3]) + + points += npoints + plabs += nplabs + + return points, plabs + + +def convert_and_stack_masks(masks): + if len(masks) == 0: + return None + + mask_tensors = [] + for mask in masks: + mask_array = np.array(mask, dtype=np.uint8) + mask_tensor = torch.from_numpy(mask_array) + mask_tensors.append(mask_tensor) + + stacked_masks = torch.stack(mask_tensors, dim=0) + stacked_masks = stacked_masks.unsqueeze(1) + + return stacked_masks + + +def merge_and_stack_masks(stacked_masks, group_size): + if stacked_masks is None: + return None + + num_masks = stacked_masks.size(0) + merged_masks = [] + + for i in range(0, num_masks, group_size): + subset_masks = stacked_masks[i:i + group_size] + merged_mask = torch.any(subset_masks, dim=0) + merged_masks.append(merged_mask) + + if 
len(merged_masks) > 0: + merged_masks = torch.stack(merged_masks, dim=0) + + return merged_masks + + +def segs_scale_match(segs, target_shape): + h = segs[0][0] + w = segs[0][1] + + th = target_shape[1] + tw = target_shape[2] + + if (h == th and w == tw) or h == 0 or w == 0: + return segs + + rh = th / h + rw = tw / w + + new_segs = [] + for seg in segs[1]: + cropped_image = seg.cropped_image + cropped_mask = seg.cropped_mask + x1, y1, x2, y2 = seg.crop_region + bx1, by1, bx2, by2 = seg.bbox + + # x coordinates scale by rw (= tw/w), y coordinates by rh (= th/h) + crop_region = int(x1*rw), int(y1*rh), int(x2*rw), int(y2*rh) + bbox = int(bx1*rw), int(by1*rh), int(bx2*rw), int(by2*rh) + new_w = crop_region[2] - crop_region[0] + new_h = crop_region[3] - crop_region[1] + + if isinstance(cropped_mask, np.ndarray): + cropped_mask = torch.from_numpy(cropped_mask) + + if isinstance(cropped_mask, torch.Tensor) and len(cropped_mask.shape) == 3: + cropped_mask = torch.nn.functional.interpolate(cropped_mask.unsqueeze(0), size=(new_h, new_w), mode='bilinear', align_corners=False) + cropped_mask = cropped_mask.squeeze(0) + else: + cropped_mask = torch.nn.functional.interpolate(cropped_mask.unsqueeze(0).unsqueeze(0), size=(new_h, new_w), mode='bilinear', align_corners=False) + cropped_mask = cropped_mask.squeeze(0).squeeze(0).numpy() + + if cropped_image is not None: + cropped_image = utils.tensor_resize(cropped_image if isinstance(cropped_image, torch.Tensor) else torch.from_numpy(cropped_image), new_w, new_h) + cropped_image = cropped_image.numpy() + + new_seg = SEG(cropped_image, cropped_mask, seg.confidence, crop_region, bbox, seg.label, seg.control_net_wrapper) + new_segs.append(new_seg) + + return (th, tw), new_segs + + +# Uses Python's slicing feature: stacked_masks[2::3] starts at index 2 and selects every third tensor. +# This quickly obtains the last tensor of each group of three in stacked_masks. +def every_three_pick_last(stacked_masks): + selected_masks = stacked_masks[2::3] + return selected_masks + + +def make_sam_mask_segmented(sam, segs, image, detection_hint, dilation, + threshold, bbox_expansion, mask_hint_threshold, mask_hint_use_negative): + + if not hasattr(sam, 'sam_wrapper'): + raise Exception("[Impact Pack] Invalid SAMLoader is connected. Make sure to use 'SAMLoader (Impact)'.") + + sam_obj = sam.sam_wrapper + sam_obj.prepare_device() + + try: + image = np.clip(255. 
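# The expression being built here is the standard ComfyUI-to-SAM image
# conversion: IMAGE tensors are float32 in [0, 1] with shape (B, H, W, C),
# while SAM-style predictors want a uint8 HWC array. The round trip, as a
# standalone sketch:
import numpy as np
import torch

def comfy_image_to_uint8(image: torch.Tensor) -> np.ndarray:
    return np.clip(255. * image.cpu().numpy().squeeze(), 0, 255).astype(np.uint8)

def uint8_to_comfy_image(arr: np.ndarray) -> torch.Tensor:
    return torch.from_numpy(arr.astype(np.float32) / 255.0).unsqueeze(0)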
* image.cpu().numpy().squeeze(), 0, 255).astype(np.uint8) + + total_masks = [] + + use_small_negative = mask_hint_use_negative == "Small" + + # seg_shape = segs[0] + segs = segs[1] + if detection_hint == "mask-points": + points = [] + plabs = [] + + for i in range(len(segs)): + bbox = segs[i].bbox + center = utils.center_of_bbox(bbox) + points.append(center) + + # small point is background, big point is foreground + if use_small_negative and bbox[2] - bbox[0] < 10: + plabs.append(0) + else: + plabs.append(1) + + detected_masks = sam_obj.predict(image, points, plabs, None, threshold) + total_masks += detected_masks + + else: + for i in range(len(segs)): + bbox = segs[i].bbox + center = utils.center_of_bbox(bbox) + x1 = max(bbox[0] - bbox_expansion, 0) + y1 = max(bbox[1] - bbox_expansion, 0) + x2 = min(bbox[2] + bbox_expansion, image.shape[1]) + y2 = min(bbox[3] + bbox_expansion, image.shape[0]) + + dilated_bbox = [x1, y1, x2, y2] + + points, plabs = generate_detection_hints(image, segs[i], center, detection_hint, dilated_bbox, + mask_hint_threshold, use_small_negative, + mask_hint_use_negative) + + detected_masks = sam_obj.predict(image, points, plabs, dilated_bbox, threshold) + + total_masks += detected_masks + + # merge every collected masks + mask = utils.combine_masks2(total_masks) + + finally: + sam_obj.release_device() + + mask_working_device = torch.device("cpu") + + if mask is not None: + mask = mask.float() + mask = utils.dilate_mask(mask.cpu().numpy(), dilation) + mask = torch.from_numpy(mask) + mask = mask.to(device=mask_working_device) + else: + # Extracting batch, height and width + height, width, _ = image.shape + mask = torch.zeros( + (height, width), dtype=torch.float32, device=mask_working_device + ) # empty mask + + stacked_masks = convert_and_stack_masks(total_masks) + + return (mask, merge_and_stack_masks(stacked_masks, group_size=3)) + # return every_three_pick_last(stacked_masks) + + +def segs_bitwise_and_mask(segs, mask): + mask = utils.make_2d_mask(mask) + + if mask is None: + logging.warning("[SegsBitwiseAndMask] Cannot operate: MASK is empty.") + return ([],) + + items = [] + + mask = (mask.cpu().numpy() * 255).astype(np.uint8) + + for seg in segs[1]: + cropped_mask = (seg.cropped_mask * 255).astype(np.uint8) + crop_region = seg.crop_region + + cropped_mask2 = mask[crop_region[1]:crop_region[3], crop_region[0]:crop_region[2]] + + new_mask = np.bitwise_and(cropped_mask.astype(np.uint8), cropped_mask2) + new_mask = new_mask.astype(np.float32) / 255.0 + + item = SEG(seg.cropped_image, new_mask, seg.confidence, seg.crop_region, seg.bbox, seg.label, None) + items.append(item) + + return segs[0], items + + +def segs_bitwise_subtract_mask(segs, mask): + mask = utils.make_2d_mask(mask) + + if mask is None: + logging.warning("[SegsBitwiseSubtractMask] Cannot operate: MASK is empty.") + return ([],) + + items = [] + + mask = (mask.cpu().numpy() * 255).astype(np.uint8) + + for seg in segs[1]: + cropped_mask = (seg.cropped_mask * 255).astype(np.uint8) + crop_region = seg.crop_region + + cropped_mask2 = mask[crop_region[1]:crop_region[3], crop_region[0]:crop_region[2]] + + new_mask = cv2.subtract(cropped_mask.astype(np.uint8), cropped_mask2) + new_mask = new_mask.astype(np.float32) / 255.0 + + item = SEG(seg.cropped_image, new_mask, seg.confidence, seg.crop_region, seg.bbox, seg.label, None) + items.append(item) + + return segs[0], items + + +def apply_mask_to_each_seg(segs, masks): + if masks is None: + logging.warning("[SegsBitwiseAndMask] Cannot operate: MASK is empty.") + 
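# The SEGS convention used throughout these helpers, shown as a minimal
# sketch: SEGS is a pair ((height, width), [SEG, ...]) and every SEG stores
# its mask in crop-local coordinates, so combining a SEG with a full-image
# MASK means slicing the full mask down to the SEG's crop_region first, as
# segs_bitwise_and_mask / segs_bitwise_subtract_mask do above.
import numpy as np

def crop_to_region(full_mask: np.ndarray, crop_region) -> np.ndarray:
    x1, y1, x2, y2 = crop_region
    return full_mask[y1:y2, x1:x2]   # rows are y, columns are x

# e.g. a SEG with crop_region (32, 64, 96, 128) keeps a 64x64 cropped_mask,
# and crop_to_region(mask, (32, 64, 96, 128)).shape == (64, 64) lines up with it.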
return (segs[0], [],) + + items = [] + + masks = masks.squeeze(1) + + for seg, mask in zip(segs[1], masks): + cropped_mask = (seg.cropped_mask * 255).astype(np.uint8) + crop_region = seg.crop_region + + cropped_mask2 = (mask.cpu().numpy() * 255).astype(np.uint8) + cropped_mask2 = cropped_mask2[crop_region[1]:crop_region[3], crop_region[0]:crop_region[2]] + + new_mask = np.bitwise_and(cropped_mask.astype(np.uint8), cropped_mask2) + new_mask = new_mask.astype(np.float32) / 255.0 + + item = SEG(seg.cropped_image, new_mask, seg.confidence, seg.crop_region, seg.bbox, seg.label, None) + items.append(item) + + return segs[0], items + + +def dilate_segs(segs, factor): + if factor == 0: + return segs + + new_segs = [] + for seg in segs[1]: + new_mask = utils.dilate_mask(seg.cropped_mask, factor) + new_seg = SEG(seg.cropped_image, new_mask, seg.confidence, seg.crop_region, seg.bbox, seg.label, seg.control_net_wrapper) + new_segs.append(new_seg) + + return (segs[0], new_segs) + + +class ONNXDetector: + onnx_model = None + + def __init__(self, onnx_model): + self.onnx_model = onnx_model + + def detect(self, image, threshold, dilation, crop_factor, drop_size=1, detailer_hook=None): + drop_size = max(drop_size, 1) + try: + import impact.impact_onnx as onnx + + h = image.shape[1] + w = image.shape[2] + + labels, scores, boxes = onnx.onnx_inference(image, self.onnx_model) + + # collect feasible items + result = [] + + for i in range(len(labels)): + if scores[i] > threshold: + item_bbox = boxes[i] + x1, y1, x2, y2 = item_bbox + + if x2 - x1 > drop_size and y2 - y1 > drop_size: # minimum dimension must be (2,2) to avoid squeeze issue + crop_region = utils.make_crop_region(w, h, item_bbox, crop_factor) + + if detailer_hook is not None: + crop_region = detailer_hook.post_crop_region(w, h, item_bbox, crop_region) + + crop_x1, crop_y1, crop_x2, crop_y2 = crop_region + + # prepare cropped mask + cropped_mask = np.zeros((crop_y2 - crop_y1, crop_x2 - crop_x1)) + cropped_mask[y1 - crop_y1:y2 - crop_y1, x1 - crop_x1:x2 - crop_x1] = 1 + cropped_mask = utils.dilate_mask(cropped_mask, dilation) + + # make items. 
just convert the integer label to a string + item = SEG(None, cropped_mask, scores[i], crop_region, item_bbox, str(labels[i]), None) + result.append(item) + + shape = h, w + segs = shape, result + + if detailer_hook is not None and hasattr(detailer_hook, "post_detection"): + segs = detailer_hook.post_detection(segs) + + return segs + except Exception as e: + logging.error(f"ONNXDetector: unable to execute.\n{e}") + + def detect_combined(self, image, threshold, dilation): + return segs_to_combined_mask(self.detect(image, threshold, dilation, 1)) + + def setAux(self, x): + pass + + +def batch_mask_to_segs(mask, combined, crop_factor, bbox_fill, drop_size=1, label='A', crop_min_size=None, detailer_hook=None): + combined_mask = mask.max(dim=0).values + + segs = mask_to_segs(combined_mask, combined, crop_factor, bbox_fill, drop_size, label, crop_min_size, detailer_hook) + + new_segs = [] + for seg in segs[1]: + x1, y1, x2, y2 = seg.crop_region + cropped_mask = mask[:, y1:y2, x1:x2] + item = SEG(None, cropped_mask, 1.0, seg.crop_region, seg.bbox, label, None) + new_segs.append(item) + + return segs[0], new_segs + + +def mask_to_segs(mask, combined, crop_factor, bbox_fill, drop_size=1, label='A', crop_min_size=None, detailer_hook=None, is_contour=True): + drop_size = max(drop_size, 1) + if mask is None: + logging.info("[mask_to_segs] Cannot operate: MASK is empty.") + return ([],) + + if isinstance(mask, np.ndarray): + pass # `mask` is already a NumPy array + else: + try: + mask = mask.numpy() + except AttributeError: + logging.info("[mask_to_segs] Cannot operate: MASK is not a NumPy array or Tensor.") + return ([],) + + if mask is None: + logging.info("[mask_to_segs] Cannot operate: MASK is empty.") + return ([],) + + result = [] + + if len(mask.shape) == 2: + mask = np.expand_dims(mask, axis=0) + + for i in range(mask.shape[0]): + mask_i = mask[i] + + if combined: + indices = np.nonzero(mask_i) + if len(indices[0]) > 0 and len(indices[1]) > 0: + bbox = ( + np.min(indices[1]), + np.min(indices[0]), + np.max(indices[1]), + np.max(indices[0]), + ) + crop_region = utils.make_crop_region( + mask_i.shape[1], mask_i.shape[0], bbox, crop_factor + ) + x1, y1, x2, y2 = crop_region + + if detailer_hook is not None: + crop_region = detailer_hook.post_crop_region(mask_i.shape[1], mask_i.shape[0], bbox, crop_region) + + if x2 - x1 > 0 and y2 - y1 > 0: + cropped_mask = mask_i[y1:y2, x1:x2] + + if bbox_fill: + bx1, by1, bx2, by2 = bbox + cropped_mask = cropped_mask.copy() + cropped_mask[by1:by2, bx1:bx2] = 1.0 + + if cropped_mask is not None: + item = SEG(None, cropped_mask, 1.0, crop_region, bbox, label, None) + result.append(item) + + else: + mask_i_uint8 = (mask_i * 255.0).astype(np.uint8) + contours, ctree = cv2.findContours(mask_i_uint8, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE) + for j, contour in enumerate(contours): + hierarchy = ctree[0][j] + if hierarchy[3] != -1: + continue + + separated_mask = np.zeros_like(mask_i_uint8) + cv2.drawContours(separated_mask, [contour], 0, 255, -1) + separated_mask = np.array(separated_mask / 255.0).astype(np.float32) + + x, y, w, h = cv2.boundingRect(contour) + bbox = x, y, x + w, y + h + crop_region = utils.make_crop_region( + mask_i.shape[1], mask_i.shape[0], bbox, crop_factor, crop_min_size + ) + + if detailer_hook is not None: + crop_region = detailer_hook.post_crop_region(mask_i.shape[1], mask_i.shape[0], bbox, crop_region) + + if w > drop_size and h > drop_size: + if is_contour: + mask_src = separated_mask + else: + mask_src = mask_i * separated_mask + + 
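# utils.make_crop_region is outside this diff; the geometry assumed here is
# that it expands the detected bbox to crop_factor times its size around the
# same center and clamps to the image bounds (ignoring the optional
# crop_min_size). A sketch of that, which also explains the crop-local
# slicing below:
def make_crop_region_sketch(w, h, bbox, crop_factor):
    x1, y1, x2, y2 = bbox
    cx, cy = (x1 + x2) / 2, (y1 + y2) / 2
    kw, kh = (x2 - x1) * crop_factor / 2, (y2 - y1) * crop_factor / 2
    return [int(max(cx - kw, 0)), int(max(cy - kh, 0)),
            int(min(cx + kw, w)), int(min(cy + kh, h))]

# crop_factor=3.0 on a 40x20 bbox yields (before clamping) a 120x60 window
# centered on the same point; masks are then sliced with
# mask[crop_y1:crop_y2, crop_x1:crop_x2] in that window's local coordinates.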
cropped_mask = np.array( + mask_src[ + crop_region[1]: crop_region[3], + crop_region[0]: crop_region[2], + ] + ) + + if bbox_fill: + cx1, cy1, _, _ = crop_region + bx1 = x - cx1 + bx2 = x+w - cx1 + by1 = y - cy1 + by2 = y+h - cy1 + cropped_mask[by1:by2, bx1:bx2] = 1.0 + + if cropped_mask is not None: + cropped_mask = torch.clip(torch.from_numpy(cropped_mask), 0, 1.0) + item = SEG(None, cropped_mask.numpy(), 1.0, crop_region, bbox, label, None) + result.append(item) + + if not result: + logging.info("[mask_to_segs] Empty mask.") + + logging.info(f"# of Detected SEGS: {len(result)}") + # for r in result: + # print(f"\tbbox={r.bbox}, crop={r.crop_region}, label={r.label}") + + # shape: (b,h,w) -> (h,w) + return (mask.shape[1], mask.shape[2]), result + + +def mediapipe_facemesh_to_segs(image, crop_factor, bbox_fill, crop_min_size, drop_size, dilation, face, mouth, left_eyebrow, left_eye, left_pupil, right_eyebrow, right_eye, right_pupil): + parts = { + "face": np.array([0x0A, 0xC8, 0x0A]), + "mouth": np.array([0x0A, 0xB4, 0x0A]), + "left_eyebrow": np.array([0xB4, 0xDC, 0x0A]), + "left_eye": np.array([0xB4, 0xC8, 0x0A]), + "left_pupil": np.array([0xFA, 0xC8, 0x0A]), + "right_eyebrow": np.array([0x0A, 0xDC, 0xB4]), + "right_eye": np.array([0x0A, 0xC8, 0xB4]), + "right_pupil": np.array([0x0A, 0xC8, 0xFA]), + } + + def create_segments(image, color): + image = (image * 255).to(torch.uint8) + image = image.squeeze(0).numpy() + mask = cv2.inRange(image, color, color) + + contours, ctree = cv2.findContours(mask, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE) + mask_list = [] + for i, contour in enumerate(contours): + hierarchy = ctree[0][i] + if hierarchy[3] == -1: + convex_hull = cv2.convexHull(contour) + convex_segment = np.zeros_like(image) + cv2.fillPoly(convex_segment, [convex_hull], (255, 255, 255)) + + convex_segment = np.expand_dims(convex_segment, axis=0).astype(np.float32) / 255.0 + tensor = torch.from_numpy(convex_segment) + mask_tensor = torch.any(tensor != 0, dim=-1).float() + mask_tensor = mask_tensor.squeeze(0) + mask_tensor = torch.from_numpy(utils.dilate_mask(mask_tensor.numpy(), dilation)) + mask_list.append(mask_tensor.unsqueeze(0)) + + return mask_list + + segs = [] + + def create_seg(label): + mask_list = create_segments(image, parts[label]) + for mask in mask_list: + seg = mask_to_segs(mask, False, crop_factor, bbox_fill, drop_size=drop_size, label=label, crop_min_size=crop_min_size) + if len(seg[1]) > 0: + segs.extend(seg[1]) + + if face: + create_seg('face') + + if mouth: + create_seg('mouth') + + if left_eyebrow: + create_seg('left_eyebrow') + + if left_eye: + create_seg('left_eye') + + if left_pupil: + create_seg('left_pupil') + + if right_eyebrow: + create_seg('right_eyebrow') + + if right_eye: + create_seg('right_eye') + + if right_pupil: + create_seg('right_pupil') + + return (image.shape[1], image.shape[2]), segs + + +def segs_to_combined_mask(segs): + shape = segs[0] + h = shape[0] + w = shape[1] + + mask = np.zeros((h, w), dtype=np.uint8) + + for seg in segs[1]: + cropped_mask = seg.cropped_mask + crop_region = seg.crop_region + mask[crop_region[1]:crop_region[3], crop_region[0]:crop_region[2]] |= (cropped_mask * 255).astype(np.uint8) + + return torch.from_numpy(mask.astype(np.float32) / 255.0) + + +def segs_to_masklist(segs): + shape = segs[0] + h = shape[0] + w = shape[1] + + masks = [] + for seg in segs[1]: + if isinstance(seg.cropped_mask, np.ndarray): + cropped_mask = torch.from_numpy(seg.cropped_mask) + else: + cropped_mask = seg.cropped_mask + + if cropped_mask.ndim == 
2: + cropped_mask = cropped_mask.unsqueeze(0) + + n = len(cropped_mask) + + mask = torch.zeros((n, h, w), dtype=torch.uint8) + crop_region = seg.crop_region + mask[:, crop_region[1]:crop_region[3], crop_region[0]:crop_region[2]] |= (cropped_mask * 255).to(torch.uint8) + mask = (mask / 255.0).to(torch.float32) + + for x in mask: + masks.append(x) + + if len(masks) == 0: + empty_mask = torch.zeros((h, w), dtype=torch.float32, device="cpu") + masks = [empty_mask] + + return masks + + +def vae_decode(vae, samples, use_tile, hook, tile_size=512, overlap=64): + if use_tile: + decoder = nodes.VAEDecodeTiled() + if 'overlap' in inspect.signature(decoder.decode).parameters: + pixels = decoder.decode(vae, samples, tile_size, overlap=overlap)[0] + else: + logging.warning("[Impact Pack] Your ComfyUI is outdated.") + pixels = decoder.decode(vae, samples, tile_size)[0] + else: + pixels = nodes.VAEDecode().decode(vae, samples)[0] + + if hook is not None: + pixels = hook.post_decode(pixels) + + return pixels + + +def vae_encode(vae, pixels, use_tile, hook, tile_size=512, overlap=64): + if use_tile: + encoder = nodes.VAEEncodeTiled() + if 'overlap' in inspect.signature(encoder.encode).parameters: + samples = encoder.encode(vae, pixels, tile_size, overlap=overlap)[0] + else: + logging.warning("[Impact Pack] Your ComfyUI is outdated.") + samples = encoder.encode(vae, pixels, tile_size)[0] + else: + samples = nodes.VAEEncode().encode(vae, pixels)[0] + + if hook is not None: + samples = hook.post_encode(samples) + + return samples + + +def latent_upscale_on_pixel_space_shape(samples, scale_method, w, h, vae, use_tile=False, tile_size=512, save_temp_prefix=None, hook=None, overlap=64): + return latent_upscale_on_pixel_space_shape2(samples, scale_method, w, h, vae, use_tile, tile_size, save_temp_prefix, hook, overlap=overlap)[0] + + +def latent_upscale_on_pixel_space_shape2(samples, scale_method, w, h, vae, use_tile=False, tile_size=512, save_temp_prefix=None, hook=None, overlap=64): + pixels = vae_decode(vae, samples, use_tile, hook, tile_size=tile_size, overlap=overlap) + + if save_temp_prefix is not None: + nodes.PreviewImage().save_images(pixels, filename_prefix=save_temp_prefix) + + pixels = nodes.ImageScale().upscale(pixels, scale_method, int(w), int(h), False)[0] + + old_pixels = pixels + if hook is not None: + pixels = hook.post_upscale(pixels) + + return vae_encode(vae, pixels, use_tile, hook, tile_size=tile_size, overlap=overlap), old_pixels + + +def latent_upscale_on_pixel_space(samples, scale_method, scale_factor, vae, use_tile=False, tile_size=512, save_temp_prefix=None, hook=None, overlap=64): + return latent_upscale_on_pixel_space2(samples, scale_method, scale_factor, vae, use_tile, tile_size, save_temp_prefix, hook, overlap=overlap)[0] + + +def latent_upscale_on_pixel_space2(samples, scale_method, scale_factor, vae, use_tile=False, tile_size=512, save_temp_prefix=None, hook=None, overlap=64): + pixels = vae_decode(vae, samples, use_tile, hook, tile_size=tile_size, overlap=overlap) + + if save_temp_prefix is not None: + nodes.PreviewImage().save_images(pixels, filename_prefix=save_temp_prefix) + + w = pixels.shape[2] * scale_factor + h = pixels.shape[1] * scale_factor + pixels = nodes.ImageScale().upscale(pixels, scale_method, int(w), int(h), False)[0] + + old_pixels = pixels + if hook is not None: + pixels = hook.post_upscale(pixels) + + return vae_encode(vae, pixels, use_tile, hook, tile_size=tile_size, overlap=overlap), old_pixels + + +def latent_upscale_on_pixel_space_with_model_shape(samples, 
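# vae_decode/vae_encode above feature-detect the newer `overlap` keyword with
# inspect.signature instead of pinning a ComfyUI version; the same pattern as
# a standalone sketch (the helper name is illustrative):
import inspect

def call_with_optional_kwarg(fn, *args, optional=None, **kwargs):
    # `optional` is a (name, value) pair passed only if fn accepts that keyword
    if optional is not None and optional[0] in inspect.signature(fn).parameters:
        kwargs[optional[0]] = optional[1]
    return fn(*args, **kwargs)

# pixels = call_with_optional_kwarg(decoder.decode, vae, samples, tile_size,
#                                   optional=('overlap', overlap))[0]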
scale_method, upscale_model, new_w, new_h, vae, use_tile=False, tile_size=512, save_temp_prefix=None, hook=None, overlap=64): + return latent_upscale_on_pixel_space_with_model_shape2(samples, scale_method, upscale_model, new_w, new_h, vae, use_tile, tile_size, save_temp_prefix, hook, overlap=overlap)[0] + + +def latent_upscale_on_pixel_space_with_model_shape2(samples, scale_method, upscale_model, new_w, new_h, vae, use_tile=False, tile_size=512, save_temp_prefix=None, hook=None, overlap=64): + pixels = vae_decode(vae, samples, use_tile, hook, tile_size=tile_size, overlap=overlap) + + if save_temp_prefix is not None: + nodes.PreviewImage().save_images(pixels, filename_prefix=save_temp_prefix) + + w = pixels.shape[2] + + # upscale by model upscaler + current_w = w + while current_w < new_w: + model_upscaler = nodes.NODE_CLASS_MAPPINGS['ImageUpscaleWithModel']() + if hasattr(model_upscaler, 'execute'): + pixels = model_upscaler.execute(upscale_model, pixels)[0] + else: + pixels = model_upscaler.upscale(upscale_model, pixels)[0] + + current_w = pixels.shape[2] + if current_w == w: + logging.info("[latent_upscale_on_pixel_space_with_model] x1 upscale model selected") + break + + # downscale to target scale + pixels = nodes.ImageScale().upscale(pixels, scale_method, int(new_w), int(new_h), False)[0] + + old_pixels = pixels + if hook is not None: + pixels = hook.post_upscale(pixels) + + return vae_encode(vae, pixels, use_tile, hook, tile_size=tile_size, overlap=overlap), old_pixels + + +def latent_upscale_on_pixel_space_with_model(samples, scale_method, upscale_model, scale_factor, vae, use_tile=False, + tile_size=512, save_temp_prefix=None, hook=None, overlap=64): + return latent_upscale_on_pixel_space_with_model2(samples, scale_method, upscale_model, scale_factor, vae, use_tile, tile_size, save_temp_prefix, hook, overlap=overlap)[0] + +def latent_upscale_on_pixel_space_with_model2(samples, scale_method, upscale_model, scale_factor, vae, use_tile=False, + tile_size=512, save_temp_prefix=None, hook=None, overlap=64): + pixels = vae_decode(vae, samples, use_tile, hook, tile_size=tile_size, overlap=overlap) + + if save_temp_prefix is not None: + nodes.PreviewImage().save_images(pixels, filename_prefix=save_temp_prefix) + + w = pixels.shape[2] + h = pixels.shape[1] + + new_w = w * scale_factor + new_h = h * scale_factor + + # upscale by model upscaler + current_w = w + while current_w < new_w: + model_upscaler = nodes.NODE_CLASS_MAPPINGS['ImageUpscaleWithModel']() + if hasattr(model_upscaler, 'execute'): + pixels = model_upscaler.execute(upscale_model, pixels)[0] + else: + pixels = model_upscaler.upscale(upscale_model, pixels)[0] + + current_w = pixels.shape[2] + if current_w == w: + logging.info("[latent_upscale_on_pixel_space_with_model] x1 upscale model selected") + break + + # downscale to target scale + pixels = nodes.ImageScale().upscale(pixels, scale_method, int(new_w), int(new_h), False)[0] + + old_pixels = pixels + if hook is not None: + pixels = hook.post_upscale(pixels) + + return vae_encode(vae, pixels, use_tile, hook, tile_size=tile_size, overlap=overlap), old_pixels + + +class TwoSamplersForMaskUpscaler: + def __init__(self, scale_method, sample_schedule, use_tiled_vae, base_sampler, mask_sampler, mask, vae, + full_sampler_opt=None, upscale_model_opt=None, hook_base_opt=None, hook_mask_opt=None, + hook_full_opt=None, + tile_size=512): + + mask = utils.make_2d_mask(mask) + + mask = mask.reshape((-1, 1, mask.shape[-2], mask.shape[-1])) + + self.params = scale_method, sample_schedule, 
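# The model-upscale loops above apply a fixed-ratio upscale model repeatedly
# until the target width is reached, then let ImageScale shrink to the exact
# size; the `current_w == w` check breaks out when a 1x model makes no
# progress. A dry-run sketch of the pass count:
def upscale_passes(w, new_w, model_ratio):
    passes = 0
    while w < new_w and model_ratio > 1:
        w *= model_ratio
        passes += 1
    return passes, w

# upscale_passes(512, 1536, 4) -> (1, 2048): one 4x pass overshoots to 2048,
# and the final ImageScale call then downsamples to exactly 1536.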
use_tiled_vae, base_sampler, mask_sampler, mask, vae + self.upscale_model = upscale_model_opt + self.full_sampler = full_sampler_opt + self.hook_base = hook_base_opt + self.hook_mask = hook_mask_opt + self.hook_full = hook_full_opt + self.use_tiled_vae = use_tiled_vae + self.tile_size = tile_size + self.is_tiled = False + self.vae = vae + + def upscale(self, step_info, samples, upscale_factor, save_temp_prefix=None): + scale_method, sample_schedule, use_tiled_vae, base_sampler, mask_sampler, mask, vae = self.params + + mask = utils.make_2d_mask(mask) + + self.prepare_hook(step_info) + + # upscale latent + if self.upscale_model is None: + upscaled_latent = latent_upscale_on_pixel_space(samples, scale_method, upscale_factor, vae, + use_tile=self.use_tiled_vae, + save_temp_prefix=save_temp_prefix, + hook=self.hook_base, tile_size=self.tile_size) + else: + upscaled_latent = latent_upscale_on_pixel_space_with_model(samples, scale_method, self.upscale_model, + upscale_factor, vae, + use_tile=self.use_tiled_vae, + save_temp_prefix=save_temp_prefix, + hook=self.hook_mask, tile_size=self.tile_size) + + return self.do_samples(step_info, base_sampler, mask_sampler, sample_schedule, mask, upscaled_latent) + + def prepare_hook(self, step_info): + if self.hook_base is not None: + self.hook_base.set_steps(step_info) + if self.hook_mask is not None: + self.hook_mask.set_steps(step_info) + if self.hook_full is not None: + self.hook_full.set_steps(step_info) + + def upscale_shape(self, step_info, samples, w, h, save_temp_prefix=None): + scale_method, sample_schedule, use_tiled_vae, base_sampler, mask_sampler, mask, vae = self.params + + mask = utils.make_2d_mask(mask) + + self.prepare_hook(step_info) + + # upscale latent + if self.upscale_model is None: + upscaled_latent = latent_upscale_on_pixel_space_shape(samples, scale_method, w, h, vae, + use_tile=self.use_tiled_vae, + save_temp_prefix=save_temp_prefix, + hook=self.hook_base, + tile_size=self.tile_size) + else: + upscaled_latent = latent_upscale_on_pixel_space_with_model_shape(samples, scale_method, self.upscale_model, + w, h, vae, + use_tile=self.use_tiled_vae, + save_temp_prefix=save_temp_prefix, + hook=self.hook_mask, + tile_size=self.tile_size) + + return self.do_samples(step_info, base_sampler, mask_sampler, sample_schedule, mask, upscaled_latent) + + def is_full_sample_time(self, step_info, sample_schedule): + cur_step, total_step = step_info + + # make start from 1 instead of zero + cur_step += 1 + total_step += 1 + + if sample_schedule == "none": + return False + + elif sample_schedule == "interleave1": + return cur_step % 2 == 0 + + elif sample_schedule == "interleave2": + return cur_step % 3 == 0 + + elif sample_schedule == "interleave3": + return cur_step % 4 == 0 + + elif sample_schedule == "last1": + return cur_step == total_step + + elif sample_schedule == "last2": + return cur_step >= total_step - 1 + + elif sample_schedule == "interleave1+last1": + return cur_step % 2 == 0 or cur_step >= total_step - 1 + + elif sample_schedule == "interleave2+last1": + return cur_step % 2 == 0 or cur_step >= total_step - 1 + + elif sample_schedule == "interleave3+last1": + return cur_step % 2 == 0 or cur_step >= total_step - 1 + + def do_samples(self, step_info, base_sampler, mask_sampler, sample_schedule, mask, upscaled_latent): + mask = utils.make_2d_mask(mask) + + if self.is_full_sample_time(step_info, sample_schedule): + logging.info(f"step_info={step_info} / full time") + + upscaled_latent = base_sampler.sample(upscaled_latent, self.hook_base) + 
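# A worked example of is_full_sample_time above, in the shifted 1-based terms
# the method uses internally: an "interleaveN" schedule fires when the shifted
# step is divisible by N+1 (note the three "+last1" branches above all share
# the %2 test), and "last2" fires once shifted cur_step >= shifted total - 1.
def full_sample_steps(period, shifted_total):
    return [s for s in range(1, shifted_total + 1) if s % period == 0]

# full_sample_steps(2, 4) -> [2, 4]   # "interleave1"
# full_sample_steps(3, 4) -> [3]      # "interleave2"
# full_sample_steps(4, 4) -> [4]      # "interleave3"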
sampler = self.full_sampler if self.full_sampler is not None else base_sampler + return sampler.sample(upscaled_latent, self.hook_full) + + else: + logging.info(f"step_info={step_info} / non-full time") + # upscale mask + if mask.ndim == 2: + mask = mask[None, :, :, None] + upscaled_mask = F.interpolate(mask, size=(upscaled_latent['samples'].shape[2], upscaled_latent['samples'].shape[3]), mode='bilinear', align_corners=True) + upscaled_mask = upscaled_mask[:, :, :upscaled_latent['samples'].shape[2], :upscaled_latent['samples'].shape[3]] + + # base sampler + upscaled_inv_mask = torch.where(upscaled_mask != 1.0, torch.tensor(1.0), torch.tensor(0.0)) + upscaled_latent['noise_mask'] = upscaled_inv_mask + upscaled_latent = base_sampler.sample(upscaled_latent, self.hook_base) + + # mask sampler + upscaled_latent['noise_mask'] = upscaled_mask + upscaled_latent = mask_sampler.sample(upscaled_latent, self.hook_mask) + + # remove mask + del upscaled_latent['noise_mask'] + return upscaled_latent + + +class PixelKSampleUpscaler: + def __init__(self, scale_method, model, vae, seed, steps, cfg, sampler_name, scheduler, positive, negative, denoise, + use_tiled_vae, upscale_model_opt=None, hook_opt=None, tile_size=512, scheduler_func=None, + tile_cnet_opt=None, tile_cnet_strength=1.0): + self.params = scale_method, model, vae, seed, steps, cfg, sampler_name, scheduler, positive, negative, denoise + self.upscale_model = upscale_model_opt + self.hook = hook_opt + self.use_tiled_vae = use_tiled_vae + self.tile_size = tile_size + self.is_tiled = False + self.vae = vae + self.scheduler_func = scheduler_func + self.tile_cnet = tile_cnet_opt + self.tile_cnet_strength = tile_cnet_strength + + def sample(self, model, seed, steps, cfg, sampler_name, scheduler, positive, negative, upscaled_latent, denoise, images): + if self.tile_cnet is not None: + image_batch, image_w, image_h, _ = images.shape + if image_batch > 1: + warnings.warn('Multiple latents in batch, Tile ControlNet being ignored') + else: + if 'TilePreprocessor' not in nodes.NODE_CLASS_MAPPINGS: + raise RuntimeError("'TilePreprocessor' node (from comfyui_controlnet_aux) isn't installed.") + preprocessor = nodes.NODE_CLASS_MAPPINGS['TilePreprocessor']() + # might add capacity to set pyrUp_iters later, not needed for now though + preprocessed = preprocessor.execute(images, pyrUp_iters=3, resolution=min(image_w, image_h))[0] + positive, negative = nodes.ControlNetApplyAdvanced().apply_controlnet(positive=positive, + negative=negative, + control_net=self.tile_cnet, + image=preprocessed, + strength=self.tile_cnet_strength, + start_percent=0, + end_percent=1.0, + vae=self.vae) + + refined_latent = impact_sampling.impact_sample(model, seed, steps, cfg, sampler_name, scheduler, + positive, negative, upscaled_latent, denoise, scheduler_func=self.scheduler_func) + + return refined_latent + + def upscale(self, step_info, samples, upscale_factor, save_temp_prefix=None): + scale_method, model, vae, seed, steps, cfg, sampler_name, scheduler, positive, negative, denoise = self.params + + if self.hook is not None: + self.hook.set_steps(step_info) + + if self.upscale_model is None: + upscaled_latent, upscaled_images = \ + latent_upscale_on_pixel_space2(samples, scale_method, upscale_factor, vae, + use_tile=self.use_tiled_vae, + save_temp_prefix=save_temp_prefix, hook=self.hook, tile_size=512) + else: + upscaled_latent, upscaled_images = \ + latent_upscale_on_pixel_space_with_model2(samples, scale_method, self.upscale_model, + upscale_factor, vae, + 
use_tile=self.use_tiled_vae, + save_temp_prefix=save_temp_prefix, + hook=self.hook, + tile_size=self.tile_size) + + if self.hook is not None: + model, seed, steps, cfg, sampler_name, scheduler, positive, negative, upscaled_latent, denoise = \ + self.hook.pre_ksample(model, seed, steps, cfg, sampler_name, scheduler, positive, negative, + upscaled_latent, denoise) + + if 'noise_mask' in samples: + upscaled_latent['noise_mask'] = samples['noise_mask'] + + refined_latent = self.sample(model, seed, steps, cfg, sampler_name, scheduler, positive, negative, upscaled_latent, denoise, upscaled_images) + return refined_latent + + def upscale_shape(self, step_info, samples, w, h, save_temp_prefix=None): + scale_method, model, vae, seed, steps, cfg, sampler_name, scheduler, positive, negative, denoise = self.params + + if self.hook is not None: + self.hook.set_steps(step_info) + + if self.upscale_model is None: + upscaled_latent, upscaled_images = \ + latent_upscale_on_pixel_space_shape2(samples, scale_method, w, h, vae, + use_tile=self.use_tiled_vae, + save_temp_prefix=save_temp_prefix, hook=self.hook, + tile_size=self.tile_size) + else: + upscaled_latent, upscaled_images = \ + latent_upscale_on_pixel_space_with_model_shape2(samples, scale_method, self.upscale_model, + w, h, vae, + use_tile=self.use_tiled_vae, + save_temp_prefix=save_temp_prefix, + hook=self.hook, + tile_size=self.tile_size) + + if self.hook is not None: + model, seed, steps, cfg, sampler_name, scheduler, positive, negative, upscaled_latent, denoise = \ + self.hook.pre_ksample(model, seed, steps, cfg, sampler_name, scheduler, positive, negative, + upscaled_latent, denoise) + + if 'noise_mask' in samples: + upscaled_latent['noise_mask'] = samples['noise_mask'] + + refined_latent = self.sample(model, seed, steps, cfg, sampler_name, scheduler, positive, negative, upscaled_latent, denoise, upscaled_images) + return refined_latent + + +class IPAdapterWrapper: + def __init__(self, ipadapter_pipe, weight, noise, weight_type, start_at, end_at, unfold_batch, weight_v2, reference_image, neg_image=None, prev_control_net=None, combine_embeds='concat'): + self.reference_image = reference_image + self.ipadapter_pipe = ipadapter_pipe + self.weight = weight + self.weight_type = weight_type + self.noise = noise + self.start_at = start_at + self.end_at = end_at + self.unfold_batch = unfold_batch + self.prev_control_net = prev_control_net + self.weight_v2 = weight_v2 + self.image = reference_image + self.neg_image = neg_image + self.combine_embeds = combine_embeds + + # name 'apply_ipadapter' isn't allowed + def doit_ipadapter(self, model): + cnet_image_list = [self.image] + prev_cnet_images = [] + + if 'IPAdapterAdvanced' not in nodes.NODE_CLASS_MAPPINGS: + if 'IPAdapterApply' in nodes.NODE_CLASS_MAPPINGS: + raise Exception("[ERROR] 'ComfyUI IPAdapter Plus' is outdated.") + + utils.try_install_custom_node('https://github.com/cubiq/ComfyUI_IPAdapter_plus', + "To use 'IPAdapterApplySEGS' node, 'ComfyUI IPAdapter Plus' extension is required.") + raise Exception("[ERROR] To use IPAdapterApplySEGS, you need to install 'ComfyUI IPAdapter Plus'") + + obj = nodes.NODE_CLASS_MAPPINGS['IPAdapterAdvanced'] + + ipadapter, _, clip_vision, insightface, lora_loader = self.ipadapter_pipe + model = lora_loader(model) + + if self.prev_control_net is not None: + model, prev_cnet_images = self.prev_control_net.doit_ipadapter(model) + + model = obj().apply_ipadapter(model=model, ipadapter=ipadapter, weight=self.weight, weight_type=self.weight_type, + 
start_at=self.start_at, end_at=self.end_at, combine_embeds=self.combine_embeds, + clip_vision=clip_vision, image=self.image, image_negative=self.neg_image, attn_mask=None, + insightface=insightface, weight_faceidv2=self.weight_v2)[0] + + cnet_image_list.extend(prev_cnet_images) + + return model, cnet_image_list + + def apply(self, positive, negative, image, mask=None, use_acn=False): + if self.prev_control_net is not None: + return self.prev_control_net.apply(positive, negative, image, mask, use_acn=use_acn) + else: + return positive, negative, [] + + +class ControlNetWrapper: + def __init__(self, control_net, strength, preprocessor, prev_control_net=None, original_size=None, crop_region=None, control_image=None): + self.control_net = control_net + self.strength = strength + self.preprocessor = preprocessor + self.prev_control_net = prev_control_net + + if original_size is not None and crop_region is not None and control_image is not None: + self.control_image = utils.tensor_resize(control_image, original_size[1], original_size[0]) + self.control_image = torch.tensor(utils.tensor_crop(self.control_image, crop_region)) + else: + self.control_image = None + + def apply(self, positive, negative, image, mask=None, use_acn=False): + cnet_image_list = [] + prev_cnet_images = [] + + if self.prev_control_net is not None: + positive, negative, prev_cnet_images = self.prev_control_net.apply(positive, negative, image, mask, use_acn=use_acn) + + if self.control_image is not None: + cnet_image = self.control_image + elif self.preprocessor is not None: + cnet_image = self.preprocessor.apply(image, mask) + else: + cnet_image = image + + cnet_image_list.extend(prev_cnet_images) + cnet_image_list.append(cnet_image) + + if use_acn: + if "ACN_AdvancedControlNetApply" in nodes.NODE_CLASS_MAPPINGS: + acn = nodes.NODE_CLASS_MAPPINGS['ACN_AdvancedControlNetApply']() + positive, negative, _ = acn.apply_controlnet(positive=positive, negative=negative, control_net=self.control_net, image=cnet_image, + strength=self.strength, start_percent=0.0, end_percent=1.0) + else: + utils.try_install_custom_node('https://github.com/BlenderNeko/ComfyUI_TiledKSampler', + "To use 'ControlNetWrapper' for AnimateDiff, 'ComfyUI-Advanced-ControlNet' extension is required.") + raise Exception("'ACN_AdvancedControlNetApply' node isn't installed.") + else: + positive = nodes.ControlNetApply().apply_controlnet(positive, self.control_net, cnet_image, self.strength)[0] + + return positive, negative, cnet_image_list + + def doit_ipadapter(self, model): + if self.prev_control_net is not None: + return self.prev_control_net.doit_ipadapter(model) + else: + return model, [] + + +class ControlNetAdvancedWrapper: + def __init__(self, control_net, strength, start_percent, end_percent, preprocessor, prev_control_net=None, + original_size=None, crop_region=None, control_image=None, vae=None): + self.control_net = control_net + self.strength = strength + self.preprocessor = preprocessor + self.prev_control_net = prev_control_net + self.start_percent = start_percent + self.end_percent = end_percent + self.vae = vae + + if original_size is not None and crop_region is not None and control_image is not None: + self.control_image = utils.tensor_resize(control_image, original_size[1], original_size[0]) + self.control_image = torch.tensor(utils.tensor_crop(self.control_image, crop_region)) + else: + self.control_image = None + + def doit_ipadapter(self, model): + if self.prev_control_net is not None: + return self.prev_control_net.doit_ipadapter(model) + 
else: + return model, [] + + def apply(self, positive, negative, image, mask=None, use_acn=False): + cnet_image_list = [] + prev_cnet_images = [] + + if self.prev_control_net is not None: + positive, negative, prev_cnet_images = self.prev_control_net.apply(positive, negative, image, mask) + + if self.control_image is not None: + cnet_image = self.control_image + elif self.preprocessor is not None: + cnet_image = self.preprocessor.apply(image, mask) + else: + cnet_image = image + + cnet_image_list.extend(prev_cnet_images) + cnet_image_list.append(cnet_image) + + if use_acn: + if "ACN_AdvancedControlNetApply" in nodes.NODE_CLASS_MAPPINGS: + acn = nodes.NODE_CLASS_MAPPINGS['ACN_AdvancedControlNetApply']() + positive, negative, _ = acn.apply_controlnet(positive=positive, negative=negative, control_net=self.control_net, image=cnet_image, + strength=self.strength, start_percent=self.start_percent, end_percent=self.end_percent) + else: + utils.try_install_custom_node('https://github.com/BlenderNeko/ComfyUI_TiledKSampler', + "To use 'ControlNetAdvancedWrapper' for AnimateDiff, 'ComfyUI-Advanced-ControlNet' extension is required.") + raise Exception("'ACN_AdvancedControlNetApply' node isn't installed.") + else: + if self.vae is not None: + apply_controlnet = nodes.ControlNetApplyAdvanced().apply_controlnet + signature = inspect.signature(apply_controlnet) + + if 'vae' in signature.parameters: + positive, negative = nodes.ControlNetApplyAdvanced().apply_controlnet(positive, negative, self.control_net, cnet_image, self.strength, self.start_percent, self.end_percent, vae=self.vae) + else: + logging.error("[Impact Pack] ERROR: The ComfyUI version is outdated. VAE cannot be used in ApplyControlNet.") + raise Exception("[Impact Pack] ERROR: The ComfyUI version is outdated. 
VAE cannot be used in ApplyControlNet.") + else: + positive, negative = nodes.ControlNetApplyAdvanced().apply_controlnet(positive, negative, self.control_net, cnet_image, self.strength, self.start_percent, self.end_percent) + + return positive, negative, cnet_image_list + + +# REQUIREMENTS: BlenderNeko/ComfyUI_TiledKSampler +class TiledKSamplerWrapper: + params = None + + def __init__(self, model, seed, steps, cfg, sampler_name, scheduler, positive, negative, denoise, + tile_width, tile_height, tiling_strategy): + self.params = model, seed, steps, cfg, sampler_name, scheduler, positive, negative, denoise, tile_width, tile_height, tiling_strategy + + def sample(self, latent_image, hook=None): + if "BNK_TiledKSampler" in nodes.NODE_CLASS_MAPPINGS: + TiledKSampler = nodes.NODE_CLASS_MAPPINGS['BNK_TiledKSampler'] + else: + utils.try_install_custom_node('https://github.com/BlenderNeko/ComfyUI_TiledKSampler', + "To use 'TiledKSamplerProvider', 'Tiled sampling for ComfyUI' extension is required.") + raise Exception("'BNK_TiledKSampler' node isn't installed.") + + model, seed, steps, cfg, sampler_name, scheduler, positive, negative, denoise, tile_width, tile_height, tiling_strategy = self.params + + if hook is not None: + model, seed, steps, cfg, sampler_name, scheduler, positive, negative, upscaled_latent, denoise = \ + hook.pre_ksample(model, seed, steps, cfg, sampler_name, scheduler, positive, negative, latent_image, + denoise) + + return TiledKSampler().sample(model, seed, tile_width, tile_height, tiling_strategy, steps, cfg, sampler_name, + scheduler, positive, negative, latent_image, denoise)[0] + + +class PixelTiledKSampleUpscaler: + def __init__(self, scale_method, model, vae, seed, steps, cfg, sampler_name, scheduler, positive, negative, + denoise, + tile_width, tile_height, tiling_strategy, + upscale_model_opt=None, hook_opt=None, tile_cnet_opt=None, tile_size=512, tile_cnet_strength=1.0, overlap=64): + self.params = scale_method, model, vae, seed, steps, cfg, sampler_name, scheduler, positive, negative, denoise + self.vae = vae + self.tile_params = tile_width, tile_height, tiling_strategy + self.upscale_model = upscale_model_opt + self.hook = hook_opt + self.tile_cnet = tile_cnet_opt + self.tile_size = tile_size + self.is_tiled = True + self.tile_cnet_strength = tile_cnet_strength + self.overlap = overlap + + def tiled_ksample(self, latent, images): + if "BNK_TiledKSampler" in nodes.NODE_CLASS_MAPPINGS: + TiledKSampler = nodes.NODE_CLASS_MAPPINGS['BNK_TiledKSampler'] + else: + utils.try_install_custom_node('https://github.com/BlenderNeko/ComfyUI_TiledKSampler', + "To use 'PixelTiledKSampleUpscalerProvider', 'Tiled sampling for ComfyUI' extension is required.") + raise RuntimeError("'BNK_TiledKSampler' node isn't installed.") + + scale_method, model, vae, seed, steps, cfg, sampler_name, scheduler, positive, negative, denoise = self.params + tile_width, tile_height, tiling_strategy = self.tile_params + + if self.tile_cnet is not None: + image_batch, image_w, image_h, _ = images.shape + if image_batch > 1: + warnings.warn('Multiple latents in batch, Tile ControlNet being ignored') + else: + if 'TilePreprocessor' not in nodes.NODE_CLASS_MAPPINGS: + raise RuntimeError("'TilePreprocessor' node (from comfyui_controlnet_aux) isn't installed.") + preprocessor = nodes.NODE_CLASS_MAPPINGS['TilePreprocessor']() + # might add capacity to set pyrUp_iters later, not needed for now though + preprocessed = preprocessor.execute(images, pyrUp_iters=3, resolution=min(image_w, image_h))[0] + + positive, 
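# The optional-dependency pattern used throughout this file (BNK_TiledKSampler
# above, TilePreprocessor, CLIPSeg below), condensed into one helper sketch
# that reuses this module's own nodes/utils imports; the helper name is
# illustrative:
def require_node(name, install_url, hint):
    if name in nodes.NODE_CLASS_MAPPINGS:
        return nodes.NODE_CLASS_MAPPINGS[name]
    utils.try_install_custom_node(install_url, hint)
    raise RuntimeError(f"'{name}' node isn't installed.")

# TiledKSampler = require_node(
#     'BNK_TiledKSampler',
#     'https://github.com/BlenderNeko/ComfyUI_TiledKSampler',
#     "To use 'PixelTiledKSampleUpscalerProvider', 'Tiled sampling for ComfyUI' extension is required.")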
negative = nodes.ControlNetApplyAdvanced().apply_controlnet(positive=positive, + negative=negative, + control_net=self.tile_cnet, + image=preprocessed, + strength=self.tile_cnet_strength, + start_percent=0, end_percent=1.0, + vae=self.vae) + + return TiledKSampler().sample(model, seed, tile_width, tile_height, tiling_strategy, steps, cfg, sampler_name, + scheduler, positive, negative, latent, denoise)[0] + + def upscale(self, step_info, samples, upscale_factor, save_temp_prefix=None): + scale_method, model, vae, seed, steps, cfg, sampler_name, scheduler, positive, negative, denoise = self.params + + if self.hook is not None: + self.hook.set_steps(step_info) + + if self.upscale_model is None: + upscaled_latent, upscaled_images = \ + latent_upscale_on_pixel_space2(samples, scale_method, upscale_factor, vae, + use_tile=True, save_temp_prefix=save_temp_prefix, + hook=self.hook, tile_size=self.tile_size) + else: + upscaled_latent, upscaled_images = \ + latent_upscale_on_pixel_space_with_model2(samples, scale_method, self.upscale_model, + upscale_factor, vae, use_tile=True, + save_temp_prefix=save_temp_prefix, + hook=self.hook, tile_size=self.tile_size) + + refined_latent = self.tiled_ksample(upscaled_latent, upscaled_images) + + return refined_latent + + def upscale_shape(self, step_info, samples, w, h, save_temp_prefix=None): + scale_method, model, vae, seed, steps, cfg, sampler_name, scheduler, positive, negative, denoise = self.params + + if self.hook is not None: + self.hook.set_steps(step_info) + + if self.upscale_model is None: + upscaled_latent, upscaled_images = \ + latent_upscale_on_pixel_space_shape2(samples, scale_method, w, h, vae, + use_tile=True, save_temp_prefix=save_temp_prefix, + hook=self.hook, tile_size=self.tile_size) + else: + upscaled_latent, upscaled_images = \ + latent_upscale_on_pixel_space_with_model_shape2(samples, scale_method, + self.upscale_model, w, h, vae, + use_tile=True, + save_temp_prefix=save_temp_prefix, + hook=self.hook, + tile_size=self.tile_size) + + refined_latent = self.tiled_ksample(upscaled_latent, upscaled_images) + + return refined_latent + + +# REQUIREMENTS: biegert/ComfyUI-CLIPSeg +class BBoxDetectorBasedOnCLIPSeg: + prompt = None + blur = None + threshold = None + dilation_factor = None + aux = None + + def __init__(self, prompt, blur, threshold, dilation_factor): + self.prompt = prompt + self.blur = blur + self.threshold = threshold + self.dilation_factor = dilation_factor + + def detect(self, image, bbox_threshold, bbox_dilation, bbox_crop_factor, drop_size=1, detailer_hook=None): + mask = self.detect_combined(image, bbox_threshold, bbox_dilation) + + mask = utils.make_2d_mask(mask) + + segs = mask_to_segs(mask, False, bbox_crop_factor, True, drop_size, detailer_hook=detailer_hook) + + if detailer_hook is not None and hasattr(detailer_hook, "post_detection"): + segs = detailer_hook.post_detection(segs) + + return segs + + def detect_combined(self, image, bbox_threshold, bbox_dilation): + if "CLIPSeg" in nodes.NODE_CLASS_MAPPINGS: + CLIPSeg = nodes.NODE_CLASS_MAPPINGS['CLIPSeg'] + else: + utils.try_install_custom_node('https://github.com/biegert/ComfyUI-CLIPSeg/raw/main/custom_nodes/clipseg.py', + "To use 'CLIPSegDetectorProvider', 'CLIPSeg' extension is required.") + raise Exception("'CLIPSeg' node isn't installed.") + + if self.threshold is None: + threshold = bbox_threshold + else: + threshold = self.threshold + + if self.dilation_factor is None: + dilation_factor = bbox_dilation + else: + dilation_factor = self.dilation_factor + + prompt = 
self.aux if self.prompt == '' and self.aux is not None else self.prompt + + mask, _, _ = CLIPSeg().segment_image(image, prompt, self.blur, threshold, dilation_factor) + mask = utils.to_binary_mask(mask) + return mask + + def setAux(self, x): + self.aux = x + + +def update_node_status(node, text, progress=None): + if PromptServer.instance.client_id is None: + return + + PromptServer.instance.send_sync("impact/update_status", { + "node": node, + "progress": progress, + "text": text + }, PromptServer.instance.client_id) + + +def random_mask_raw(mask, bbox, factor): + x1, y1, x2, y2 = bbox + w = x2 - x1 + h = y2 - y1 + + factor = max(6, int(min(w, h) * factor / 4)) + + def draw_random_circle(center, radius): + i, j = center + for x in range(int(i - radius), int(i + radius)): + for y in range(int(j - radius), int(j + radius)): + if np.linalg.norm(np.array([x, y]) - np.array([i, j])) <= radius: + mask[x, y] = 1 + + def draw_irregular_line(start, end, pivot, is_vertical): + i = start + while i < end: + base_radius = np.random.randint(5, factor) + radius = int(base_radius) + + if is_vertical: + draw_random_circle((i, pivot), radius) + else: + draw_random_circle((pivot, i), radius) + + i += radius + + def draw_irregular_line_parallel(start, end, pivot, is_vertical): + with ThreadPoolExecutor(max_workers=16) as executor: + futures = [] + step = (end - start) // 16 + for i in range(start, end, step): + future = executor.submit(draw_irregular_line, i, min(i + step, end), pivot, is_vertical) + futures.append(future) + + for future in futures: + future.result() + + draw_irregular_line_parallel(y1 + factor, y2 - factor, x1 + factor, True) + draw_irregular_line_parallel(y1 + factor, y2 - factor, x2 - factor, True) + draw_irregular_line_parallel(x1 + factor, x2 - factor, y1 + factor, False) + draw_irregular_line_parallel(x1 + factor, x2 - factor, y2 - factor, False) + + mask[y1 + factor:y2 - factor, x1 + factor:x2 - factor] = 1.0 + + +def random_mask(mask, bbox, factor, size=128): + small_mask = np.zeros((size, size)).astype(np.float32) + random_mask_raw(small_mask, (0, 0, size, size), factor) + + x1, y1, x2, y2 = bbox + small_mask = torch.tensor(small_mask).unsqueeze(0).unsqueeze(0) + bbox_mask = torch.nn.functional.interpolate(small_mask, size=(y2 - y1, x2 - x1), mode='bilinear', align_corners=False) + bbox_mask = bbox_mask.squeeze(0).squeeze(0) + mask[y1:y2, x1:x2] = bbox_mask + + +def adaptive_mask_paste(dest_mask, src_mask, bbox): + x1, y1, x2, y2 = bbox + small_mask = torch.tensor(src_mask).unsqueeze(0).unsqueeze(0) + bbox_mask = torch.nn.functional.interpolate(small_mask, size=(y2 - y1, x2 - x1), mode='bilinear', align_corners=False) + bbox_mask = bbox_mask.squeeze(0).squeeze(0) + dest_mask[y1:y2, x1:x2] = bbox_mask + + +def crop_condition_mask(mask, image, crop_region): + cond_scale = (mask.shape[1] / image.shape[1], mask.shape[2] / image.shape[2]) + mask_region = [round(v * cond_scale[i % 2]) for i, v in enumerate(crop_region)] + return utils.crop_ndarray3(mask, mask_region) + + +class SafeToGPU: + def __init__(self, size): + self.size = size + + def to_device(self, obj, device): + if utils.is_same_device(device, 'cpu'): + obj.to(device) + else: + if utils.is_same_device(obj.device, 'cpu'): # cpu to gpu + model_management.free_memory(self.size * 1.3, device) + if model_management.get_free_memory(device) > self.size * 1.3: + try: + obj.to(device) + except Exception: + logging.warning(f"[Impact Pack] The model is not moved to the '{device}' due to insufficient memory. 
[1]") + else: + logging.warning(f"[Impact Pack] The model is not moved to the '{device}' due to insufficient memory. [2]") + + +class SafeToGPU_stub(): + def to_device(self, obj, device): + pass + + +from comfy.cli_args import args, LatentPreviewMethod +import folder_paths +from latent_preview import TAESD, TAESDPreviewerImpl, Latent2RGBPreviewer + +try: + import comfy.latent_formats as latent_formats + + + def get_previewer(device, latent_format=latent_formats.SD15(), force=False, method=None): + previewer = None + + if method is None: + method = args.preview_method + + if method != LatentPreviewMethod.NoPreviews or force: + # TODO previewer methods + taesd_decoder_path = None + + if hasattr(latent_format, "taesd_decoder_path"): + taesd_decoder_path = folder_paths.get_full_path("vae_approx", latent_format.taesd_decoder_name) + + if method == LatentPreviewMethod.Auto: + method = LatentPreviewMethod.Latent2RGB + if taesd_decoder_path: + method = LatentPreviewMethod.TAESD + + if method == LatentPreviewMethod.TAESD: + if taesd_decoder_path: + taesd = TAESD(None, taesd_decoder_path, latent_channels=latent_format.latent_channels).to(device) + previewer = TAESDPreviewerImpl(taesd) + else: + logging.warning("[Impact Pack] TAESD previews enabled, but could not find models/vae_approx/{}".format( + latent_format.taesd_decoder_name)) + + if previewer is None: + previewer = Latent2RGBPreviewer(latent_format.latent_rgb_factors) + return previewer + +except Exception: + logging.error("#########################################################################") + logging.error("[ERROR] ComfyUI-Impact-Pack: Please update ComfyUI to the latest version.") + logging.error("#########################################################################") diff --git a/zavodik/nodes/ComfyUI-Impact-Pack/modules/impact/defs.py b/zavodik/nodes/ComfyUI-Impact-Pack/modules/impact/defs.py new file mode 100644 index 0000000000000000000000000000000000000000..e07c66e984d25235fbf1aeae788befd57f52f20e --- /dev/null +++ b/zavodik/nodes/ComfyUI-Impact-Pack/modules/impact/defs.py @@ -0,0 +1,17 @@ +detection_labels = [ + 'hand', 'face', 'mouth', 'eyes', 'eyebrows', 'pupils', + 'left_eyebrow', 'left_eye', 'left_pupil', 'right_eyebrow', 'right_eye', 'right_pupil', + 'short_sleeved_shirt', 'long_sleeved_shirt', 'short_sleeved_outwear', 'long_sleeved_outwear', + 'vest', 'sling', 'shorts', 'trousers', 'skirt', 'short_sleeved_dress', 'long_sleeved_dress', 'vest_dress', 'sling_dress', + "person", "bicycle", "car", "motorcycle", "airplane", "bus", "train", "truck", "boat", + "traffic light", "fire hydrant", "stop sign", "parking meter", "bench", + "bird", "cat", "dog", "horse", "sheep", "cow", "elephant", "bear", "zebra", "giraffe", + "backpack", "umbrella", "handbag", "tie", "suitcase", "frisbee", "skis", "snowboard", + "sports ball", "kite", "baseball bat", "baseball glove", "skateboard", "surfboard", + "tennis racket", "bottle", "wine glass", "cup", "fork", "knife", "spoon", "bowl", + "banana", "apple", "sandwich", "orange", "broccoli", "carrot", "hot dog", "pizza", + "donut", "cake", "chair", "couch", "potted plant", "bed", "dining table", "toilet", + "tv", "laptop", "mouse", "remote", "keyboard", "cell phone", "microwave", "oven", + "toaster", "sink", "refrigerator", "book", "clock", "vase", "scissors", "teddy bear", + "hair drier", "toothbrush" + ] diff --git a/zavodik/nodes/ComfyUI-Impact-Pack/modules/impact/detectors.py b/zavodik/nodes/ComfyUI-Impact-Pack/modules/impact/detectors.py new file mode 100644 index 
0000000000000000000000000000000000000000..1c76bbce042b3eba0a4cdf9ffddbf32424abce15 --- /dev/null +++ b/zavodik/nodes/ComfyUI-Impact-Pack/modules/impact/detectors.py @@ -0,0 +1,560 @@ +import logging + +import impact.core as core +from nodes import MAX_RESOLUTION +import impact.segs_nodes as segs_nodes +import impact.utils as utils +import torch +from impact.core import SEG + +SAM_MODEL_TOOLTIP = {"tooltip": "Segment Anything Model for Silhouette Detection.\nBe sure to use the SAM_MODEL loaded through the SAMLoader (Impact) node as input."} +SAM_MODEL_TOOLTIP_OPTIONAL = {"tooltip": "[OPTIONAL]\nSegment Anything Model for Silhouette Detection.\nBe sure to use the SAM_MODEL loaded through the SAMLoader (Impact) node as input.\nGiven this input, it refines the rectangular areas detected by BBOX_DETECTOR into silhouette shapes through SAM.\nsam_model_opt takes priority over segm_detector_opt."} + +MASK_HINT_THRESHOLD_TOOLTIP = "When detection_hint is mask-area, the mask of SEGS is used as a point hint for SAM (Segment Anything).\nIn this case, only the areas of the mask with brightness values equal to or greater than mask_hint_threshold are used as hints." +MASK_HINT_USE_NEGATIVE_TOOLTIP = "When detecting with SAM (Segment Anything), negative hints are applied as follows:\nSmall: When the SEGS is smaller than 10 pixels in size\nOuter: Sampling the image area outside the SEGS region at regular intervals" + +DILATION_TOOLTIP = "Set the value to dilate the result mask. If the value is negative, it erodes the mask." +DETECTION_HINT_TOOLTIP = {"tooltip": "It is recommended to use only center-1.\nWhen refining the mask of SEGS with the SAM (Segment Anything) model, center-1 uses only the rectangular area of SEGS and a single point at the exact center as hints.\nOther options were added during the experimental stage and do not work well."} + +BBOX_EXPANSION_TOOLTIP = "When performing SAM (Segment Anything) detection within the SEGS area, the rectangular area of SEGS is expanded and used as a hint." + +class SAMDetectorCombined: + @classmethod + def INPUT_TYPES(s): + return {"required": { + "sam_model": ("SAM_MODEL", SAM_MODEL_TOOLTIP), + "segs": ("SEGS", {"tooltip": "This is the segment information detected by the detector.\nIt refines the Mask through the SAM (Segment Anything) detector for all areas pointed to by SEGS, and combines all Masks to return as a single Mask."}), + "image": ("IMAGE", {"tooltip": "It is assumed that segs contains only the information about the detected areas, and does not include the image. SAM (Segment Anything) operates by referencing this image."}), + "detection_hint": (["center-1", "horizontal-2", "vertical-2", "rect-4", "diamond-4", "mask-area", + "mask-points", "mask-point-bbox", "none"], DETECTION_HINT_TOOLTIP), + "dilation": ("INT", {"default": 0, "min": -512, "max": 512, "step": 1, "tooltip": DILATION_TOOLTIP}), + "threshold": ("FLOAT", {"default": 0.93, "min": 0.0, "max": 1.0, "step": 0.01, "tooltip": "Set the sensitivity threshold for the mask detected by SAM (Segment Anything). A higher value generates a more specific mask with a narrower range. 
For example, when pointing to a person's area, it might detect clothes, which is a narrower range, instead of the entire person."}), + "bbox_expansion": ("INT", {"default": 0, "min": 0, "max": 1000, "step": 1, "tooltip": BBOX_EXPANSION_TOOLTIP}), + "mask_hint_threshold": ("FLOAT", {"default": 0.7, "min": 0.0, "max": 1.0, "step": 0.01, "tooltip": MASK_HINT_THRESHOLD_TOOLTIP}), + "mask_hint_use_negative": (["False", "Small", "Outter"], {"tooltip": MASK_HINT_USE_NEGATIVE_TOOLTIP}) + } + } + + RETURN_TYPES = ("MASK",) + FUNCTION = "doit" + + CATEGORY = "ImpactPack/Detector" + + def doit(self, sam_model, segs, image, detection_hint, dilation, + threshold, bbox_expansion, mask_hint_threshold, mask_hint_use_negative): + return (core.make_sam_mask(sam_model, segs, image, detection_hint, dilation, + threshold, bbox_expansion, mask_hint_threshold, mask_hint_use_negative), ) + + +class SAMDetectorSegmented: + @classmethod + def INPUT_TYPES(s): + return {"required": { + "sam_model": ("SAM_MODEL", SAM_MODEL_TOOLTIP), + "segs": ("SEGS", {"tooltip": "This is the segment information detected by the detector.\nFor the SEGS region, the masks detected by SAM (Segment Anything) are created as a unified mask and a batch of individual masks."}), + "image": ("IMAGE", {"tooltip": "It is assumed that segs contains only the information about the detected areas, and does not include the image. SAM (Segment Anything) operates by referencing this image."}), + "detection_hint": (["center-1", "horizontal-2", "vertical-2", "rect-4", "diamond-4", "mask-area", + "mask-points", "mask-point-bbox", "none"], DETECTION_HINT_TOOLTIP), + "dilation": ("INT", {"default": 0, "min": -512, "max": 512, "step": 1, "tooltip": DILATION_TOOLTIP}), + "threshold": ("FLOAT", {"default": 0.93, "min": 0.0, "max": 1.0, "step": 0.01}), + "bbox_expansion": ("INT", {"default": 0, "min": 0, "max": 1000, "step": 1, "tooltip": BBOX_EXPANSION_TOOLTIP}), + "mask_hint_threshold": ("FLOAT", {"default": 0.7, "min": 0.0, "max": 1.0, "step": 0.01, "tooltip": MASK_HINT_THRESHOLD_TOOLTIP}), + "mask_hint_use_negative": (["False", "Small", "Outter"], {"tooltip": MASK_HINT_USE_NEGATIVE_TOOLTIP}) + } + } + + RETURN_TYPES = ("MASK", "MASK") + RETURN_NAMES = ("combined_mask", "batch_masks") + FUNCTION = "doit" + + CATEGORY = "ImpactPack/Detector" + + def doit(self, sam_model, segs, image, detection_hint, dilation, + threshold, bbox_expansion, mask_hint_threshold, mask_hint_use_negative): + combined_mask, batch_masks = core.make_sam_mask_segmented(sam_model, segs, image, detection_hint, dilation, + threshold, bbox_expansion, mask_hint_threshold, + mask_hint_use_negative) + return (combined_mask, batch_masks, ) + + +class BboxDetectorForEach: + @classmethod + def INPUT_TYPES(s): + return {"required": { + "bbox_detector": ("BBOX_DETECTOR", ), + "image": ("IMAGE", ), + "threshold": ("FLOAT", {"default": 0.5, "min": 0.0, "max": 1.0, "step": 0.01}), + "dilation": ("INT", {"default": 10, "min": -512, "max": 512, "step": 1}), + "crop_factor": ("FLOAT", {"default": 3.0, "min": 1.0, "max": 100, "step": 0.1}), + "drop_size": ("INT", {"min": 1, "max": MAX_RESOLUTION, "step": 1, "default": 10}), + "labels": ("STRING", {"multiline": True, "default": "all", "placeholder": "List the types of segments to be allowed, separated by commas"}), + }, + "optional": {"detailer_hook": ("DETAILER_HOOK",), } + } + + RETURN_TYPES = ("SEGS", ) + FUNCTION = "doit" + + CATEGORY = "ImpactPack/Detector" + + def doit(self, bbox_detector, image, threshold, dilation, crop_factor, drop_size, 
labels=None, detailer_hook=None): + if len(image) > 1: + raise Exception('[Impact Pack] ERROR: BboxDetectorForEach does not allow image batches.\nPlease refer to https://github.com/ltdrdata/ComfyUI-extension-tutorials/blob/Main/ComfyUI-Impact-Pack/tutorial/batching-detailer.md for more information.') + + segs = bbox_detector.detect(image, threshold, dilation, crop_factor, drop_size, detailer_hook) + + if labels is not None and labels != '': + labels = labels.split(',') + if len(labels) > 0: + segs, _ = segs_nodes.SEGSLabelFilter.filter(segs, labels) + + return (segs, ) + + +class SegmDetectorForEach: + @classmethod + def INPUT_TYPES(s): + return {"required": { + "segm_detector": ("SEGM_DETECTOR", ), + "image": ("IMAGE", ), + "threshold": ("FLOAT", {"default": 0.5, "min": 0.0, "max": 1.0, "step": 0.01}), + "dilation": ("INT", {"default": 10, "min": -512, "max": 512, "step": 1}), + "crop_factor": ("FLOAT", {"default": 3.0, "min": 1.0, "max": 100, "step": 0.1}), + "drop_size": ("INT", {"min": 1, "max": MAX_RESOLUTION, "step": 1, "default": 10}), + "labels": ("STRING", {"multiline": True, "default": "all", "placeholder": "List the types of segments to be allowed, separated by commas"}), + }, + "optional": {"detailer_hook": ("DETAILER_HOOK",), } + } + + RETURN_TYPES = ("SEGS", ) + FUNCTION = "doit" + + CATEGORY = "ImpactPack/Detector" + + def doit(self, segm_detector, image, threshold, dilation, crop_factor, drop_size, labels=None, detailer_hook=None): + if len(image) > 1: + raise Exception('[Impact Pack] ERROR: SegmDetectorForEach does not allow image batches.\nPlease refer to https://github.com/ltdrdata/ComfyUI-extension-tutorials/blob/Main/ComfyUI-Impact-Pack/tutorial/batching-detailer.md for more information.') + + segs = segm_detector.detect(image, threshold, dilation, crop_factor, drop_size, detailer_hook) + + if labels is not None and labels != '': + labels = labels.split(',') + if len(labels) > 0: + segs, _ = segs_nodes.SEGSLabelFilter.filter(segs, labels) + + return (segs, ) + + +class SegmDetectorCombined: + @classmethod + def INPUT_TYPES(s): + return {"required": { + "segm_detector": ("SEGM_DETECTOR", ), + "image": ("IMAGE", ), + "threshold": ("FLOAT", {"default": 0.5, "min": 0.0, "max": 1.0, "step": 0.01}), + "dilation": ("INT", {"default": 0, "min": -512, "max": 512, "step": 1}), + } + } + + RETURN_TYPES = ("MASK",) + FUNCTION = "doit" + + CATEGORY = "ImpactPack/Detector" + + def doit(self, segm_detector, image, threshold, dilation): + mask = segm_detector.detect_combined(image, threshold, dilation) + + if mask is None: + mask = torch.zeros((image.shape[1], image.shape[2]), dtype=torch.float32, device="cpu") + + return (mask.unsqueeze(0),) + + +class BboxDetectorCombined(SegmDetectorCombined): + @classmethod + def INPUT_TYPES(s): + return {"required": { + "bbox_detector": ("BBOX_DETECTOR", ), + "image": ("IMAGE", ), + "threshold": ("FLOAT", {"default": 0.5, "min": 0.0, "max": 1.0, "step": 0.01}), + "dilation": ("INT", {"default": 4, "min": -512, "max": 512, "step": 1}), + } + } + + def doit(self, bbox_detector, image, threshold, dilation): + mask = bbox_detector.detect_combined(image, threshold, dilation) + + if mask is None: + mask = torch.zeros((image.shape[1], image.shape[2]), dtype=torch.float32, device="cpu") + + return (mask.unsqueeze(0),) + + +class SimpleDetectorForEach: + @classmethod + def INPUT_TYPES(s): + return {"required": { + "bbox_detector": ("BBOX_DETECTOR", ), + "image": ("IMAGE", ), + + "bbox_threshold": ("FLOAT", {"default": 0.5, "min": 0.0, "max": 1.0, "step": 
0.01}), + "bbox_dilation": ("INT", {"default": 0, "min": -512, "max": 512, "step": 1}), + + "crop_factor": ("FLOAT", {"default": 3.0, "min": 1.0, "max": 100, "step": 0.1}), + "drop_size": ("INT", {"min": 1, "max": MAX_RESOLUTION, "step": 1, "default": 10}), + + "sub_threshold": ("FLOAT", {"default": 0.5, "min": 0.0, "max": 1.0, "step": 0.01}), + "sub_dilation": ("INT", {"default": 0, "min": -512, "max": 512, "step": 1}), + "sub_bbox_expansion": ("INT", {"default": 0, "min": 0, "max": 1000, "step": 1}), + + "sam_mask_hint_threshold": ("FLOAT", {"default": 0.7, "min": 0.0, "max": 1.0, "step": 0.01}), + }, + "optional": { + "post_dilation": ("INT", {"default": 0, "min": -512, "max": 512, "step": 1}), + "sam_model_opt": ("SAM_MODEL", SAM_MODEL_TOOLTIP_OPTIONAL), + "segm_detector_opt": ("SEGM_DETECTOR", ), + } + } + + RETURN_TYPES = ("SEGS",) + FUNCTION = "doit" + + CATEGORY = "ImpactPack/Detector" + + @staticmethod + def detect(bbox_detector, image, bbox_threshold, bbox_dilation, crop_factor, drop_size, + sub_threshold, sub_dilation, sub_bbox_expansion, + sam_mask_hint_threshold, post_dilation=0, sam_model_opt=None, segm_detector_opt=None, + detailer_hook=None): + if len(image) > 1: + raise Exception('[Impact Pack] ERROR: SimpleDetectorForEach does not allow image batches.\nPlease refer to https://github.com/ltdrdata/ComfyUI-extension-tutorials/blob/Main/ComfyUI-Impact-Pack/tutorial/batching-detailer.md for more information.') + + if segm_detector_opt is not None and hasattr(segm_detector_opt, 'bbox_detector') and segm_detector_opt.bbox_detector == bbox_detector: + # Better segm support for YOLO-World detector + segs = segm_detector_opt.detect(image, sub_threshold, sub_dilation, crop_factor, drop_size, detailer_hook=detailer_hook) + else: + segs = bbox_detector.detect(image, bbox_threshold, bbox_dilation, crop_factor, drop_size, detailer_hook=detailer_hook) + + if sam_model_opt is not None: + mask = core.make_sam_mask(sam_model_opt, segs, image, "center-1", sub_dilation, + sub_threshold, sub_bbox_expansion, sam_mask_hint_threshold, False) + segs = core.segs_bitwise_and_mask(segs, mask) + elif segm_detector_opt is not None: + segm_segs = segm_detector_opt.detect(image, sub_threshold, sub_dilation, crop_factor, drop_size, detailer_hook=detailer_hook) + mask = core.segs_to_combined_mask(segm_segs) + segs = core.segs_bitwise_and_mask(segs, mask) + + segs = core.dilate_segs(segs, post_dilation) + + return (segs,) + + def doit(self, bbox_detector, image, bbox_threshold, bbox_dilation, crop_factor, drop_size, + sub_threshold, sub_dilation, sub_bbox_expansion, + sam_mask_hint_threshold, post_dilation=0, sam_model_opt=None, segm_detector_opt=None): + + return SimpleDetectorForEach.detect(bbox_detector, image, bbox_threshold, bbox_dilation, crop_factor, drop_size, + sub_threshold, sub_dilation, sub_bbox_expansion, + sam_mask_hint_threshold, post_dilation=post_dilation, + sam_model_opt=sam_model_opt, segm_detector_opt=segm_detector_opt) + + +class SimpleDetectorForEachPipe: + @classmethod + def INPUT_TYPES(s): + return {"required": { + "detailer_pipe": ("DETAILER_PIPE", ), + "image": ("IMAGE", ), + + "bbox_threshold": ("FLOAT", {"default": 0.5, "min": 0.0, "max": 1.0, "step": 0.01}), + "bbox_dilation": ("INT", {"default": 0, "min": -512, "max": 512, "step": 1}), + + "crop_factor": ("FLOAT", {"default": 3.0, "min": 1.0, "max": 100, "step": 0.1}), + "drop_size": ("INT", {"min": 1, "max": MAX_RESOLUTION, "step": 1, "default": 10}), + + "sub_threshold": ("FLOAT", {"default": 0.5, "min": 0.0, "max": 1.0, 
"step": 0.01}), + "sub_dilation": ("INT", {"default": 0, "min": -512, "max": 512, "step": 1}), + "sub_bbox_expansion": ("INT", {"default": 0, "min": 0, "max": 1000, "step": 1}), + + "sam_mask_hint_threshold": ("FLOAT", {"default": 0.7, "min": 0.0, "max": 1.0, "step": 0.01}), + }, + "optional": { + "post_dilation": ("INT", {"default": 0, "min": -512, "max": 512, "step": 1}), + } + } + + RETURN_TYPES = ("SEGS",) + FUNCTION = "doit" + + CATEGORY = "ImpactPack/Detector" + + def doit(self, detailer_pipe, image, bbox_threshold, bbox_dilation, crop_factor, drop_size, + sub_threshold, sub_dilation, sub_bbox_expansion, sam_mask_hint_threshold, post_dilation=0): + + if len(image) > 1: + raise Exception('[Impact Pack] ERROR: SimpleDetectorForEach does not allow image batches.\nPlease refer to https://github.com/ltdrdata/ComfyUI-extension-tutorials/blob/Main/ComfyUI-Impact-Pack/tutorial/batching-detailer.md for more information.') + + model, clip, vae, positive, negative, wildcard, bbox_detector, segm_detector_opt, sam_model_opt, detailer_hook, refiner_model, refiner_clip, refiner_positive, refiner_negative = detailer_pipe + + return SimpleDetectorForEach.detect(bbox_detector, image, bbox_threshold, bbox_dilation, crop_factor, drop_size, + sub_threshold, sub_dilation, sub_bbox_expansion, + sam_mask_hint_threshold, post_dilation=post_dilation, sam_model_opt=sam_model_opt, segm_detector_opt=segm_detector_opt, + detailer_hook=detailer_hook) + +class SAM2VideoDetectorSEGS: + @classmethod + def INPUT_TYPES(s): + return {"required": { + "image_frames": ("IMAGE", ), + + "bbox_detector": ("BBOX_DETECTOR", ), + "sam2_model": ("SAM_MODEL", ), + + "bbox_threshold": ("FLOAT", {"default": 0.5, "min": 0.0, "max": 1.0, "step": 0.01}), + "sam2_threshold": ("FLOAT", {"default": 0.5, "min": 0.0, "max": 1.0, "step": 0.01}), + + "crop_factor": ("FLOAT", {"default": 3.0, "min": 1.0, "max": 100, "step": 0.1}), + "drop_size": ("INT", {"min": 1, "max": MAX_RESOLUTION, "step": 1, "default": 10}), + } + } + + RETURN_TYPES = ("SEGS", ) + FUNCTION = "doit" + + CATEGORY = "ImpactPack/Detector" + + @staticmethod + def doit(bbox_detector, sam2_model, image_frames, bbox_threshold, sam2_threshold, crop_factor, drop_size): + # ---- Check SAM2 model ---- + if not isinstance(sam2_model, core.SAM2Wrapper): + logging.error("[Impact Pack] To use the SAM2VideoDetectorSEGS node, a valid SAM2 model must be provided as input to `sam2_model`.") + raise Exception("To use the SAM2VideoDetectorSEGS node, a SAM2 model must be provided as input to `sam2_model`.") + + # ---- Detect bboxes ---- + segs = bbox_detector.detect(image_frames[0].unsqueeze(0), bbox_threshold, 0, 0, drop_size) + + # ---- If no detections, try reversed frames before giving up ---- + if len(segs[1]) == 0: + reversed_frames = torch.flip(image_frames, dims=[0]) + segs_rev = bbox_detector.detect(reversed_frames[0].unsqueeze(0), bbox_threshold, 0, 0, drop_size) + + if len(segs_rev[1]) == 0: + # No Bboxes when reversed -> Give up + h, w = image_frames.shape[1:3] + return (((h, w), []), ) + + # ---- Predict masks in reversed mode ---- + segs_masks = sam2_model.predict_video_segs(reversed_frames, segs_rev) + + # segs_masks wieder umdrehen, damit sie mit Originalframes matchen + for k in segs_masks.keys(): + segs_masks[k] = torch.flip(segs_masks[k], dims=[0]) + else: + # ---- Predict masks if BBOXES were found in forward pass---- + segs_masks = sam2_model.predict_video_segs(image_frames, segs) + + def get_whole_merged_mask(all_masks): + merged_mask = (all_masks[0] * 
255).to(torch.uint8) + for mask in all_masks[1:]: + merged_mask |= (mask * 255).to(torch.uint8) + + merged_mask = (merged_mask / 255.0).to(torch.float32) + merged_mask = utils.to_binary_mask(merged_mask, 0.1)[0] + return merged_mask + + new_segs = [] + for k, v in segs_masks.items(): + v = v.squeeze(3) + m = get_whole_merged_mask(v) + seg = segs_nodes.MaskToSEGS.doit(m, False, crop_factor, False, drop_size, contour_fill=True)[0][1] + + if len(seg) == 0: + continue + + seg = seg[0] + + x1, y1, x2, y2 = seg.crop_region + masks = [] + for mask in v: + masks.append(mask[y1:y2, x1:x2]) + cropped_mask = torch.stack(masks) + cropped_mask = (cropped_mask >= (sam2_threshold * 100 - 50)).to(torch.uint8).cpu() + + new_seg = SEG( + seg.cropped_image, + cropped_mask, + seg.confidence, + seg.crop_region, + seg.bbox, + seg.label, + seg.control_net_wrapper + ) + new_segs.append(new_seg) + + return ((segs[0], new_segs), ) + + + +class SimpleDetectorForAnimateDiff: + @classmethod + def INPUT_TYPES(s): + return {"required": { + "bbox_detector": ("BBOX_DETECTOR", ), + "image_frames": ("IMAGE", ), + + "bbox_threshold": ("FLOAT", {"default": 0.5, "min": 0.0, "max": 1.0, "step": 0.01}), + "bbox_dilation": ("INT", {"default": 0, "min": -255, "max": 255, "step": 1}), + + "crop_factor": ("FLOAT", {"default": 3.0, "min": 1.0, "max": 100, "step": 0.1}), + "drop_size": ("INT", {"min": 1, "max": MAX_RESOLUTION, "step": 1, "default": 10}), + + "sub_threshold": ("FLOAT", {"default": 0.5, "min": 0.0, "max": 1.0, "step": 0.01}), + "sub_dilation": ("INT", {"default": 0, "min": -255, "max": 255, "step": 1}), + "sub_bbox_expansion": ("INT", {"default": 0, "min": 0, "max": 1000, "step": 1}), + + "sam_mask_hint_threshold": ("FLOAT", {"default": 0.7, "min": 0.0, "max": 1.0, "step": 0.01}), + }, + "optional": { + "masking_mode": (["Pivot SEGS", "Combine neighboring frames", "Don't combine"],), + "segs_pivot": (["Combined mask", "1st frame mask"],), + "sam_model_opt": ("SAM_MODEL", SAM_MODEL_TOOLTIP_OPTIONAL), + "segm_detector_opt": ("SEGM_DETECTOR", ), + } + } + + RETURN_TYPES = ("SEGS",) + FUNCTION = "doit" + + CATEGORY = "ImpactPack/Detector" + + @staticmethod + def detect(bbox_detector, image_frames, bbox_threshold, bbox_dilation, crop_factor, drop_size, + sub_threshold, sub_dilation, sub_bbox_expansion, sam_mask_hint_threshold, + masking_mode="Pivot SEGS", segs_pivot="Combined mask", sam_model_opt=None, segm_detector_opt=None): + + h = image_frames.shape[1] + w = image_frames.shape[2] + + # gather segs for all frames + segs_by_frames = [] + for image in image_frames: + image = image.unsqueeze(0) + segs = bbox_detector.detect(image, bbox_threshold, bbox_dilation, crop_factor, drop_size) + + if sam_model_opt is not None: + mask = core.make_sam_mask(sam_model_opt, segs, image, "center-1", sub_dilation, + sub_threshold, sub_bbox_expansion, sam_mask_hint_threshold, False) + segs = core.segs_bitwise_and_mask(segs, mask) + elif segm_detector_opt is not None: + segm_segs = segm_detector_opt.detect(image, sub_threshold, sub_dilation, crop_factor, drop_size) + mask = core.segs_to_combined_mask(segm_segs) + segs = core.segs_bitwise_and_mask(segs, mask) + + segs_by_frames.append(segs) + + def get_masked_frames(): + masks_by_frame = [] + for i, segs in enumerate(segs_by_frames): + masks_in_frame = segs_nodes.SEGSToMaskList().doit(segs)[0] + current_frame_mask = (masks_in_frame[0] * 255).to(torch.uint8) + + for mask in masks_in_frame[1:]: + current_frame_mask |= (mask * 255).to(torch.uint8) + + current_frame_mask = 
(current_frame_mask/255.0).to(torch.float32) + current_frame_mask = utils.to_binary_mask(current_frame_mask, 0.1)[0] + + masks_by_frame.append(current_frame_mask) + + return masks_by_frame + + def get_empty_mask(): + return torch.zeros((h, w), dtype=torch.float32, device="cpu") + + def get_neighboring_mask_at(i, masks_by_frame): + # previous/next frame masks; empty masks at the sequence boundaries + prv = masks_by_frame[i-1] if i > 0 else get_empty_mask() + cur = masks_by_frame[i] + nxt = masks_by_frame[i+1] if i < len(masks_by_frame) - 1 else get_empty_mask() + + prv = prv if prv is not None else get_empty_mask() + cur = cur.clone() if cur is not None else get_empty_mask() + nxt = nxt if nxt is not None else get_empty_mask() + + return prv, cur, nxt + + def get_merged_neighboring_mask(masks_by_frame): + if len(masks_by_frame) <= 1: + return masks_by_frame + + result = [] + for i in range(0, len(masks_by_frame)): + prv, cur, nxt = get_neighboring_mask_at(i, masks_by_frame) + cur = (cur * 255).to(torch.uint8) + cur |= (prv * 255).to(torch.uint8) + cur |= (nxt * 255).to(torch.uint8) + cur = (cur / 255.0).to(torch.float32) + cur = utils.to_binary_mask(cur, 0.1)[0] + result.append(cur) + + return result + + def get_whole_merged_mask(): + all_masks = [] + for segs in segs_by_frames: + all_masks += segs_nodes.SEGSToMaskList().doit(segs)[0] + + merged_mask = (all_masks[0] * 255).to(torch.uint8) + for mask in all_masks[1:]: + merged_mask |= (mask * 255).to(torch.uint8) + + merged_mask = (merged_mask / 255.0).to(torch.float32) + merged_mask = utils.to_binary_mask(merged_mask, 0.1)[0] + return merged_mask + + def get_pivot_segs(): + if segs_pivot == "1st frame mask": + return segs_by_frames[0] + else: + merged_mask = get_whole_merged_mask() + return segs_nodes.MaskToSEGS.doit(merged_mask, False, crop_factor, False, drop_size, contour_fill=True)[0] + + def get_segs(merged_neighboring=False): + pivot_segs = get_pivot_segs() + + masks_by_frame = get_masked_frames() + if merged_neighboring: + masks_by_frame = get_merged_neighboring_mask(masks_by_frame) + + new_segs = [] + for seg in pivot_segs[1]: + cropped_mask = torch.zeros(seg.cropped_mask.shape, dtype=torch.float32, device="cpu").unsqueeze(0) + pivot_mask = torch.from_numpy(seg.cropped_mask) + x1, y1, x2, y2 = seg.crop_region + for mask in masks_by_frame: + cropped_mask_at_frame = (mask[y1:y2, x1:x2] * pivot_mask).unsqueeze(0) + cropped_mask = torch.cat((cropped_mask, cropped_mask_at_frame), dim=0) + + if len(cropped_mask) > 1: + cropped_mask = cropped_mask[1:] + + new_seg = SEG(seg.cropped_image, cropped_mask, seg.confidence, seg.crop_region, seg.bbox, seg.label, seg.control_net_wrapper) + new_segs.append(new_seg) + + return pivot_segs[0], new_segs + + # create result mask + if masking_mode == "Pivot SEGS": + return (get_pivot_segs(), ) + + elif masking_mode == "Combine neighboring frames": + return (get_segs(merged_neighboring=True), ) + + else: # elif masking_mode == "Don't combine": + return (get_segs(merged_neighboring=False), ) + + def doit(self, bbox_detector, image_frames, bbox_threshold, bbox_dilation, crop_factor, drop_size, + sub_threshold, sub_dilation, sub_bbox_expansion, sam_mask_hint_threshold, + masking_mode="Pivot SEGS", segs_pivot="Combined mask", sam_model_opt=None, segm_detector_opt=None): + + return SimpleDetectorForAnimateDiff.detect(bbox_detector, image_frames, bbox_threshold, bbox_dilation, crop_factor, drop_size, + sub_threshold, sub_dilation, sub_bbox_expansion, sam_mask_hint_threshold, + masking_mode, segs_pivot, sam_model_opt, segm_detector_opt) diff --git 
a/zavodik/nodes/ComfyUI-Impact-Pack/modules/impact/hf_nodes.py b/zavodik/nodes/ComfyUI-Impact-Pack/modules/impact/hf_nodes.py new file mode 100644 index 0000000000000000000000000000000000000000..119053d8b8817fa8bdc8386a06ab318b68c394c2 --- /dev/null +++ b/zavodik/nodes/ComfyUI-Impact-Pack/modules/impact/hf_nodes.py @@ -0,0 +1,189 @@ +import comfy +import re +from impact import utils + + +hf_transformer_model_urls = [ + "rizvandwiki/gender-classification-2", + "NTQAI/pedestrian_gender_recognition", + "Leilab/gender_class", + "ProjectPersonal/GenderClassifier", + "crangana/trained-gender", + "cledoux42/GenderNew_v002", + "ivensamdh/genderage2" +] + + +class HF_TransformersClassifierProvider: + @classmethod + def INPUT_TYPES(s): + global hf_transformer_model_urls + return {"required": { + "preset_repo_id": (hf_transformer_model_urls + ['Manual repo id'],), + "manual_repo_id": ("STRING", {"multiline": False}), + "device_mode": (["AUTO", "Prefer GPU", "CPU"],), + }, + } + + RETURN_TYPES = ("TRANSFORMERS_CLASSIFIER",) + FUNCTION = "doit" + + CATEGORY = "ImpactPack/HuggingFace" + + def doit(self, preset_repo_id, manual_repo_id, device_mode): + from transformers import pipeline + + if preset_repo_id == 'Manual repo id': + url = manual_repo_id + else: + url = preset_repo_id + + if device_mode != 'CPU': + device = comfy.model_management.get_torch_device() + else: + device = "cpu" + + classifier = pipeline('image-classification', model=url, device=device) + + return (classifier,) + + +preset_classify_expr = [ + '#Female > #Male', + '#Female < #Male', + 'female > 0.5', + 'male > 0.5', + 'Age16to25 > 0.1', + 'Age50to69 > 0.1', +] + +symbolic_label_map = { + '#Female': {'female', 'Female', 'Human Female', 'woman', 'women', 'girl'}, + '#Male': {'male', 'Male', 'Human Male', 'man', 'men', 'boy'} +} + +def is_numeric_string(input_str): + return re.match(r'^-?\d+(\.\d+)?$', input_str) is not None + + +classify_expr_pattern = r'([^><= ]+)\s*(>|<|>=|<=|=)\s*([^><= ]+)' + + +class SEGS_Classify: + @classmethod + def INPUT_TYPES(s): + global preset_classify_expr + return {"required": { + "classifier": ("TRANSFORMERS_CLASSIFIER",), + "segs": ("SEGS",), + "preset_expr": (preset_classify_expr + ['Manual expr'],), + "manual_expr": ("STRING", {"multiline": False}), + }, + "optional": { + "ref_image_opt": ("IMAGE", ), + } + } + + RETURN_TYPES = ("SEGS", "SEGS", "STRING") + RETURN_NAMES = ("filtered_SEGS", "remained_SEGS", "detected_labels") + OUTPUT_IS_LIST = (False, False, True) + + FUNCTION = "doit" + + CATEGORY = "ImpactPack/HuggingFace" + + @staticmethod + def lookup_classified_label_score(score_infos, label): + global symbolic_label_map + + if label.startswith('#'): + if label not in symbolic_label_map: + return None + else: + label = symbolic_label_map[label] + else: + label = {label} + + for x in score_infos: + if x['label'] in label: + return x['score'] + + return None + + def doit(self, classifier, segs, preset_expr, manual_expr, ref_image_opt=None): + if preset_expr == 'Manual expr': + expr_str = manual_expr + else: + expr_str = preset_expr + + match = re.match(classify_expr_pattern, expr_str) + + if match is None: + return (segs[0], []), segs, [] + + a = match.group(1) + op = match.group(2) + b = match.group(3) + + a_is_lab = not is_numeric_string(a) + b_is_lab = not is_numeric_string(b) + + classified = [] + remained_SEGS = [] + provided_labels = set() + + for seg in segs[1]: + cropped_image = None + + if seg.cropped_image is not None: + cropped_image = seg.cropped_image + elif ref_image_opt is not None: 
+ # no crop cached in the SEG; take the patch from the reference image instead + cropped_image = utils.crop_image(ref_image_opt, seg.crop_region) + + if cropped_image is not None: + cropped_image = utils.to_pil(cropped_image) + res = classifier(cropped_image) + classified.append((seg, res)) + + for x in res: + provided_labels.add(x['label']) + else: + remained_SEGS.append(seg) + + filtered_SEGS = [] + for seg, res in classified: + if a_is_lab: + avalue = SEGS_Classify.lookup_classified_label_score(res, a) + else: + avalue = a + + if b_is_lab: + bvalue = SEGS_Classify.lookup_classified_label_score(res, b) + else: + bvalue = b + + if avalue is None or bvalue is None: + remained_SEGS.append(seg) + continue + + avalue = float(avalue) + bvalue = float(bvalue) + + if op == '>': + cond = avalue > bvalue + elif op == '<': + cond = avalue < bvalue + elif op == '>=': + cond = avalue >= bvalue + elif op == '<=': + cond = avalue <= bvalue + else: + cond = avalue == bvalue + + if cond: + filtered_SEGS.append(seg) + else: + remained_SEGS.append(seg) + + return (segs[0], filtered_SEGS), (segs[0], remained_SEGS), list(provided_labels) diff --git a/zavodik/nodes/ComfyUI-Impact-Pack/modules/impact/hook_nodes.py b/zavodik/nodes/ComfyUI-Impact-Pack/modules/impact/hook_nodes.py new file mode 100644 index 0000000000000000000000000000000000000000..8a8e947ff2de41b4fe8ead8c8ca61e0d85850fe7 --- /dev/null +++ b/zavodik/nodes/ComfyUI-Impact-Pack/modules/impact/hook_nodes.py @@ -0,0 +1,128 @@ +import sys +from . import hooks +from . import defs + + +class SEGSOrderedFilterDetailerHookProvider: + @classmethod + def INPUT_TYPES(s): + return {"required": { + "target": (["area(=w*h)", "width", "height", "x1", "y1", "x2", "y2"],), + "order": ("BOOLEAN", {"default": True, "label_on": "descending", "label_off": "ascending"}), + "take_start": ("INT", {"default": 0, "min": 0, "max": sys.maxsize, "step": 1}), + "take_count": ("INT", {"default": 1, "min": 0, "max": sys.maxsize, "step": 1}), + }, + } + + RETURN_TYPES = ("DETAILER_HOOK", ) + FUNCTION = "doit" + + CATEGORY = "ImpactPack/Util" + + def doit(self, target, order, take_start, take_count): + hook = hooks.SEGSOrderedFilterDetailerHook(target, order, take_start, take_count) + return (hook, ) + + +class SEGSRangeFilterDetailerHookProvider: + @classmethod + def INPUT_TYPES(s): + return {"required": { + "target": (["area(=w*h)", "width", "height", "x1", "y1", "x2", "y2", "length_percent"],), + "mode": ("BOOLEAN", {"default": True, "label_on": "inside", "label_off": "outside"}), + "min_value": ("INT", {"default": 0, "min": 0, "max": sys.maxsize, "step": 1}), + "max_value": ("INT", {"default": 67108864, "min": 0, "max": sys.maxsize, "step": 1}), + }, + } + + RETURN_TYPES = ("DETAILER_HOOK", ) + FUNCTION = "doit" + + CATEGORY = "ImpactPack/Util" + + def doit(self, target, mode, min_value, max_value): + hook = hooks.SEGSRangeFilterDetailerHook(target, mode, min_value, max_value) + return (hook, ) + + +class SEGSLabelFilterDetailerHookProvider: + @classmethod + def INPUT_TYPES(s): + return {"required": { + "preset": (['all'] + defs.detection_labels,), + "labels": ("STRING", {"multiline": True, "placeholder": "List the types of segments to be allowed, separated by commas"}), + }, + } + + RETURN_TYPES = ("DETAILER_HOOK", ) + FUNCTION = "doit" + + CATEGORY = "ImpactPack/Util" + + def doit(self, preset, labels): + hook = hooks.SEGSLabelFilterDetailerHook(labels) + return (hook, ) + + +class PreviewDetailerHookProvider: + @classmethod + def INPUT_TYPES(s): + return { + "required": {"quality": ("INT", {"default": 
95, "min": 20, "max": 100})}, + "hidden": {"unique_id": "UNIQUE_ID"}, + } + + RETURN_TYPES = ("DETAILER_HOOK", "UPSCALER_HOOK") + FUNCTION = "doit" + + CATEGORY = "ImpactPack/Util" + + NOT_IDEMPOTENT = True + + def doit(self, quality, unique_id): + hook = hooks.PreviewDetailerHook(unique_id, quality) + return hook, hook + + +class LamaRemoverDetailerHookProvider: + @classmethod + def INPUT_TYPES(s): + return { + "required": { + "mask_threshold":("INT", {"default": 250, "min": 0, "max": 255, "step": 1, "display": "slider"}), + "gaussblur_radius": ("INT", {"default": 8, "min": 0, "max": 20, "step": 1, "display": "slider"}), + "skip_sampling": ("BOOLEAN", {"default": True}), + } + } + + RETURN_TYPES = ("DETAILER_HOOK", ) + FUNCTION = "doit" + + CATEGORY = "ImpactPack/Util" + + def doit(self, mask_threshold, gaussblur_radius, skip_sampling): + hook = hooks.LamaRemoverDetailerHook(mask_threshold, gaussblur_radius, skip_sampling) + return (hook, ) + + +class BlackPatchRetryHookProvider: + @classmethod + def INPUT_TYPES(s): + return { + "required": { + "mean_thresh": ("INT", {"default": 10, "min": 0, "max": 255}), + "var_thresh": ("INT", {"default": 5, "min": 0, "max": 255}) + }, + } + + RETURN_TYPES = ("DETAILER_HOOK", ) + FUNCTION = "doit" + + CATEGORY = "ImpactPack/Util" + + NOT_IDEMPOTENT = True + + def doit(self, mean_thresh, var_thresh): + hook = hooks.BlackPatchRetryHook(mean_thresh, var_thresh) + return hook, diff --git a/zavodik/nodes/ComfyUI-Impact-Pack/modules/impact/hooks.py b/zavodik/nodes/ComfyUI-Impact-Pack/modules/impact/hooks.py new file mode 100644 index 0000000000000000000000000000000000000000..d6c0d23df04d339f700a02d33a2600f537cd6ed5 --- /dev/null +++ b/zavodik/nodes/ComfyUI-Impact-Pack/modules/impact/hooks.py @@ -0,0 +1,595 @@ +import copy +import torch +import nodes +from impact import utils +from . 
import segs_nodes +from thirdparty import noise_nodes +from server import PromptServer +import asyncio +import folder_paths +import os +from comfy_extras import nodes_custom_sampler +import math +import logging + + +class PixelKSampleHook: + cur_step = 0 + total_step = 0 + + def __init__(self): + pass + + def set_steps(self, info): + self.cur_step, self.total_step = info + + def post_decode(self, pixels): + return pixels + + def post_upscale(self, pixels, mask=None): + return pixels + + def post_encode(self, samples): + return samples + + def pre_decode(self, samples): + return samples + + def pre_ksample(self, model, seed, steps, cfg, sampler_name, scheduler, positive, negative, upscaled_latent, + denoise): + return model, seed, steps, cfg, sampler_name, scheduler, positive, negative, upscaled_latent, denoise + + def post_crop_region(self, w, h, item_bbox, crop_region): + return crop_region + + def touch_scaled_size(self, w, h): + return w, h + + +class PixelKSampleHookCombine(PixelKSampleHook): + hook1 = None + hook2 = None + + def __init__(self, hook1, hook2): + super().__init__() + self.hook1 = hook1 + self.hook2 = hook2 + + def set_steps(self, info): + self.hook1.set_steps(info) + self.hook2.set_steps(info) + + def pre_decode(self, samples): + return self.hook2.pre_decode(self.hook1.pre_decode(samples)) + + def post_decode(self, pixels): + return self.hook2.post_decode(self.hook1.post_decode(pixels)) + + def post_upscale(self, pixels, mask=None): + return self.hook2.post_upscale(self.hook1.post_upscale(pixels, mask), mask) + + def post_encode(self, samples): + return self.hook2.post_encode(self.hook1.post_encode(samples)) + + def post_crop_region(self, w, h, item_bbox, crop_region): + crop_region = self.hook1.post_crop_region(w, h, item_bbox, crop_region) + return self.hook2.post_crop_region(w, h, item_bbox, crop_region) + + def touch_scaled_size(self, w, h): + w, h = self.hook1.touch_scaled_size(w, h) + return self.hook2.touch_scaled_size(w, h) + + def pre_ksample(self, model, seed, steps, cfg, sampler_name, scheduler, positive, negative, upscaled_latent, + denoise): + model, seed, steps, cfg, sampler_name, scheduler, positive, negative, upscaled_latent, denoise = \ + self.hook1.pre_ksample(model, seed, steps, cfg, sampler_name, scheduler, positive, negative, + upscaled_latent, denoise) + + return self.hook2.pre_ksample(model, seed, steps, cfg, sampler_name, scheduler, positive, negative, + upscaled_latent, denoise) + + +class DetailerHookCombine(PixelKSampleHookCombine): + def cycle_latent(self, latent): + latent = self.hook1.cycle_latent(latent) + latent = self.hook2.cycle_latent(latent) + return latent + + def post_detection(self, segs): + segs = self.hook1.post_detection(segs) + segs = self.hook2.post_detection(segs) + return segs + + def post_paste(self, image): + image = self.hook1.post_paste(image) + image = self.hook2.post_paste(image) + return image + + def get_custom_noise(self, seed, noise, is_touched): + # chain the noise through both hooks so each can modify it in turn + noise, is_touched = self.hook1.get_custom_noise(seed, noise, is_touched) + noise, is_touched = self.hook2.get_custom_noise(seed, noise, is_touched) + return noise, is_touched + + def get_custom_sampler(self): + if self.hook1.get_custom_sampler() is not None: + return self.hook1.get_custom_sampler() + else: + return self.hook2.get_custom_sampler() + + def get_skip_sampling(self): + return self.hook1.get_skip_sampling() and self.hook2.get_skip_sampling() + + def should_retry_patch(self, patch): + return self.hook1.should_retry_patch(patch) or 
self.hook2.should_retry_patch(patch) + + +class SimpleCfgScheduleHook(PixelKSampleHook): + target_cfg = 0 + + def __init__(self, target_cfg): + super().__init__() + self.target_cfg = target_cfg + + def pre_ksample(self, model, seed, steps, cfg, sampler_name, scheduler, positive, negative, upscaled_latent, denoise): + if self.total_step > 1: + progress = self.cur_step / (self.total_step - 1) + gap = self.target_cfg - cfg + current_cfg = cfg + gap * progress # cfg is a float; interpolate without integer truncation + else: + current_cfg = self.target_cfg + + return model, seed, steps, current_cfg, sampler_name, scheduler, positive, negative, upscaled_latent, denoise + + +class SimpleDenoiseScheduleHook(PixelKSampleHook): + def __init__(self, target_denoise): + super().__init__() + self.target_denoise = target_denoise + + def pre_ksample(self, model, seed, steps, cfg, sampler_name, scheduler, positive, negative, upscaled_latent, denoise): + if self.total_step > 1: + progress = self.cur_step / (self.total_step - 1) + gap = self.target_denoise - denoise + current_denoise = denoise + gap * progress + else: + current_denoise = self.target_denoise + + return model, seed, steps, cfg, sampler_name, scheduler, positive, negative, upscaled_latent, current_denoise + + +class SimpleStepsScheduleHook(PixelKSampleHook): + def __init__(self, target_steps): + super().__init__() + self.target_steps = target_steps + + def pre_ksample(self, model, seed, steps, cfg, sampler_name, scheduler, positive, negative, upscaled_latent, denoise): + if self.total_step > 1: + progress = self.cur_step / (self.total_step - 1) + gap = self.target_steps - steps + current_steps = int(steps + gap * progress) + else: + current_steps = self.target_steps + + return model, seed, current_steps, cfg, sampler_name, scheduler, positive, negative, upscaled_latent, denoise + + +class DetailerHook(PixelKSampleHook): + def cycle_latent(self, latent): + return latent + + def post_detection(self, segs): + return segs + + def post_paste(self, image): + return image + + def get_custom_noise(self, seed, noise, is_touched): + return noise, is_touched + + def get_custom_sampler(self): + return None + + def get_skip_sampling(self): + return False + + def should_retry_patch(self, patch): + return False + + +class CustomSamplerDetailerHookProvider(DetailerHook): + def __init__(self, sampler): + super().__init__() + self.sampler = sampler + + def get_custom_sampler(self): + return self.sampler + + +# class CustomNoiseDetailerHookProvider(DetailerHook): +# def __init__(self, noise): +# super().__init__() +# self.noise = noise +# +# def get_custom_noise(self, seed, noise, is_start): +# return self.noise + + +class VariationNoiseDetailerHookProvider(DetailerHook): + def __init__(self, variation_seed, variation_strength): + super().__init__() + self.variation_seed = variation_seed + self.variation_strength = variation_strength + + def get_custom_noise(self, seed, noise, is_touched): + empty_noise = {'samples': torch.zeros(noise.size())} + if not is_touched: + noise = nodes_custom_sampler.Noise_RandomNoise(seed).generate_noise(empty_noise) + noise_2nd = nodes_custom_sampler.Noise_RandomNoise(self.variation_seed).generate_noise(empty_noise) + + mixed_noise = ((1 - self.variation_strength) * noise + self.variation_strength * noise_2nd) + + # NOTE: Since the variance of the Gaussian noise in mixed_noise has changed, it must be corrected through scaling. 
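+ # For independent unit-variance noises n1 and n2, the mixture (1 - s)*n1 + s*n2 has variance (1 - s)^2 + s^2, + # so dividing the mix by sqrt((1 - s)^2 + s^2) below restores unit variance.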
+ scale_factor = math.sqrt((1 - self.variation_strength) ** 2 + self.variation_strength ** 2) + corrected_noise = mixed_noise / scale_factor # Scale the noise to maintain variance of 1 + + return corrected_noise, True + + +class SimpleDetailerDenoiseSchedulerHook(DetailerHook): + def __init__(self, target_denoise): + super().__init__() + self.target_denoise = target_denoise + + def pre_ksample(self, model, seed, steps, cfg, sampler_name, scheduler, positive, negative, latent, denoise): + if self.total_step > 1: + progress = self.cur_step / (self.total_step - 1) + gap = self.target_denoise - denoise + current_denoise = denoise + gap * progress + else: + # ignore hook if total cycle <= 1 + current_denoise = denoise + + return model, seed, steps, cfg, sampler_name, scheduler, positive, negative, latent, current_denoise + + +class CoreMLHook(DetailerHook): + def __init__(self, mode): + super().__init__() + resolution = mode.split('x') + + self.w = int(resolution[0]) + self.h = int(resolution[1]) + + self.override_bbox_by_segm = False + + def pre_decode(self, samples): + new_samples = copy.deepcopy(samples) + new_samples['samples'] = samples['samples'][0].unsqueeze(0) + return new_samples + + def post_encode(self, samples): + new_samples = copy.deepcopy(samples) + new_samples['samples'] = samples['samples'].repeat(2, 1, 1, 1) + return new_samples + + def post_crop_region(self, w, h, item_bbox, crop_region): + x1, y1, x2, y2 = crop_region + bx1, by1, bx2, by2 = item_bbox + crop_w = x2-x1 + crop_h = y2-y1 + + crop_ratio = crop_w/crop_h + target_ratio = self.w/self.h + if crop_ratio < target_ratio: + # shrink height + top_gap = by1 - y1 + bottom_gap = y2 - by2 + + gap_ratio = top_gap / bottom_gap + + target_height = 1/target_ratio*crop_w + delta_height = crop_h - target_height + + new_y1 = int(y1 + delta_height*gap_ratio) + new_y2 = int(new_y1 + target_height) + crop_region = x1, new_y1, x2, new_y2 + + elif crop_ratio > target_ratio: + # shrink width + left_gap = bx1 - x1 + right_gap = x2 - bx2 + + gap_ratio = left_gap / right_gap + + target_width = target_ratio*crop_h + delta_width = crop_w - target_width + + new_x1 = int(x1 + delta_width*gap_ratio) + new_x2 = int(new_x1 + target_width) + crop_region = new_x1, y1, new_x2, y2 + + return crop_region + + def touch_scaled_size(self, w, h): + return self.w, self.h + + +# REQUIREMENTS: BlenderNeko/ComfyUI Noise +class InjectNoiseHook(PixelKSampleHook): + def __init__(self, source, seed, start_strength, end_strength): + super().__init__() + self.source = source + self.seed = seed + self.start_strength = start_strength + self.end_strength = end_strength + + def post_encode(self, samples): + cur_step = self.cur_step + + size = samples['samples'].shape + seed = cur_step + self.seed + cur_step + + if "BNK_NoisyLatentImage" in nodes.NODE_CLASS_MAPPINGS and "BNK_InjectNoise" in nodes.NODE_CLASS_MAPPINGS: + NoisyLatentImage = nodes.NODE_CLASS_MAPPINGS["BNK_NoisyLatentImage"] + InjectNoise = nodes.NODE_CLASS_MAPPINGS["BNK_InjectNoise"] + else: + utils.try_install_custom_node('https://github.com/BlenderNeko/ComfyUI_Noise', + "To use 'NoiseInjectionHookProvider', 'ComfyUI Noise' extension is required.") + raise Exception("'BNK_NoisyLatentImage', 'BNK_InjectNoise' nodes are not installed.") + + noise = NoisyLatentImage().create_noisy_latents(self.source, seed, size[3] * 8, size[2] * 8, size[0])[0] + + # inj noise + mask = None + if 'noise_mask' in samples: + mask = samples['noise_mask'] + + strength = self.start_strength + (self.end_strength - self.start_strength) 
* cur_step / self.total_step + samples = InjectNoise().inject_noise(samples, strength, noise, mask)[0] + logging.info(f"[Impact Pack] InjectNoiseHook: strength = {strength}") + + if mask is not None: + samples['noise_mask'] = mask + + return samples + + +class UnsamplerHook(PixelKSampleHook): + def __init__(self, model, steps, start_end_at_step, end_end_at_step, cfg, sampler_name, + scheduler, normalize, positive, negative): + super().__init__() + self.model = model + self.cfg = cfg + self.sampler_name = sampler_name + self.steps = steps + self.start_end_at_step = start_end_at_step + self.end_end_at_step = end_end_at_step + self.scheduler = scheduler + self.normalize = normalize + self.positive = positive + self.negative = negative + + def post_encode(self, samples): + cur_step = self.cur_step + + Unsampler = noise_nodes.Unsampler + + end_at_step = self.start_end_at_step + (self.end_end_at_step - self.start_end_at_step) * cur_step / self.total_step + end_at_step = int(end_at_step) + + logging.info(f"[Impact Pack] UnsamplerHook: end_at_step = {end_at_step}") + + # inj noise + mask = None + if 'noise_mask' in samples: + mask = samples['noise_mask'] + + samples = Unsampler().unsampler(self.model, self.cfg, self.sampler_name, self.steps, end_at_step, + self.scheduler, self.normalize, self.positive, self.negative, samples)[0] + + if mask is not None: + samples['noise_mask'] = mask + + return samples + + +class InjectNoiseHookForDetailer(DetailerHook): + def __init__(self, source, seed, start_strength, end_strength, from_start=False): + super().__init__() + self.source = source + self.seed = seed + self.start_strength = start_strength + self.end_strength = end_strength + self.from_start = from_start + + def inject_noise(self, samples): + cur_step = self.cur_step if self.from_start else self.cur_step - 1 + total_step = self.total_step if self.from_start else self.total_step - 1 + + size = samples['samples'].shape + seed = cur_step + self.seed + cur_step + + if "BNK_NoisyLatentImage" in nodes.NODE_CLASS_MAPPINGS and "BNK_InjectNoise" in nodes.NODE_CLASS_MAPPINGS: + NoisyLatentImage = nodes.NODE_CLASS_MAPPINGS["BNK_NoisyLatentImage"] + InjectNoise = nodes.NODE_CLASS_MAPPINGS["BNK_InjectNoise"] + else: + utils.try_install_custom_node('https://github.com/BlenderNeko/ComfyUI_Noise', + "To use 'NoiseInjectionDetailerHookProvider', 'ComfyUI Noise' extension is required.") + raise Exception("'BNK_NoisyLatentImage', 'BNK_InjectNoise' nodes are not installed.") + + noise = NoisyLatentImage().create_noisy_latents(self.source, seed, size[3] * 8, size[2] * 8, size[0])[0] + + # inj noise + mask = None + if 'noise_mask' in samples: + mask = samples['noise_mask'] + + strength = self.start_strength + (self.end_strength - self.start_strength) * cur_step / total_step + samples = InjectNoise().inject_noise(samples, strength, noise, mask)[0] + + if mask is not None: + samples['noise_mask'] = mask + + return samples + + def cycle_latent(self, latent): + if self.cur_step == 0 and not self.from_start: + return latent + else: + return self.inject_noise(latent) + + +class UnsamplerDetailerHook(DetailerHook): + def __init__(self, model, steps, start_end_at_step, end_end_at_step, cfg, sampler_name, + scheduler, normalize, positive, negative, from_start=False): + super().__init__() + self.model = model + self.cfg = cfg + self.sampler_name = sampler_name + self.steps = steps + self.start_end_at_step = start_end_at_step + self.end_end_at_step = end_end_at_step + self.scheduler = scheduler + self.normalize = normalize + 
self.positive = positive + self.negative = negative + self.from_start = from_start + + def unsample(self, samples): + cur_step = self.cur_step if self.from_start else self.cur_step - 1 + total_step = self.total_step if self.from_start else self.total_step - 1 + + Unsampler = noise_nodes.Unsampler + + end_at_step = self.start_end_at_step + (self.end_end_at_step - self.start_end_at_step) * cur_step / total_step + end_at_step = int(end_at_step) + + # inj noise + mask = None + if 'noise_mask' in samples: + mask = samples['noise_mask'] + + samples = Unsampler().unsampler(self.model, self.cfg, self.sampler_name, self.steps, end_at_step, + self.scheduler, self.normalize, self.positive, self.negative, samples)[0] + + if mask is not None: + samples['noise_mask'] = mask + + return samples + + def cycle_latent(self, latent): + if self.cur_step == 0 and not self.from_start: + return latent + else: + return self.unsample(latent) + + +class SEGSOrderedFilterDetailerHook(DetailerHook): + def __init__(self, target, order, take_start, take_count): + super().__init__() + self.target = target + self.order = order + self.take_start = take_start + self.take_count = take_count + + def post_detection(self, segs): + return segs_nodes.SEGSOrderedFilter().doit(segs, self.target, self.order, self.take_start, self.take_count)[0] + + +class SEGSRangeFilterDetailerHook(DetailerHook): + def __init__(self, target, mode, min_value, max_value): + super().__init__() + self.target = target + self.mode = mode + self.min_value = min_value + self.max_value = max_value + + def post_detection(self, segs): + return segs_nodes.SEGSRangeFilter().doit(segs, self.target, self.mode, self.min_value, self.max_value)[0] + + +class SEGSLabelFilterDetailerHook(DetailerHook): + def __init__(self, labels): + super().__init__() + self.labels = labels + + def post_detection(self, segs): + return segs_nodes.SEGSLabelFilter().doit(segs, "", self.labels)[0] + + +class LamaRemoverDetailerHook(DetailerHook): + def __init__(self, mask_threshold, gaussblur_radius, skip_sampling): + super().__init__() + self.mask_threshold = mask_threshold + self.gaussblur_radius = gaussblur_radius + self.skip_sampling = skip_sampling + + def post_upscale(self, img, mask=None): + if "LamaRemover" in nodes.NODE_CLASS_MAPPINGS: + lama_remover_obj = nodes.NODE_CLASS_MAPPINGS['LamaRemover']() + else: + utils.try_install_custom_node('https://github.com/Layer-norm/comfyui-lama-remover', + "To use 'LAMARemoverDetailerHookProvider', 'comfyui-lama-remover' nodepack is required.") + raise Exception("'LamaRemover' node is not installed.") + + return lama_remover_obj.lama_remover(img, masks=mask, mask_threshold=self.mask_threshold, gaussblur_radius=self.gaussblur_radius, invert_mask=False)[0] + + def get_skip_sampling(self): + return self.skip_sampling + + +class PreviewDetailerHook(DetailerHook): + def __init__(self, node_id, quality): + super().__init__() + self.node_id = node_id + self.quality = quality + + async def send(self, image): + if len(image) > 0: + image = image[0].unsqueeze(0) + img = utils.tensor2pil(image) + + temp_path = os.path.join(folder_paths.get_temp_directory(), 'pvhook') + + if not os.path.exists(temp_path): + os.makedirs(temp_path) + + fullpath = os.path.join(temp_path, f"{self.node_id}.webp") + img.save(fullpath, quality=self.quality) + + item = { + "filename": f"{self.node_id}.webp", + "subfolder": 'pvhook', + "type": 'temp' + } + + PromptServer.instance.send_sync("impact-preview", {'node_id': self.node_id, 'item': item}) + + def post_paste(self, image): 
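+ # fire-and-forget: schedule the preview upload as a task on the running event loop so post_paste returns without blocking sampling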
+ loop = asyncio.get_running_loop() + loop.create_task(self.send(image)) + return image + + +class BlackPatchRetryHook(DetailerHook): + def __init__(self, mean_thresh, var_thresh): + super().__init__() + assert 0 <= mean_thresh <= 255 and 0 <= var_thresh <= 255 + self.mean_thresh = mean_thresh + self.var_thresh = var_thresh + + def should_retry_patch(self, cropped_region): + # remove the first dimension (batch_size) + if cropped_region.ndim == 4: + assert cropped_region.shape[0] == 1 + cropped_region = cropped_region.squeeze(0) + + # convert the image to grayscale + if cropped_region.ndim == 3: + assert cropped_region.shape[-1] in [1, 3] + cropped_region = cropped_region.mean(axis=-1) # simple average grayscale + + mean = cropped_region.mean() + var = cropped_region.var() + + return (mean <= self.mean_thresh/255) and (var <= self.var_thresh/255) \ No newline at end of file diff --git a/zavodik/nodes/ComfyUI-Impact-Pack/modules/impact/impact_onnx.py b/zavodik/nodes/ComfyUI-Impact-Pack/modules/impact/impact_onnx.py new file mode 100644 index 0000000000000000000000000000000000000000..0ace3bfdc86dfd5729e75088bc809090b7d91177 --- /dev/null +++ b/zavodik/nodes/ComfyUI-Impact-Pack/modules/impact/impact_onnx.py @@ -0,0 +1,39 @@ +import impact.additional_dependencies +import numpy as np +from impact import utils +import logging + +impact.additional_dependencies.ensure_onnx_package() + +try: + import onnxruntime + + def onnx_inference(image, onnx_model): + # prepare image + pil = utils.tensor2pil(image) + image = np.ascontiguousarray(pil) + image = image[:, :, ::-1] # to BGR image + image = image.astype(np.float32) + image -= [103.939, 116.779, 123.68] # 'caffe' mode image preprocessing + + # do detection + onnx_model = onnxruntime.InferenceSession(onnx_model, providers=["CPUExecutionProvider"]) + outputs = onnx_model.run( + [s_i.name for s_i in onnx_model.get_outputs()], + {onnx_model.get_inputs()[0].name: np.expand_dims(image, axis=0)}, + ) + + labels = [op for op in outputs if op.dtype == "int32"][0] + scores = [op for op in outputs if isinstance(op[0][0], np.float32)][0] + boxes = [op for op in outputs if isinstance(op[0][0], np.ndarray)][0] + + # filter out padding entries (label == -1) + idx = np.where(labels[0] == -1)[0][0] + + labels = labels[0][:idx] + scores = scores[0][:idx] + boxes = boxes[0][:idx].astype(np.uint32) + + return labels, scores, boxes +except Exception as e: + logging.error(f"[Impact Pack] ComfyUI-Impact-Pack: 'onnxruntime' package doesn't support 'python 3.11', yet.\t{e}") diff --git a/zavodik/nodes/ComfyUI-Impact-Pack/modules/impact/impact_pack.py b/zavodik/nodes/ComfyUI-Impact-Pack/modules/impact/impact_pack.py new file mode 100644 index 0000000000000000000000000000000000000000..e949e502bac5476575666f77083021bfb36ff189 --- /dev/null +++ b/zavodik/nodes/ComfyUI-Impact-Pack/modules/impact/impact_pack.py @@ -0,0 +1,2767 @@ +import os +import sys + +import comfy.samplers +import comfy.sd +import warnings +from segment_anything import sam_model_registry +from io import BytesIO +import piexif +import zipfile +import re + +import impact.wildcards + +import impact.core as core +from impact.core import SEG +from impact.config import latent_letter_path +from nodes import MAX_RESOLUTION +from PIL import Image, ImageOps +import numpy as np +import hashlib +import json +import safetensors.torch +from PIL.PngImagePlugin import PngInfo +import comfy.model_management +import base64 +import impact.wildcards as wildcards +from . import hooks +from . 
import utils +import inspect +import folder_paths +import torch +import nodes +import cv2 +import logging + + +try: + from comfy_extras import nodes_differential_diffusion +except Exception: + logging.warning("\n#############################################\n[Impact Pack] ComfyUI is an outdated version.\n#############################################\n") + raise Exception("[Impact Pack] ComfyUI is an outdated version.") + + +warnings.filterwarnings('ignore', category=UserWarning, message='TypedStorage is deprecated') + +model_path = folder_paths.models_dir + + +# folder_paths.supported_pt_extensions +utils.add_folder_path_and_extensions("sams", [os.path.join(model_path, "sams")], folder_paths.supported_pt_extensions) +utils.add_folder_path_and_extensions("onnx", [os.path.join(model_path, "onnx")], {'.onnx'}) + + +# Nodes +class ONNXDetectorProvider: + @classmethod + def INPUT_TYPES(s): + return {"required": {"model_name": (folder_paths.get_filename_list("onnx"), )}} + + RETURN_TYPES = ("BBOX_DETECTOR", ) + FUNCTION = "load_onnx" + + CATEGORY = "ImpactPack" + + def load_onnx(self, model_name): + model = folder_paths.get_full_path("onnx", model_name) + return (core.ONNXDetector(model), ) + + +class CLIPSegDetectorProvider: + @classmethod + def INPUT_TYPES(s): + return {"required": { + "text": ("STRING", {"multiline": False, "tooltip": "Enter the targets to be detected, separated by commas"}), + "blur": ("FLOAT", {"min": 0, "max": 15, "step": 0.1, "default": 7, "tooltip": "Blurs the detected mask"}), + "threshold": ("FLOAT", {"min": 0, "max": 1, "step": 0.05, "default": 0.4, "tooltip": "Detects only areas that are certain above the threshold."}), + "dilation_factor": ("INT", {"min": 0, "max": 10, "step": 1, "default": 4, "tooltip": "Dilates the detected mask."}), + } + } + + RETURN_TYPES = ("BBOX_DETECTOR", ) + FUNCTION = "doit" + + CATEGORY = "ImpactPack/Util" + + DESCRIPTION = "Provides a detection function using CLIPSeg, which generates masks based on text prompts.\nTo use this node, the CLIPSeg custom node must be installed." + + def doit(self, text, blur, threshold, dilation_factor): + if "CLIPSeg" in nodes.NODE_CLASS_MAPPINGS: + return (core.BBoxDetectorBasedOnCLIPSeg(text, blur, threshold, dilation_factor), ) + else: + logging.error("[ERROR] CLIPSegToBboxDetector: CLIPSeg custom node isn't installed. You must install biegert/ComfyUI-CLIPSeg extension to use this node.") + raise Exception("[ERROR] CLIPSegToBboxDetector: CLIPSeg custom node isn't installed. You must install biegert/ComfyUI-CLIPSeg extension to use this node.") + + +sam2_config_table = { + 'sam2.1_hiera_base_plus.pt': 'configs/sam2.1/sam2.1_hiera_b+.yaml', + 'sam2.1_hiera_large.pt': 'configs/sam2.1/sam2.1_hiera_l.yaml', + 'sam2.1_hiera_small.pt': 'configs/sam2.1/sam2.1_hiera_s.yaml', + 'sam2.1_hiera_tiny.pt': 'configs/sam2.1/sam2.1_hiera_t.yaml', + 'sam2_hiera_tiny.pt': 'configs/sam2/sam2_hiera_t.yaml', + 'sam2_hiera_small.pt': 'configs/sam2/sam2_hiera_s.yaml', + 'sam2_hiera_base_plus.pt': 'configs/sam2/sam2_hiera_b+.yaml', + 'sam2_hiera_large.pt': 'configs/sam2/sam2_hiera_l.yaml' +} + +class SAMLoader: + @classmethod + def INPUT_TYPES(cls): + models = [x for x in folder_paths.get_filename_list("sams") if 'hq' not in x and (x.endswith('.pt') or x.endswith('.pth') or x.endswith('.safetensors'))] + + if 'ESAM_ModelLoader_Zho' in nodes.NODE_CLASS_MAPPINGS: + models.append('ESAM') + + return { + "required": { + "model_name": (models, {"tooltip": "The detection accuracy varies depending on the SAM model. 
ESAM can only be used if ComfyUI-YoloWorld-EfficientSAM is installed."}), + "device_mode": (["AUTO", "Prefer GPU", "CPU"], {"tooltip": "AUTO: Only applicable when a GPU is available. It temporarily loads the SAM_MODEL into VRAM only when the detection function is used.\n" + "Prefer GPU: Tries to keep the SAM_MODEL on the GPU whenever possible. This can be used when there is sufficient VRAM available.\n" + "CPU: Always loads only on the CPU."}), + } + } + + RETURN_TYPES = ("SAM_MODEL", ) + FUNCTION = "load_model" + + CATEGORY = "ImpactPack" + + DESCRIPTION = "Load the SAM (Segment Anything) model. This can be used in places that utilize SAM detection functionality, such as SAMDetector or SimpleDetector.\nThe SAM detection functionality in Impact Pack must use the SAM_MODEL loaded through this node." + + def load_model(self, model_name, device_mode="auto"): + if model_name == 'ESAM': + if 'ESAM_ModelLoader_Zho' not in nodes.NODE_CLASS_MAPPINGS: + utils.try_install_custom_node('https://github.com/ZHO-ZHO-ZHO/ComfyUI-YoloWorld-EfficientSAM', + "To use 'ESAM' model, 'ComfyUI-YoloWorld-EfficientSAM' extension is required.") + raise Exception("'ComfyUI-YoloWorld-EfficientSAM' node isn't installed.") + + esam_loader = nodes.NODE_CLASS_MAPPINGS['ESAM_ModelLoader_Zho']() + + if device_mode == 'CPU': + esam = esam_loader.load_esam_model('CPU')[0] + else: + device_mode = 'CUDA' + esam = esam_loader.load_esam_model('CUDA')[0] + + sam_obj = core.ESAMWrapper(esam, device_mode) + esam.sam_wrapper = sam_obj + + logging.info(f"Loads EfficientSAM model: (device:{device_mode})") + return (esam, ) + elif model_name in sam2_config_table: + model_kind = 'sam2' + config = sam2_config_table[model_name] + modelname = folder_paths.get_full_path("sams", model_name) + else: + modelname = folder_paths.get_full_path("sams", model_name) + + if 'vit_h' in model_name: + model_kind = 'vit_h' + elif 'vit_l' in model_name: + model_kind = 'vit_l' + else: + model_kind = 'vit_b' + + sam = sam_model_registry[model_kind](checkpoint=modelname) + + size = os.path.getsize(modelname) + safe_to = core.SafeToGPU(size) + + # Unless user explicitly wants to use CPU, we use GPU + device = comfy.model_management.get_torch_device() if device_mode == "Prefer GPU" else "CPU" + + if device_mode == "Prefer GPU": + safe_to.to_device(sam, device) + + is_auto_mode = device_mode == "AUTO" + + if model_kind == 'sam2': + sam = core.SAM2Wrapper(config=config, modelname=modelname, is_auto_mode=is_auto_mode, safe_to_gpu=safe_to, device_mode=device_mode) + logging.info(f"Loads SAM2 model: {modelname} (device:{device_mode})") + else: + sam_obj = core.SAMWrapper(sam, is_auto_mode=is_auto_mode, safe_to_gpu=safe_to) + sam.sam_wrapper = sam_obj + logging.info(f"Loads SAM model: {modelname} (device:{device_mode})") + + return (sam, ) + + +class ONNXDetectorForEach: + @classmethod + def INPUT_TYPES(s): + return {"required": { + "onnx_detector": ("ONNX_DETECTOR",), + "image": ("IMAGE",), + "threshold": ("FLOAT", {"default": 0.8, "min": 0.0, "max": 1.0, "step": 0.01}), + "dilation": ("INT", {"default": 10, "min": -512, "max": 512, "step": 1}), + "crop_factor": ("FLOAT", {"default": 1.0, "min": 0.5, "max": 100, "step": 0.1}), + "drop_size": ("INT", {"min": 1, "max": MAX_RESOLUTION, "step": 1, "default": 10}), + } + } + + RETURN_TYPES = ("SEGS", ) + FUNCTION = "doit" + + CATEGORY = "ImpactPack/Detector" + + OUTPUT_NODE = True + + def doit(self, onnx_detector, image, threshold, dilation, crop_factor, drop_size): + segs = onnx_detector.detect(image, threshold, 
dilation, crop_factor, drop_size) + return (segs, ) + + +class DetailerForEach: + @classmethod + def INPUT_TYPES(s): + return {"required": { + "image": ("IMAGE", ), + "segs": ("SEGS", ), + "model": ("MODEL", {"tooltip": "If the `ImpactDummyInput` is connected to the model, the inference stage is skipped."}), + "clip": ("CLIP",), + "vae": ("VAE",), + "guide_size": ("FLOAT", {"default": 512, "min": 64, "max": nodes.MAX_RESOLUTION, "step": 8}), + "guide_size_for": ("BOOLEAN", {"default": True, "label_on": "bbox", "label_off": "crop_region"}), + "max_size": ("FLOAT", {"default": 1024, "min": 64, "max": nodes.MAX_RESOLUTION, "step": 8}), + "seed": ("INT", {"default": 0, "min": 0, "max": 0xffffffffffffffff}), + "steps": ("INT", {"default": 20, "min": 1, "max": 10000}), + "cfg": ("FLOAT", {"default": 8.0, "min": 0.0, "max": 100.0}), + "sampler_name": (comfy.samplers.KSampler.SAMPLERS,), + "scheduler": (core.get_schedulers(),), + "positive": ("CONDITIONING",), + "negative": ("CONDITIONING",), + "denoise": ("FLOAT", {"default": 0.5, "min": 0.0001, "max": 1.0, "step": 0.01}), + "feather": ("INT", {"default": 5, "min": 0, "max": 100, "step": 1}), + "noise_mask": ("BOOLEAN", {"default": True, "label_on": "enabled", "label_off": "disabled"}), + "force_inpaint": ("BOOLEAN", {"default": True, "label_on": "enabled", "label_off": "disabled"}), + "wildcard": ("STRING", {"multiline": True, "dynamicPrompts": False}), + + "cycle": ("INT", {"default": 1, "min": 1, "max": 10, "step": 1}), + }, + "optional": { + "detailer_hook": ("DETAILER_HOOK",), + "inpaint_model": ("BOOLEAN", {"default": False, "label_on": "enabled", "label_off": "disabled"}), + "noise_mask_feather": ("INT", {"default": 20, "min": 0, "max": 100, "step": 1}), + "scheduler_func_opt": ("SCHEDULER_FUNC",), + "tiled_encode": ("BOOLEAN", {"default": False, "label_on": "enabled", "label_off": "disabled"}), + "tiled_decode": ("BOOLEAN", {"default": False, "label_on": "enabled", "label_off": "disabled"}), + } + } + + RETURN_TYPES = ("IMAGE", ) + FUNCTION = "doit" + + CATEGORY = "ImpactPack/Detailer" + + DESCRIPTION = "It enhances details by inpainting each region within the detected area bundle (SEGS) after enlarging them based on the guide size." 
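+
+    # NOTE (added comment; behavior inferred from do_detail below): do_detail()
+    # returns a 6-tuple (image_tensor, cropped_list, enhanced_list,
+    # enhanced_alpha_list, cnet_pil_list, (segs_header, new_segs)). doit() only
+    # surfaces the enhanced image; the pipe variants further down reuse the rest.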
+ + @staticmethod + def get_core_module(): + return core + + @staticmethod + def do_detail(image, segs, model, clip, vae, guide_size, guide_size_for_bbox, max_size, seed, steps, cfg, sampler_name, scheduler, + positive, negative, denoise, feather, noise_mask, force_inpaint, wildcard_opt=None, detailer_hook=None, + refiner_ratio=None, refiner_model=None, refiner_clip=None, refiner_positive=None, refiner_negative=None, + cycle=1, inpaint_model=False, noise_mask_feather=0, scheduler_func_opt=None, tiled_encode=False, tiled_decode=False): + + if len(image) > 1: + raise Exception('[Impact Pack] ERROR: DetailerForEach does not allow image batches.\nPlease refer to https://github.com/ltdrdata/ComfyUI-extension-tutorials/blob/Main/ComfyUI-Impact-Pack/tutorial/batching-detailer.md for more information.') + + image = image.clone() + enhanced_alpha_list = [] + enhanced_list = [] + cropped_list = [] + cnet_pil_list = [] + + segs = core.segs_scale_match(segs, image.shape) + new_segs = [] + + wildcard_concat_mode = None + if wildcard_opt is not None: + if wildcard_opt.startswith('[CONCAT]'): + wildcard_concat_mode = 'concat' + wildcard_opt = wildcard_opt[8:] + wmode, wildcard_chooser = wildcards.process_wildcard_for_segs(wildcard_opt) + else: + wmode, wildcard_chooser = None, None + + if wmode in ['ASC', 'DSC', 'ASC-SIZE', 'DSC-SIZE']: + if wmode == 'ASC': + ordered_segs = sorted(segs[1], key=lambda x: (x.bbox[0], x.bbox[1])) + elif wmode == 'DSC': + ordered_segs = sorted(segs[1], key=lambda x: (x.bbox[0], x.bbox[1]), reverse=True) + elif wmode == 'ASC-SIZE': + ordered_segs = sorted(segs[1], key=lambda x: (x.bbox[2]-x.bbox[0]) * (x.bbox[3]-x.bbox[1])) + + else: # wmode == 'DSC-SIZE' + ordered_segs = sorted(segs[1], key=lambda x: (x.bbox[2]-x.bbox[0]) * (x.bbox[3]-x.bbox[1]), reverse=True) + else: + ordered_segs = segs[1] + + if not (isinstance(model, str) and model == "DUMMY") and noise_mask_feather > 0 and 'denoise_mask_function' not in model.model_options: + model = nodes_differential_diffusion.DifferentialDiffusion().execute(model)[0] + + for i, seg in enumerate(ordered_segs): + cropped_image = utils.crop_ndarray4(image.cpu().numpy(), seg.crop_region) # Never use seg.cropped_image to handle overlapping area + cropped_image = utils.to_tensor(cropped_image) + mask = utils.to_tensor(seg.cropped_mask) + mask = utils.tensor_gaussian_blur_mask(mask, feather) + + is_mask_all_zeros = (seg.cropped_mask == 0).all().item() + if is_mask_all_zeros: + logging.info("Detailer: segment skip [empty mask]") + continue + + if noise_mask: + cropped_mask = seg.cropped_mask + else: + cropped_mask = None + + if wildcard_chooser is not None and wmode != "LAB": + seg_seed, wildcard_item = wildcard_chooser.get(seg) + elif wildcard_chooser is not None and wmode == "LAB": + seg_seed, wildcard_item = None, wildcard_chooser.get(seg) + else: + seg_seed, wildcard_item = None, None + + seg_seed = seed + i if seg_seed is None else seg_seed + + if not isinstance(positive, str): + cropped_positive = [ + [condition, { + k: core.crop_condition_mask(v, image, seg.crop_region) if k == "mask" else v + for k, v in details.items() + }] + for condition, details in positive + ] + else: + cropped_positive = positive + + if not isinstance(negative, str): + cropped_negative = [ + [condition, { + k: core.crop_condition_mask(v, image, seg.crop_region) if k == "mask" else v + for k, v in details.items() + }] + for condition, details in negative + ] + else: + # Negative Conditioning is placeholder such as FLUX.1 + cropped_negative = negative + + if 
wildcard_item and wildcard_item.strip() == '[SKIP]':
+                continue
+
+            if wildcard_item and wildcard_item.strip() == '[STOP]':
+                break
+
+            orig_cropped_image = cropped_image.clone()
+            if not (isinstance(model, str) and model == "DUMMY"):
+                enhanced_image, cnet_pils = core.enhance_detail(cropped_image, model, clip, vae, guide_size, guide_size_for_bbox, max_size,
+                                                                seg.bbox, seg_seed, steps, cfg, sampler_name, scheduler,
+                                                                cropped_positive, cropped_negative, denoise, cropped_mask, force_inpaint,
+                                                                wildcard_opt=wildcard_item, wildcard_opt_concat_mode=wildcard_concat_mode,
+                                                                detailer_hook=detailer_hook,
+                                                                refiner_ratio=refiner_ratio, refiner_model=refiner_model,
+                                                                refiner_clip=refiner_clip, refiner_positive=refiner_positive,
+                                                                refiner_negative=refiner_negative, control_net_wrapper=seg.control_net_wrapper,
+                                                                cycle=cycle, inpaint_model=inpaint_model, noise_mask_feather=noise_mask_feather,
+                                                                scheduler_func=scheduler_func_opt, vae_tiled_encode=tiled_encode,
+                                                                vae_tiled_decode=tiled_decode)
+            else:
+                enhanced_image = cropped_image
+                cnet_pils = None
+
+            if cnet_pils is not None:
+                cnet_pil_list.extend(cnet_pils)
+
+            if enhanced_image is not None:
+                # don't composite in latent space -> converting to latent caused poor quality;
+                # paste in image space instead
+                image = image.cpu()
+                enhanced_image = enhanced_image.cpu()
+                utils.tensor_paste(image, enhanced_image, (seg.crop_region[0], seg.crop_region[1]), mask)  # NOTE: this also affects `cropped_image`.
+                enhanced_list.append(enhanced_image)
+
+                if detailer_hook is not None:
+                    image = detailer_hook.post_paste(image)
+
+            if enhanced_image is not None:
+                # Convert the enhanced image to RGBA mode
+                enhanced_image_alpha = utils.tensor_convert_rgba(enhanced_image)
+                new_seg_image = enhanced_image.numpy()  # alpha should not be applied to seg_image
+
+                # Apply the mask
+                mask = utils.tensor_resize(mask, *utils.tensor_get_size(enhanced_image))
+                utils.tensor_putalpha(enhanced_image_alpha, mask)
+                enhanced_alpha_list.append(enhanced_image_alpha)
+            else:
+                new_seg_image = None
+
+            cropped_list.append(orig_cropped_image)  # NOTE: Don't use `cropped_image`
+
+            new_seg = SEG(new_seg_image, seg.cropped_mask, seg.confidence, seg.crop_region, seg.bbox, seg.label, seg.control_net_wrapper)
+            new_segs.append(new_seg)
+
+        image_tensor = utils.tensor_convert_rgb(image)
+
+        cropped_list.sort(key=lambda x: x.shape, reverse=True)
+        enhanced_list.sort(key=lambda x: x.shape, reverse=True)
+        enhanced_alpha_list.sort(key=lambda x: x.shape, reverse=True)
+
+        return image_tensor, cropped_list, enhanced_list, enhanced_alpha_list, cnet_pil_list, (segs[0], new_segs)
+
+    def doit(self, image, segs, model, clip, vae, guide_size, guide_size_for, max_size, seed, steps, cfg, sampler_name,
+             scheduler, positive, negative, denoise, feather, noise_mask, force_inpaint, wildcard, cycle=1,
+             detailer_hook=None, inpaint_model=False, noise_mask_feather=0, scheduler_func_opt=None,
+             tiled_encode=False, tiled_decode=False):
+
+        enhanced_img, *_ = \
+            DetailerForEach.do_detail(image, segs, model, clip, vae, guide_size, guide_size_for, max_size, seed, steps,
+                                      cfg, sampler_name, scheduler, positive, negative, denoise, feather, noise_mask,
+                                      force_inpaint, wildcard, detailer_hook,
+                                      cycle=cycle, inpaint_model=inpaint_model, noise_mask_feather=noise_mask_feather,
+                                      scheduler_func_opt=scheduler_func_opt, tiled_encode=tiled_encode, tiled_decode=tiled_decode)
+
+        return (enhanced_img, )
+
+
+class DetailerForEachAutoRetry:
+    @classmethod
+    def INPUT_TYPES(s):
+        return {"required": {
+                    "image": ("IMAGE", ),
+                    "segs": ("SEGS", ),
+                    
"model": ("MODEL", {"tooltip": "If the `ImpactDummyInput` is connected to the model, the inference stage is skipped."}), + "clip": ("CLIP",), + "vae": ("VAE",), + "guide_size": ("FLOAT", {"default": 512, "min": 64, "max": nodes.MAX_RESOLUTION, "step": 8}), + "guide_size_for": ("BOOLEAN", {"default": True, "label_on": "bbox", "label_off": "crop_region"}), + "max_size": ("FLOAT", {"default": 1024, "min": 64, "max": nodes.MAX_RESOLUTION, "step": 8}), + "seed": ("INT", {"default": 0, "min": 0, "max": 0xffffffffffffffff}), + "steps": ("INT", {"default": 20, "min": 1, "max": 10000}), + "cfg": ("FLOAT", {"default": 8.0, "min": 0.0, "max": 100.0}), + "sampler_name": (comfy.samplers.KSampler.SAMPLERS,), + "scheduler": (core.get_schedulers(),), + "positive": ("CONDITIONING",), + "negative": ("CONDITIONING",), + "denoise": ("FLOAT", {"default": 0.5, "min": 0.0001, "max": 1.0, "step": 0.01}), + "feather": ("INT", {"default": 5, "min": 0, "max": 100, "step": 1}), + "noise_mask": ("BOOLEAN", {"default": True, "label_on": "enabled", "label_off": "disabled"}), + "force_inpaint": ("BOOLEAN", {"default": True, "label_on": "enabled", "label_off": "disabled"}), + "wildcard": ("STRING", {"multiline": True, "dynamicPrompts": False}), + + "cycle": ("INT", {"default": 1, "min": 1, "max": 10, "step": 1}), + "max_retries": ("INT", {"default": 1, "min": 1, "max": 10, "step": 1}), + }, + "optional": { + "detailer_hook": ("DETAILER_HOOK",), + "inpaint_model": ("BOOLEAN", {"default": False, "label_on": "enabled", "label_off": "disabled"}), + "noise_mask_feather": ("INT", {"default": 20, "min": 0, "max": 100, "step": 1}), + "scheduler_func_opt": ("SCHEDULER_FUNC",), + "tiled_encode": ("BOOLEAN", {"default": False, "label_on": "enabled", "label_off": "disabled"}), + "tiled_decode": ("BOOLEAN", {"default": False, "label_on": "enabled", "label_off": "disabled"}), + } + } + + RETURN_TYPES = ("IMAGE", ) + FUNCTION = "doit" + + CATEGORY = "ImpactPack/Detailer" + + DESCRIPTION = "It enhances details by inpainting each region within the detected area bundle (SEGS) after enlarging them based on the guide size." 
+
+    @staticmethod
+    def get_core_module():
+        return core
+
+    @staticmethod
+    def do_detail(image, segs, model, clip, vae, guide_size, guide_size_for_bbox, max_size, seed, steps, cfg, sampler_name, scheduler,
+                  positive, negative, denoise, feather, noise_mask, force_inpaint, wildcard_opt=None, detailer_hook=None,
+                  refiner_ratio=None, refiner_model=None, refiner_clip=None, refiner_positive=None, refiner_negative=None,
+                  cycle=1, inpaint_model=False, noise_mask_feather=0, scheduler_func_opt=None, tiled_encode=False, tiled_decode=False, max_retries=1):
+
+        if len(image) > 1:
+            raise Exception('[Impact Pack] ERROR: DetailerForEachAutoRetry does not allow image batches.\nPlease refer to https://github.com/ltdrdata/ComfyUI-extension-tutorials/blob/Main/ComfyUI-Impact-Pack/tutorial/batching-detailer.md for more information.')
+
+        image = image.clone()
+        enhanced_alpha_list = []
+        enhanced_list = []
+        cropped_list = []
+        cnet_pil_list = []
+
+        segs = core.segs_scale_match(segs, image.shape)
+        new_segs = []
+
+        wildcard_concat_mode = None
+        if wildcard_opt is not None:
+            if wildcard_opt.startswith('[CONCAT]'):
+                wildcard_concat_mode = 'concat'
+                wildcard_opt = wildcard_opt[8:]
+            wmode, wildcard_chooser = wildcards.process_wildcard_for_segs(wildcard_opt)
+        else:
+            wmode, wildcard_chooser = None, None
+
+        if wmode in ['ASC', 'DSC', 'ASC-SIZE', 'DSC-SIZE']:
+            if wmode == 'ASC':
+                ordered_segs = sorted(segs[1], key=lambda x: (x.bbox[0], x.bbox[1]))
+            elif wmode == 'DSC':
+                ordered_segs = sorted(segs[1], key=lambda x: (x.bbox[0], x.bbox[1]), reverse=True)
+            elif wmode == 'ASC-SIZE':
+                ordered_segs = sorted(segs[1], key=lambda x: (x.bbox[2]-x.bbox[0]) * (x.bbox[3]-x.bbox[1]))
+
+            else:  # wmode == 'DSC-SIZE'
+                ordered_segs = sorted(segs[1], key=lambda x: (x.bbox[2]-x.bbox[0]) * (x.bbox[3]-x.bbox[1]), reverse=True)
+        else:
+            ordered_segs = segs[1]
+
+        if not (isinstance(model, str) and model == "DUMMY") and noise_mask_feather > 0 and 'denoise_mask_function' not in model.model_options:
+            model = nodes_differential_diffusion.DifferentialDiffusion().execute(model)[0]
+
+        for i, seg in enumerate(ordered_segs):
+            cropped_image = utils.crop_ndarray4(image.cpu().numpy(), seg.crop_region)  # Never use seg.cropped_image to handle overlapping area
+            cropped_image = utils.to_tensor(cropped_image)
+            mask = utils.to_tensor(seg.cropped_mask)
+            mask = utils.tensor_gaussian_blur_mask(mask, feather)
+
+            is_mask_all_zeros = (seg.cropped_mask == 0).all().item()
+            if is_mask_all_zeros:
+                logging.info("Detailer: segment skip [empty mask]")
+                continue
+
+            if noise_mask:
+                cropped_mask = seg.cropped_mask
+            else:
+                cropped_mask = None
+
+            if wildcard_chooser is not None and wmode != "LAB":
+                seg_seed, wildcard_item = wildcard_chooser.get(seg)
+            elif wildcard_chooser is not None and wmode == "LAB":
+                seg_seed, wildcard_item = None, wildcard_chooser.get(seg)
+            else:
+                seg_seed, wildcard_item = None, None
+
+            seg_seed = seed + i if seg_seed is None else seg_seed
+
+            if not isinstance(positive, str):
+                cropped_positive = [
+                    [condition, {
+                        k: core.crop_condition_mask(v, image, seg.crop_region) if k == "mask" else v
+                        for k, v in details.items()
+                    }]
+                    for condition, details in positive
+                ]
+            else:
+                cropped_positive = positive
+
+            if not isinstance(negative, str):
+                cropped_negative = [
+                    [condition, {
+                        k: core.crop_condition_mask(v, image, seg.crop_region) if k == "mask" else v
+                        for k, v in details.items()
+                    }]
+                    for condition, details in negative
+                ]
+            else:
+                # Negative conditioning may be just a placeholder for models such as FLUX.1
+                cropped_negative = negative
+
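+            # Wildcard control tokens: '[SKIP]' leaves this segment untouched;
+            # '[STOP]' aborts processing of all remaining segments.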
+            if wildcard_item and wildcard_item.strip() == '[SKIP]':
+                continue
+
+            if wildcard_item and wildcard_item.strip() == '[STOP]':
+                break
+
+            orig_cropped_image = cropped_image.clone()
+
+            # initialize
+            enhanced_image = cropped_image
+            cnet_pils = None
+
+            if not (isinstance(model, str) and model == "DUMMY"):
+                for retry in range(max_retries):
+                    enhanced_image, cnet_pils = core.enhance_detail(cropped_image, model, clip, vae, guide_size, guide_size_for_bbox, max_size,
+                                                                    seg.bbox, seg_seed + retry, steps, cfg, sampler_name, scheduler,
+                                                                    cropped_positive, cropped_negative, denoise, cropped_mask, force_inpaint,
+                                                                    wildcard_opt=wildcard_item, wildcard_opt_concat_mode=wildcard_concat_mode,
+                                                                    detailer_hook=detailer_hook,
+                                                                    refiner_ratio=refiner_ratio, refiner_model=refiner_model,
+                                                                    refiner_clip=refiner_clip, refiner_positive=refiner_positive,
+                                                                    refiner_negative=refiner_negative, control_net_wrapper=seg.control_net_wrapper,
+                                                                    cycle=cycle, inpaint_model=inpaint_model, noise_mask_feather=noise_mask_feather,
+                                                                    scheduler_func=scheduler_func_opt, vae_tiled_encode=tiled_encode,
+                                                                    vae_tiled_decode=tiled_decode)
+
+                    if detailer_hook is None or not detailer_hook.should_retry_patch(enhanced_image):
+                        break
+
+                    if retry + 1 == max_retries:
+                        raise Exception("Max retries reached")
+                    else:
+                        logging.warning("Detailer: bad patch detected, retrying...")
+
+            if cnet_pils is not None:
+                cnet_pil_list.extend(cnet_pils)
+
+            if enhanced_image is not None:
+                # don't composite in latent space -> converting to latent caused poor quality;
+                # paste in image space instead
+                image = image.cpu()
+                enhanced_image = enhanced_image.cpu()
+                utils.tensor_paste(image, enhanced_image, (seg.crop_region[0], seg.crop_region[1]), mask)  # NOTE: this also affects `cropped_image`.
+                enhanced_list.append(enhanced_image)
+
+                if detailer_hook is not None:
+                    image = detailer_hook.post_paste(image)
+
+            if enhanced_image is not None:
+                # Convert the enhanced image to RGBA mode
+                enhanced_image_alpha = utils.tensor_convert_rgba(enhanced_image)
+                new_seg_image = enhanced_image.numpy()  # alpha should not be applied to seg_image
+
+                # Apply the mask
+                mask = utils.tensor_resize(mask, *utils.tensor_get_size(enhanced_image))
+                utils.tensor_putalpha(enhanced_image_alpha, mask)
+                enhanced_alpha_list.append(enhanced_image_alpha)
+            else:
+                new_seg_image = None
+
+            cropped_list.append(orig_cropped_image)  # NOTE: Don't use `cropped_image`
+
+            new_seg = SEG(new_seg_image, seg.cropped_mask, seg.confidence, seg.crop_region, seg.bbox, seg.label, seg.control_net_wrapper)
+            new_segs.append(new_seg)
+
+        image_tensor = utils.tensor_convert_rgb(image)
+
+        cropped_list.sort(key=lambda x: x.shape, reverse=True)
+        enhanced_list.sort(key=lambda x: x.shape, reverse=True)
+        enhanced_alpha_list.sort(key=lambda x: x.shape, reverse=True)
+
+        return image_tensor, cropped_list, enhanced_list, enhanced_alpha_list, cnet_pil_list, (segs[0], new_segs)
+
+    def doit(self, image, segs, model, clip, vae, guide_size, guide_size_for, max_size, seed, steps, cfg, sampler_name,
+             scheduler, positive, negative, denoise, feather, noise_mask, force_inpaint, wildcard, cycle=1,
+             detailer_hook=None, inpaint_model=False, noise_mask_feather=0, scheduler_func_opt=None,
+             tiled_encode=False, tiled_decode=False, max_retries=1):
+
+        enhanced_img, *_ = \
+            DetailerForEachAutoRetry.do_detail(image, segs, model, clip, vae, guide_size, guide_size_for, max_size, seed, steps,
+                                               cfg, sampler_name, scheduler, positive, negative, denoise, feather, noise_mask,
+                                               force_inpaint, wildcard, detailer_hook,
+                                               cycle=cycle, inpaint_model=inpaint_model, 
noise_mask_feather=noise_mask_feather, + scheduler_func_opt=scheduler_func_opt, tiled_encode=tiled_encode, tiled_decode=tiled_decode, max_retries=max_retries) + + return (enhanced_img, ) + + +class DetailerForEachPipe: + @classmethod + def INPUT_TYPES(s): + return {"required": { + "image": ("IMAGE", ), + "segs": ("SEGS", ), + "guide_size": ("FLOAT", {"default": 512, "min": 64, "max": nodes.MAX_RESOLUTION, "step": 8}), + "guide_size_for": ("BOOLEAN", {"default": True, "label_on": "bbox", "label_off": "crop_region"}), + "max_size": ("FLOAT", {"default": 1024, "min": 64, "max": nodes.MAX_RESOLUTION, "step": 8}), + "seed": ("INT", {"default": 0, "min": 0, "max": 0xffffffffffffffff}), + "steps": ("INT", {"default": 20, "min": 1, "max": 10000}), + "cfg": ("FLOAT", {"default": 8.0, "min": 0.0, "max": 100.0}), + "sampler_name": (comfy.samplers.KSampler.SAMPLERS,), + "scheduler": (core.get_schedulers(),), + "denoise": ("FLOAT", {"default": 0.5, "min": 0.0001, "max": 1.0, "step": 0.01}), + "feather": ("INT", {"default": 5, "min": 0, "max": 100, "step": 1}), + "noise_mask": ("BOOLEAN", {"default": True, "label_on": "enabled", "label_off": "disabled"}), + "force_inpaint": ("BOOLEAN", {"default": True, "label_on": "enabled", "label_off": "disabled"}), + "basic_pipe": ("BASIC_PIPE", {"tooltip": "If the `ImpactDummyInput` is connected to the model in the basic_pipe, the inference stage is skipped."}), + "wildcard": ("STRING", {"multiline": True, "dynamicPrompts": False}), + "refiner_ratio": ("FLOAT", {"default": 0.2, "min": 0.0, "max": 1.0}), + + "cycle": ("INT", {"default": 1, "min": 1, "max": 10, "step": 1}), + }, + "optional": { + "detailer_hook": ("DETAILER_HOOK",), + "refiner_basic_pipe_opt": ("BASIC_PIPE",), + "inpaint_model": ("BOOLEAN", {"default": False, "label_on": "enabled", "label_off": "disabled"}), + "noise_mask_feather": ("INT", {"default": 20, "min": 0, "max": 100, "step": 1}), + "scheduler_func_opt": ("SCHEDULER_FUNC",), + "tiled_encode": ("BOOLEAN", {"default": False, "label_on": "enabled", "label_off": "disabled"}), + "tiled_decode": ("BOOLEAN", {"default": False, "label_on": "enabled", "label_off": "disabled"}), + } + } + + RETURN_TYPES = ("IMAGE", "SEGS", "BASIC_PIPE", "IMAGE") + RETURN_NAMES = ("image", "segs", "basic_pipe", "cnet_images") + OUTPUT_IS_LIST = (False, False, False, True) + FUNCTION = "doit" + + CATEGORY = "ImpactPack/Detailer" + + DESCRIPTION = DetailerForEach.DESCRIPTION + + def doit(self, image, segs, guide_size, guide_size_for, max_size, seed, steps, cfg, sampler_name, scheduler, + denoise, feather, noise_mask, force_inpaint, basic_pipe, wildcard, + refiner_ratio=None, detailer_hook=None, refiner_basic_pipe_opt=None, + cycle=1, inpaint_model=False, noise_mask_feather=0, scheduler_func_opt=None, + tiled_encode=False, tiled_decode=False): + + if len(image) > 1: + raise Exception('[Impact Pack] ERROR: DetailerForEach does not allow image batches.\nPlease refer to https://github.com/ltdrdata/ComfyUI-extension-tutorials/blob/Main/ComfyUI-Impact-Pack/tutorial/batching-detailer.md for more information.') + + model, clip, vae, positive, negative = basic_pipe + + if refiner_basic_pipe_opt is None: + refiner_model, refiner_clip, refiner_positive, refiner_negative = None, None, None, None + else: + refiner_model, refiner_clip, _, refiner_positive, refiner_negative = refiner_basic_pipe_opt + + enhanced_img, cropped, cropped_enhanced, cropped_enhanced_alpha, cnet_pil_list, new_segs = \ + DetailerForEach.do_detail(image, segs, model, clip, vae, guide_size, guide_size_for, 
max_size, seed, steps, cfg, + sampler_name, scheduler, positive, negative, denoise, feather, noise_mask, + force_inpaint, wildcard, detailer_hook, + refiner_ratio=refiner_ratio, refiner_model=refiner_model, + refiner_clip=refiner_clip, refiner_positive=refiner_positive, refiner_negative=refiner_negative, + cycle=cycle, inpaint_model=inpaint_model, noise_mask_feather=noise_mask_feather, scheduler_func_opt=scheduler_func_opt, + tiled_encode=tiled_encode, tiled_decode=tiled_decode) + + # set fallback image + if len(cnet_pil_list) == 0: + cnet_pil_list = [utils.empty_pil_tensor()] + + return enhanced_img, new_segs, basic_pipe, cnet_pil_list + + +class FaceDetailer: + @classmethod + def INPUT_TYPES(s): + return {"required": { + "image": ("IMAGE", ), + "model": ("MODEL", {"tooltip": "If the `ImpactDummyInput` is connected to the model, the inference stage is skipped."}), + "clip": ("CLIP",), + "vae": ("VAE",), + "guide_size": ("FLOAT", {"default": 512, "min": 64, "max": nodes.MAX_RESOLUTION, "step": 8}), + "guide_size_for": ("BOOLEAN", {"default": True, "label_on": "bbox", "label_off": "crop_region"}), + "max_size": ("FLOAT", {"default": 1024, "min": 64, "max": nodes.MAX_RESOLUTION, "step": 8}), + "seed": ("INT", {"default": 0, "min": 0, "max": 0xffffffffffffffff}), + "steps": ("INT", {"default": 20, "min": 1, "max": 10000}), + "cfg": ("FLOAT", {"default": 8.0, "min": 0.0, "max": 100.0}), + "sampler_name": (comfy.samplers.KSampler.SAMPLERS,), + "scheduler": (core.get_schedulers(),), + "positive": ("CONDITIONING",), + "negative": ("CONDITIONING",), + "denoise": ("FLOAT", {"default": 0.5, "min": 0.0001, "max": 1.0, "step": 0.01}), + "feather": ("INT", {"default": 5, "min": 0, "max": 100, "step": 1}), + "noise_mask": ("BOOLEAN", {"default": True, "label_on": "enabled", "label_off": "disabled"}), + "force_inpaint": ("BOOLEAN", {"default": True, "label_on": "enabled", "label_off": "disabled"}), + + "bbox_threshold": ("FLOAT", {"default": 0.5, "min": 0.0, "max": 1.0, "step": 0.01}), + "bbox_dilation": ("INT", {"default": 10, "min": -512, "max": 512, "step": 1}), + "bbox_crop_factor": ("FLOAT", {"default": 3.0, "min": 1.0, "max": 10, "step": 0.1}), + + "sam_detection_hint": (["center-1", "horizontal-2", "vertical-2", "rect-4", "diamond-4", "mask-area", "mask-points", "mask-point-bbox", "none"],), + "sam_dilation": ("INT", {"default": 0, "min": -512, "max": 512, "step": 1}), + "sam_threshold": ("FLOAT", {"default": 0.93, "min": 0.0, "max": 1.0, "step": 0.01}), + "sam_bbox_expansion": ("INT", {"default": 0, "min": 0, "max": 1000, "step": 1}), + "sam_mask_hint_threshold": ("FLOAT", {"default": 0.7, "min": 0.0, "max": 1.0, "step": 0.01}), + "sam_mask_hint_use_negative": (["False", "Small", "Outter"],), + + "drop_size": ("INT", {"min": 1, "max": MAX_RESOLUTION, "step": 1, "default": 10}), + + "bbox_detector": ("BBOX_DETECTOR", ), + "wildcard": ("STRING", {"multiline": True, "dynamicPrompts": False}), + + "cycle": ("INT", {"default": 1, "min": 1, "max": 10, "step": 1}), + }, + "optional": { + "sam_model_opt": ("SAM_MODEL", ), + "segm_detector_opt": ("SEGM_DETECTOR", ), + "detailer_hook": ("DETAILER_HOOK",), + "inpaint_model": ("BOOLEAN", {"default": False, "label_on": "enabled", "label_off": "disabled"}), + "noise_mask_feather": ("INT", {"default": 20, "min": 0, "max": 100, "step": 1}), + "scheduler_func_opt": ("SCHEDULER_FUNC",), + "tiled_encode": ("BOOLEAN", {"default": False, "label_on": "enabled", "label_off": "disabled"}), + "tiled_decode": ("BOOLEAN", {"default": False, "label_on": "enabled", 
"label_off": "disabled"}), + }} + + RETURN_TYPES = ("IMAGE", "IMAGE", "IMAGE", "MASK", "DETAILER_PIPE", "IMAGE") + RETURN_NAMES = ("image", "cropped_refined", "cropped_enhanced_alpha", "mask", "detailer_pipe", "cnet_images") + OUTPUT_IS_LIST = (False, True, True, False, False, True) + FUNCTION = "doit" + + CATEGORY = "ImpactPack/Simple" + + DESCRIPTION = "This node enhances details by automatically detecting specific objects in the input image using detection models (bbox, segm, sam) and regenerating the image by enlarging the detected area based on the guide size.\nAlthough this node is specialized to simplify the commonly used facial detail enhancement workflow, it can also be used for various automatic inpainting purposes depending on the detection model." + + @staticmethod + def enhance_face(image, model, clip, vae, guide_size, guide_size_for_bbox, max_size, seed, steps, cfg, sampler_name, scheduler, + positive, negative, denoise, feather, noise_mask, force_inpaint, + bbox_threshold, bbox_dilation, bbox_crop_factor, + sam_detection_hint, sam_dilation, sam_threshold, sam_bbox_expansion, sam_mask_hint_threshold, + sam_mask_hint_use_negative, drop_size, + bbox_detector, segm_detector=None, sam_model_opt=None, wildcard_opt=None, detailer_hook=None, + refiner_ratio=None, refiner_model=None, refiner_clip=None, refiner_positive=None, refiner_negative=None, cycle=1, + inpaint_model=False, noise_mask_feather=0, scheduler_func_opt=None, tiled_encode=False, tiled_decode=False): + + # make default prompt as 'face' if empty prompt for CLIPSeg + bbox_detector.setAux('face') + segs = bbox_detector.detect(image, bbox_threshold, bbox_dilation, bbox_crop_factor, drop_size, detailer_hook=detailer_hook) + bbox_detector.setAux(None) + + # bbox + sam combination + if sam_model_opt is not None: + sam_mask = core.make_sam_mask(sam_model_opt, segs, image, sam_detection_hint, sam_dilation, + sam_threshold, sam_bbox_expansion, sam_mask_hint_threshold, + sam_mask_hint_use_negative, ) + segs = core.segs_bitwise_and_mask(segs, sam_mask) + + elif segm_detector is not None: + segm_segs = segm_detector.detect(image, bbox_threshold, bbox_dilation, bbox_crop_factor, drop_size) + + if (hasattr(segm_detector, 'override_bbox_by_segm') and segm_detector.override_bbox_by_segm and + not (detailer_hook is not None and not hasattr(detailer_hook, 'override_bbox_by_segm'))): + segs = segm_segs + else: + segm_mask = core.segs_to_combined_mask(segm_segs) + segs = core.segs_bitwise_and_mask(segs, segm_mask) + + if len(segs[1]) > 0: + enhanced_img, _, cropped_enhanced, cropped_enhanced_alpha, cnet_pil_list, new_segs = \ + DetailerForEach.do_detail(image, segs, model, clip, vae, guide_size, guide_size_for_bbox, max_size, seed, steps, cfg, + sampler_name, scheduler, positive, negative, denoise, feather, noise_mask, + force_inpaint, wildcard_opt, detailer_hook, + refiner_ratio=refiner_ratio, refiner_model=refiner_model, + refiner_clip=refiner_clip, refiner_positive=refiner_positive, + refiner_negative=refiner_negative, + cycle=cycle, inpaint_model=inpaint_model, noise_mask_feather=noise_mask_feather, + scheduler_func_opt=scheduler_func_opt, tiled_encode=tiled_encode, tiled_decode=tiled_decode) + else: + enhanced_img = image + cropped_enhanced = [] + cropped_enhanced_alpha = [] + cnet_pil_list = [] + + # Mask Generator + mask = core.segs_to_combined_mask(segs) + + if len(cropped_enhanced) == 0: + cropped_enhanced = [utils.empty_pil_tensor()] + + if len(cropped_enhanced_alpha) == 0: + cropped_enhanced_alpha = [utils.empty_pil_tensor()] + 
+ if len(cnet_pil_list) == 0: + cnet_pil_list = [utils.empty_pil_tensor()] + + return enhanced_img, cropped_enhanced, cropped_enhanced_alpha, mask, cnet_pil_list + + def doit(self, image, model, clip, vae, guide_size, guide_size_for, max_size, seed, steps, cfg, sampler_name, scheduler, + positive, negative, denoise, feather, noise_mask, force_inpaint, + bbox_threshold, bbox_dilation, bbox_crop_factor, + sam_detection_hint, sam_dilation, sam_threshold, sam_bbox_expansion, sam_mask_hint_threshold, + sam_mask_hint_use_negative, drop_size, bbox_detector, wildcard, cycle=1, + sam_model_opt=None, segm_detector_opt=None, detailer_hook=None, inpaint_model=False, noise_mask_feather=0, + scheduler_func_opt=None, tiled_encode=False, tiled_decode=False): + + result_img = None + result_mask = None + result_cropped_enhanced = [] + result_cropped_enhanced_alpha = [] + result_cnet_images = [] + + if len(image) > 1: + logging.warning("[Impact Pack] WARN: FaceDetailer is not a node designed for video detailing. If you intend to perform video detailing, please use Detailer For AnimateDiff.") + + for i, single_image in enumerate(image): + enhanced_img, cropped_enhanced, cropped_enhanced_alpha, mask, cnet_pil_list = FaceDetailer.enhance_face( + single_image.unsqueeze(0), model, clip, vae, guide_size, guide_size_for, max_size, seed + i, steps, cfg, sampler_name, scheduler, + positive, negative, denoise, feather, noise_mask, force_inpaint, + bbox_threshold, bbox_dilation, bbox_crop_factor, + sam_detection_hint, sam_dilation, sam_threshold, sam_bbox_expansion, sam_mask_hint_threshold, + sam_mask_hint_use_negative, drop_size, bbox_detector, segm_detector_opt, sam_model_opt, wildcard, detailer_hook, + cycle=cycle, inpaint_model=inpaint_model, noise_mask_feather=noise_mask_feather, scheduler_func_opt=scheduler_func_opt, + tiled_encode=tiled_encode, tiled_decode=tiled_decode) + + result_img = torch.cat((result_img, enhanced_img), dim=0) if result_img is not None else enhanced_img + result_mask = torch.cat((result_mask, mask), dim=0) if result_mask is not None else mask + result_cropped_enhanced.extend(cropped_enhanced) + result_cropped_enhanced_alpha.extend(cropped_enhanced_alpha) + result_cnet_images.extend(cnet_pil_list) + + pipe = (model, clip, vae, positive, negative, wildcard, bbox_detector, segm_detector_opt, sam_model_opt, detailer_hook, None, None, None, None) + return result_img, result_cropped_enhanced, result_cropped_enhanced_alpha, result_mask, pipe, result_cnet_images + + +class LatentPixelScale: + upscale_methods = ["nearest-exact", "bilinear", "lanczos", "area"] + + @classmethod + def INPUT_TYPES(s): + return {"required": { + "samples": ("LATENT", ), + "scale_method": (s.upscale_methods,), + "scale_factor": ("FLOAT", {"default": 1.5, "min": 0.1, "max": 10000, "step": 0.05}), + "vae": ("VAE", ), + "use_tiled_vae": ("BOOLEAN", {"default": False, "label_on": "enabled", "label_off": "disabled"}), + }, + "optional": { + "upscale_model_opt": ("UPSCALE_MODEL", ), + } + } + + RETURN_TYPES = ("LATENT", "IMAGE") + FUNCTION = "doit" + + CATEGORY = "ImpactPack/Upscale" + + def doit(self, samples, scale_method, scale_factor, vae, use_tiled_vae, upscale_model_opt=None): + if upscale_model_opt is None: + latimg = core.latent_upscale_on_pixel_space2(samples, scale_method, scale_factor, vae, use_tile=use_tiled_vae) + else: + latimg = core.latent_upscale_on_pixel_space_with_model2(samples, scale_method, upscale_model_opt, scale_factor, vae, use_tile=use_tiled_vae) + return latimg + + +class 
NoiseInjectionDetailerHookProvider: + schedules = ["skip_start", "from_start"] + + @classmethod + def INPUT_TYPES(s): + return {"required": { + "schedule_for_cycle": (s.schedules,), + "source": (["CPU", "GPU"],), + "seed": ("INT", {"default": 0, "min": 0, "max": 0xffffffffffffffff}), + "start_strength": ("FLOAT", {"default": 2.0, "min": 0.0, "max": 200.0, "step": 0.01}), + "end_strength": ("FLOAT", {"default": 1.0, "min": 0.0, "max": 200.0, "step": 0.01}), + }, + } + + RETURN_TYPES = ("DETAILER_HOOK",) + FUNCTION = "doit" + + CATEGORY = "ImpactPack/Detailer" + + def doit(self, schedule_for_cycle, source, seed, start_strength, end_strength): + try: + hook = hooks.InjectNoiseHookForDetailer(source, seed, start_strength, end_strength, + from_start=('from_start' in schedule_for_cycle)) + return (hook, ) + except Exception as e: + logging.error(f"[Impact Pack] NoiseInjectionDetailerHookProvider: 'ComfyUI Noise' custom node isn't installed. You must install 'BlenderNeko/ComfyUI Noise' extension to use this node.\t{e}") + + +# class CustomNoiseDetailerHookProvider: +# @classmethod +# def INPUT_TYPES(s): +# return {"required": { +# "noise": ("NOISE",)}, +# } +# +# RETURN_TYPES = ("DETAILER_HOOK",) +# FUNCTION = "doit" +# +# CATEGORY = "ImpactPack/Detailer" +# +# def doit(self, noise): +# hook = hooks.CustomNoiseDetailerHookProvider(noise) +# return (hook, ) + + +class VariationNoiseDetailerHookProvider: + @classmethod + def INPUT_TYPES(s): + return {"required": { + "seed": ("INT", {"default": 0, "min": 0, "max": 0xffffffffffffffff}), + "strength": ("FLOAT", {"default": 0.0, "min": 0.0, "max": 1.0, "step": 0.01})} + } + + RETURN_TYPES = ("DETAILER_HOOK",) + FUNCTION = "doit" + + CATEGORY = "ImpactPack/Detailer" + + def doit(self, seed, strength): + hook = hooks.VariationNoiseDetailerHookProvider(seed, strength) + return (hook, ) + + +class UnsamplerDetailerHookProvider: + schedules = ["skip_start", "from_start"] + + @classmethod + def INPUT_TYPES(s): + return {"required": + {"model": ("MODEL",), + "steps": ("INT", {"default": 25, "min": 1, "max": 10000}), + "start_end_at_step": ("INT", {"default": 21, "min": 0, "max": 10000}), + "end_end_at_step": ("INT", {"default": 24, "min": 0, "max": 10000}), + "cfg": ("FLOAT", {"default": 1.0, "min": 0.0, "max": 100.0}), + "sampler_name": (comfy.samplers.KSampler.SAMPLERS, ), + "scheduler": (comfy.samplers.KSampler.SCHEDULERS, ), + "normalize": (["disable", "enable"], ), + "positive": ("CONDITIONING", ), + "negative": ("CONDITIONING", ), + "schedule_for_cycle": (s.schedules,), + }} + + RETURN_TYPES = ("DETAILER_HOOK",) + FUNCTION = "doit" + + CATEGORY = "ImpactPack/Detailer" + + def doit(self, model, steps, start_end_at_step, end_end_at_step, cfg, sampler_name, + scheduler, normalize, positive, negative, schedule_for_cycle): + try: + hook = hooks.UnsamplerDetailerHook(model, steps, start_end_at_step, end_end_at_step, cfg, sampler_name, + scheduler, normalize, positive, negative, + from_start=('from_start' in schedule_for_cycle)) + + return (hook, ) + except Exception as e: + logging.error(f"[Impact Pack] UnsamplerDetailerHookProvider: 'ComfyUI Noise' custom node isn't installed. 
You must install 'BlenderNeko/ComfyUI Noise' extension to use this node.\t{e}") + pass + + +class DenoiseSchedulerDetailerHookProvider: + schedules = ["simple"] + + @classmethod + def INPUT_TYPES(s): + return {"required": { + "schedule_for_cycle": (s.schedules,), + "target_denoise": ("FLOAT", {"default": 0.3, "min": 0.0, "max": 1.0, "step": 0.01}), + }, + } + + RETURN_TYPES = ("DETAILER_HOOK",) + FUNCTION = "doit" + + CATEGORY = "ImpactPack/Detailer" + + def doit(self, schedule_for_cycle, target_denoise): + hook = hooks.SimpleDetailerDenoiseSchedulerHook(target_denoise) + return (hook, ) + + +class CoreMLDetailerHookProvider: + @classmethod + def INPUT_TYPES(s): + return {"required": {"mode": (["512x512", "768x768", "512x768", "768x512"], )}, } + + RETURN_TYPES = ("DETAILER_HOOK",) + FUNCTION = "doit" + + CATEGORY = "ImpactPack/Detailer" + + def doit(self, mode): + hook = hooks.CoreMLHook(mode) + return (hook, ) + + +class CustomSamplerDetailerHookProvider: + @classmethod + def INPUT_TYPES(s): + return {"required": { + "sampler": ("SAMPLER", ), + }, + } + + RETURN_TYPES = ("DETAILER_HOOK",) + FUNCTION = "doit" + + CATEGORY = "ImpactPack/Detailer" + + DESCRIPTION = "Apply a hook that allows you to use a custom sampler in the Detailer nodes. When using `DetailerHookCombine`, the sampler from the first hook is applied." + + def doit(self, sampler): + hook = hooks.CustomSamplerDetailerHookProvider(sampler) + return (hook, ) + + +class CfgScheduleHookProvider: + schedules = ["simple"] + + @classmethod + def INPUT_TYPES(s): + return {"required": { + "schedule_for_iteration": (s.schedules,), + "target_cfg": ("FLOAT", {"default": 3.0, "min": 0.0, "max": 100.0}), + }, + } + + RETURN_TYPES = ("PK_HOOK",) + FUNCTION = "doit" + + CATEGORY = "ImpactPack/Upscale" + + def doit(self, schedule_for_iteration, target_cfg): + hook = None + if schedule_for_iteration == "simple": + hook = hooks.SimpleCfgScheduleHook(target_cfg) + + return (hook, ) + + +class UnsamplerHookProvider: + schedules = ["simple"] + + @classmethod + def INPUT_TYPES(s): + return {"required": + {"model": ("MODEL",), + "steps": ("INT", {"default": 25, "min": 1, "max": 10000}), + "start_end_at_step": ("INT", {"default": 21, "min": 0, "max": 10000}), + "end_end_at_step": ("INT", {"default": 24, "min": 0, "max": 10000}), + "cfg": ("FLOAT", {"default": 1.0, "min": 0.0, "max": 100.0}), + "sampler_name": (comfy.samplers.KSampler.SAMPLERS, ), + "scheduler": (comfy.samplers.KSampler.SCHEDULERS, ), + "normalize": (["disable", "enable"], ), + "positive": ("CONDITIONING", ), + "negative": ("CONDITIONING", ), + "schedule_for_iteration": (s.schedules,), + }} + + RETURN_TYPES = ("PK_HOOK",) + FUNCTION = "doit" + + CATEGORY = "ImpactPack/Upscale" + + def doit(self, model, steps, start_end_at_step, end_end_at_step, cfg, sampler_name, + scheduler, normalize, positive, negative, schedule_for_iteration): + try: + hook = None + if schedule_for_iteration == "simple": + hook = hooks.UnsamplerHook(model, steps, start_end_at_step, end_end_at_step, cfg, sampler_name, + scheduler, normalize, positive, negative) + + return (hook, ) + except Exception as e: + logging.error(f"[Impact Pack] UnsamplerHookProvider: 'ComfyUI Noise' custom node isn't installed. 
You must install 'BlenderNeko/ComfyUI Noise' extension to use this node.\t{e}") + + +class NoiseInjectionHookProvider: + schedules = ["simple"] + + @classmethod + def INPUT_TYPES(s): + return {"required": { + "schedule_for_iteration": (s.schedules,), + "source": (["CPU", "GPU"],), + "seed": ("INT", {"default": 0, "min": 0, "max": 0xffffffffffffffff}), + "start_strength": ("FLOAT", {"default": 1.0, "min": 0.0, "max": 200.0, "step": 0.01}), + "end_strength": ("FLOAT", {"default": 1.0, "min": 0.0, "max": 200.0, "step": 0.01}), + }, + } + + RETURN_TYPES = ("PK_HOOK",) + FUNCTION = "doit" + + CATEGORY = "ImpactPack/Upscale" + + def doit(self, schedule_for_iteration, source, seed, start_strength, end_strength): + try: + hook = None + if schedule_for_iteration == "simple": + hook = hooks.InjectNoiseHook(source, seed, start_strength, end_strength) + + return (hook, ) + except Exception as e: + logging.error(f"[Impact Pack] NoiseInjectionHookProvider: 'ComfyUI Noise' custom node isn't installed. You must install 'BlenderNeko/ComfyUI Noise' extension to use this node.\t{e}") + + +class DenoiseScheduleHookProvider: + schedules = ["simple"] + + @classmethod + def INPUT_TYPES(s): + return {"required": { + "schedule_for_iteration": (s.schedules,), + "target_denoise": ("FLOAT", {"default": 0.2, "min": 0.0, "max": 1.0, "step": 0.01}), + }, + } + + RETURN_TYPES = ("PK_HOOK",) + FUNCTION = "doit" + + CATEGORY = "ImpactPack/Upscale" + + def doit(self, schedule_for_iteration, target_denoise): + hook = None + if schedule_for_iteration == "simple": + hook = hooks.SimpleDenoiseScheduleHook(target_denoise) + + return (hook, ) + + +class StepsScheduleHookProvider: + schedules = ["simple"] + + @classmethod + def INPUT_TYPES(s): + return {"required": { + "schedule_for_iteration": (s.schedules,), + "target_steps": ("INT", {"default": 20, "min": 1, "max": 10000}), + }, + } + + RETURN_TYPES = ("PK_HOOK",) + FUNCTION = "doit" + + CATEGORY = "ImpactPack/Upscale" + + def doit(self, schedule_for_iteration, target_steps): + hook = None + if schedule_for_iteration == "simple": + hook = hooks.SimpleStepsScheduleHook(target_steps) + + return (hook, ) + + +class DetailerHookCombine: + @classmethod + def INPUT_TYPES(s): + return {"required": { + "hook1": ("DETAILER_HOOK",), + "hook2": ("DETAILER_HOOK",), + }, + } + + RETURN_TYPES = ("DETAILER_HOOK",) + FUNCTION = "doit" + + CATEGORY = "ImpactPack/Upscale" + + def doit(self, hook1, hook2): + hook = hooks.DetailerHookCombine(hook1, hook2) + return (hook, ) + + +class PixelKSampleHookCombine: + @classmethod + def INPUT_TYPES(s): + return {"required": { + "hook1": ("PK_HOOK",), + "hook2": ("PK_HOOK",), + }, + } + + RETURN_TYPES = ("PK_HOOK",) + FUNCTION = "doit" + + CATEGORY = "ImpactPack/Upscale" + + def doit(self, hook1, hook2): + hook = hooks.PixelKSampleHookCombine(hook1, hook2) + return (hook, ) + + +class PixelTiledKSampleUpscalerProvider: + upscale_methods = ["nearest-exact", "bilinear", "lanczos", "area"] + + @classmethod + def INPUT_TYPES(s): + return {"required": { + "scale_method": (s.upscale_methods,), + "model": ("MODEL",), + "vae": ("VAE",), + "seed": ("INT", {"default": 0, "min": 0, "max": 0xffffffffffffffff}), + "steps": ("INT", {"default": 20, "min": 1, "max": 10000}), + "cfg": ("FLOAT", {"default": 8.0, "min": 0.0, "max": 100.0}), + "sampler_name": (comfy.samplers.KSampler.SAMPLERS, ), + "scheduler": (comfy.samplers.KSampler.SCHEDULERS, ), + "positive": ("CONDITIONING", ), + "negative": ("CONDITIONING", ), + "denoise": ("FLOAT", {"default": 1.0, "min": 0.0, "max": 
1.0, "step": 0.01}), + "tile_width": ("INT", {"default": 512, "min": 320, "max": MAX_RESOLUTION, "step": 64}), + "tile_height": ("INT", {"default": 512, "min": 320, "max": MAX_RESOLUTION, "step": 64}), + "tiling_strategy": (["random", "padded", 'simple'], ), + }, + "optional": { + "upscale_model_opt": ("UPSCALE_MODEL", ), + "pk_hook_opt": ("PK_HOOK", ), + "tile_cnet_opt": ("CONTROL_NET", ), + "tile_cnet_strength": ("FLOAT", {"default": 1.0, "min": 0.0, "max": 1.0, "step": 0.01}), + "overlap": ("INT", {"default": 64, "min": 0, "max": 4096, "step": 32}), + } + } + + RETURN_TYPES = ("UPSCALER",) + FUNCTION = "doit" + + CATEGORY = "ImpactPack/Upscale" + + def doit(self, scale_method, model, vae, seed, steps, cfg, sampler_name, scheduler, positive, negative, denoise, tile_width, tile_height, tiling_strategy, upscale_model_opt=None, + pk_hook_opt=None, tile_cnet_opt=None, tile_cnet_strength=1.0, overlap=64): + if "BNK_TiledKSampler" in nodes.NODE_CLASS_MAPPINGS: + upscaler = core.PixelTiledKSampleUpscaler(scale_method, model, vae, seed, steps, cfg, sampler_name, scheduler, positive, negative, denoise, + tile_width, tile_height, tiling_strategy, upscale_model_opt, pk_hook_opt, tile_cnet_opt, + tile_size=max(tile_width, tile_height), tile_cnet_strength=tile_cnet_strength, overlap=overlap) + return (upscaler, ) + else: + utils.try_install_custom_node('https://github.com/BlenderNeko/ComfyUI_TiledKSampler', + "To use 'PixelTiledKSampleUpscalerProvider' node, 'BlenderNeko/ComfyUI_TiledKSampler' extension is required.") + + raise Exception("[ERROR] PixelTiledKSampleUpscalerProvider: ComfyUI_TiledKSampler custom node isn't installed. You must install BlenderNeko/ComfyUI_TiledKSampler extension to use this node.") + + +class PixelTiledKSampleUpscalerProviderPipe: + upscale_methods = ["nearest-exact", "bilinear", "lanczos", "area"] + + @classmethod + def INPUT_TYPES(s): + return {"required": { + "scale_method": (s.upscale_methods,), + "seed": ("INT", {"default": 0, "min": 0, "max": 0xffffffffffffffff}), + "steps": ("INT", {"default": 20, "min": 1, "max": 10000}), + "cfg": ("FLOAT", {"default": 8.0, "min": 0.0, "max": 100.0}), + "sampler_name": (comfy.samplers.KSampler.SAMPLERS, ), + "scheduler": (comfy.samplers.KSampler.SCHEDULERS, ), + "denoise": ("FLOAT", {"default": 1.0, "min": 0.0, "max": 1.0, "step": 0.01}), + "tile_width": ("INT", {"default": 512, "min": 320, "max": MAX_RESOLUTION, "step": 64}), + "tile_height": ("INT", {"default": 512, "min": 320, "max": MAX_RESOLUTION, "step": 64}), + "tiling_strategy": (["random", "padded", 'simple'], ), + "basic_pipe": ("BASIC_PIPE",) + }, + "optional": { + "upscale_model_opt": ("UPSCALE_MODEL", ), + "pk_hook_opt": ("PK_HOOK", ), + "tile_cnet_opt": ("CONTROL_NET", ), + "tile_cnet_strength": ("FLOAT", {"default": 1.0, "min": 0.0, "max": 1.0, "step": 0.01}), + } + } + + RETURN_TYPES = ("UPSCALER",) + FUNCTION = "doit" + + CATEGORY = "ImpactPack/Upscale" + + def doit(self, scale_method, seed, steps, cfg, sampler_name, scheduler, denoise, tile_width, tile_height, tiling_strategy, basic_pipe, upscale_model_opt=None, pk_hook_opt=None, + tile_cnet_opt=None, tile_cnet_strength=1.0): + if "BNK_TiledKSampler" in nodes.NODE_CLASS_MAPPINGS: + model, _, vae, positive, negative = basic_pipe + upscaler = core.PixelTiledKSampleUpscaler(scale_method, model, vae, seed, steps, cfg, sampler_name, scheduler, positive, negative, denoise, + tile_width, tile_height, tiling_strategy, upscale_model_opt, pk_hook_opt, tile_cnet_opt, + tile_size=max(tile_width, tile_height), 
tile_cnet_strength=tile_cnet_strength) + return (upscaler, ) + else: + logging.error("[Impact Pack] PixelTiledKSampleUpscalerProviderPipe: ComfyUI_TiledKSampler custom node isn't installed. You must install BlenderNeko/ComfyUI_TiledKSampler extension to use this node.") + raise Exception("[Impact Pack] PixelTiledKSampleUpscalerProviderPipe: ComfyUI_TiledKSampler custom node isn't installed. You must install BlenderNeko/ComfyUI_TiledKSampler extension to use this node.") + + +class PixelKSampleUpscalerProvider: + upscale_methods = ["nearest-exact", "bilinear", "lanczos", "area"] + + @classmethod + def INPUT_TYPES(s): + return {"required": { + "scale_method": (s.upscale_methods,), + "model": ("MODEL",), + "vae": ("VAE",), + "seed": ("INT", {"default": 0, "min": 0, "max": 0xffffffffffffffff}), + "steps": ("INT", {"default": 20, "min": 1, "max": 10000}), + "cfg": ("FLOAT", {"default": 8.0, "min": 0.0, "max": 100.0}), + "sampler_name": (comfy.samplers.KSampler.SAMPLERS, ), + "scheduler": (core.get_schedulers(), ), + "positive": ("CONDITIONING", ), + "negative": ("CONDITIONING", ), + "denoise": ("FLOAT", {"default": 1.0, "min": 0.0, "max": 1.0, "step": 0.01}), + "use_tiled_vae": ("BOOLEAN", {"default": False, "label_on": "enabled", "label_off": "disabled"}), + "tile_size": ("INT", {"default": 512, "min": 320, "max": 4096, "step": 64}), + }, + "optional": { + "upscale_model_opt": ("UPSCALE_MODEL", ), + "pk_hook_opt": ("PK_HOOK", ), + "scheduler_func_opt": ("SCHEDULER_FUNC",), + } + } + + RETURN_TYPES = ("UPSCALER",) + FUNCTION = "doit" + + CATEGORY = "ImpactPack/Upscale" + + def doit(self, scale_method, model, vae, seed, steps, cfg, sampler_name, scheduler, positive, negative, denoise, + use_tiled_vae, upscale_model_opt=None, pk_hook_opt=None, tile_size=512, scheduler_func_opt=None): + upscaler = core.PixelKSampleUpscaler(scale_method, model, vae, seed, steps, cfg, sampler_name, scheduler, + positive, negative, denoise, use_tiled_vae, upscale_model_opt, pk_hook_opt, + tile_size=tile_size, scheduler_func=scheduler_func_opt) + return (upscaler, ) + + +class PixelKSampleUpscalerProviderPipe(PixelKSampleUpscalerProvider): + upscale_methods = ["nearest-exact", "bilinear", "lanczos", "area"] + + @classmethod + def INPUT_TYPES(s): + return {"required": { + "scale_method": (s.upscale_methods,), + "seed": ("INT", {"default": 0, "min": 0, "max": 0xffffffffffffffff}), + "steps": ("INT", {"default": 20, "min": 1, "max": 10000}), + "cfg": ("FLOAT", {"default": 8.0, "min": 0.0, "max": 100.0}), + "sampler_name": (comfy.samplers.KSampler.SAMPLERS, ), + "scheduler": (core.get_schedulers(), ), + "denoise": ("FLOAT", {"default": 1.0, "min": 0.0, "max": 1.0, "step": 0.01}), + "use_tiled_vae": ("BOOLEAN", {"default": False, "label_on": "enabled", "label_off": "disabled"}), + "basic_pipe": ("BASIC_PIPE",), + "tile_size": ("INT", {"default": 512, "min": 320, "max": 4096, "step": 64}), + }, + "optional": { + "upscale_model_opt": ("UPSCALE_MODEL", ), + "pk_hook_opt": ("PK_HOOK", ), + "scheduler_func_opt": ("SCHEDULER_FUNC",), + "tile_cnet_opt": ("CONTROL_NET", ), + "tile_cnet_strength": ("FLOAT", {"default": 1.0, "min": 0.0, "max": 1.0, "step": 0.01}), + } + } + + RETURN_TYPES = ("UPSCALER",) + FUNCTION = "doit_pipe" + + CATEGORY = "ImpactPack/Upscale" + + def doit_pipe(self, scale_method, seed, steps, cfg, sampler_name, scheduler, denoise, + use_tiled_vae, basic_pipe, upscale_model_opt=None, pk_hook_opt=None, + tile_size=512, scheduler_func_opt=None, tile_cnet_opt=None, tile_cnet_strength=1.0): + model, _, vae, 
positive, negative = basic_pipe + upscaler = core.PixelKSampleUpscaler(scale_method, model, vae, seed, steps, cfg, sampler_name, scheduler, + positive, negative, denoise, use_tiled_vae, upscale_model_opt, pk_hook_opt, + tile_size=tile_size, scheduler_func=scheduler_func_opt, + tile_cnet_opt=tile_cnet_opt, tile_cnet_strength=tile_cnet_strength) + return (upscaler, ) + + +class TwoSamplersForMaskUpscalerProvider: + upscale_methods = ["nearest-exact", "bilinear", "lanczos", "area"] + + @classmethod + def INPUT_TYPES(s): + return {"required": { + "scale_method": (s.upscale_methods,), + "full_sample_schedule": ( + ["none", "interleave1", "interleave2", "interleave3", + "last1", "last2", + "interleave1+last1", "interleave2+last1", "interleave3+last1", + ],), + "use_tiled_vae": ("BOOLEAN", {"default": False, "label_on": "enabled", "label_off": "disabled"}), + "base_sampler": ("KSAMPLER", ), + "mask_sampler": ("KSAMPLER", ), + "mask": ("MASK", ), + "vae": ("VAE",), + "tile_size": ("INT", {"default": 512, "min": 320, "max": 4096, "step": 64}), + }, + "optional": { + "full_sampler_opt": ("KSAMPLER",), + "upscale_model_opt": ("UPSCALE_MODEL", ), + "pk_hook_base_opt": ("PK_HOOK", ), + "pk_hook_mask_opt": ("PK_HOOK", ), + "pk_hook_full_opt": ("PK_HOOK", ), + } + } + + RETURN_TYPES = ("UPSCALER", ) + FUNCTION = "doit" + + CATEGORY = "ImpactPack/Upscale" + + def doit(self, scale_method, full_sample_schedule, use_tiled_vae, base_sampler, mask_sampler, mask, vae, + full_sampler_opt=None, upscale_model_opt=None, + pk_hook_base_opt=None, pk_hook_mask_opt=None, pk_hook_full_opt=None, tile_size=512): + upscaler = core.TwoSamplersForMaskUpscaler(scale_method, full_sample_schedule, use_tiled_vae, + base_sampler, mask_sampler, mask, vae, full_sampler_opt, upscale_model_opt, + pk_hook_base_opt, pk_hook_mask_opt, pk_hook_full_opt, tile_size=tile_size) + return (upscaler, ) + + +class TwoSamplersForMaskUpscalerProviderPipe: + upscale_methods = ["nearest-exact", "bilinear", "lanczos", "area"] + + @classmethod + def INPUT_TYPES(s): + return {"required": { + "scale_method": (s.upscale_methods,), + "full_sample_schedule": ( + ["none", "interleave1", "interleave2", "interleave3", + "last1", "last2", + "interleave1+last1", "interleave2+last1", "interleave3+last1", + ],), + "use_tiled_vae": ("BOOLEAN", {"default": False, "label_on": "enabled", "label_off": "disabled"}), + "base_sampler": ("KSAMPLER", ), + "mask_sampler": ("KSAMPLER", ), + "mask": ("MASK", ), + "basic_pipe": ("BASIC_PIPE",), + "tile_size": ("INT", {"default": 512, "min": 320, "max": 4096, "step": 64}), + }, + "optional": { + "full_sampler_opt": ("KSAMPLER",), + "upscale_model_opt": ("UPSCALE_MODEL", ), + "pk_hook_base_opt": ("PK_HOOK", ), + "pk_hook_mask_opt": ("PK_HOOK", ), + "pk_hook_full_opt": ("PK_HOOK", ), + } + } + + RETURN_TYPES = ("UPSCALER", ) + FUNCTION = "doit" + + CATEGORY = "ImpactPack/Upscale" + + def doit(self, scale_method, full_sample_schedule, use_tiled_vae, base_sampler, mask_sampler, mask, basic_pipe, + full_sampler_opt=None, upscale_model_opt=None, + pk_hook_base_opt=None, pk_hook_mask_opt=None, pk_hook_full_opt=None, tile_size=512): + + mask = utils.make_2d_mask(mask) + + _, _, vae, _, _ = basic_pipe + upscaler = core.TwoSamplersForMaskUpscaler(scale_method, full_sample_schedule, use_tiled_vae, + base_sampler, mask_sampler, mask, vae, full_sampler_opt, upscale_model_opt, + pk_hook_base_opt, pk_hook_mask_opt, pk_hook_full_opt, tile_size=tile_size) + return (upscaler, ) + + +class IterativeLatentUpscale: + @classmethod + def 
INPUT_TYPES(s): + return {"required": { + "samples": ("LATENT", ), + "upscale_factor": ("FLOAT", {"default": 1.5, "min": 1, "max": 10000, "step": 0.1}), + "steps": ("INT", {"default": 3, "min": 1, "max": 10000, "step": 1}), + "temp_prefix": ("STRING", {"default": ""}), + "upscaler": ("UPSCALER",), + "step_mode": (["simple", "geometric"], {"default": "simple"}), + "vae_compression": ("INT", {"default": 8, "min": 0, "max": 256, "step": 8}) + }, + "hidden": {"unique_id": "UNIQUE_ID"}, + } + + RETURN_TYPES = ("LATENT", "VAE") + RETURN_NAMES = ("latent", "vae") + FUNCTION = "doit" + + CATEGORY = "ImpactPack/Upscale" + + # dim_reduction_factor=8 for SD1/SDXL, used to calculate actual dims from latents based on VAE + def doit(self, samples, upscale_factor, steps, temp_prefix, upscaler, step_mode="simple", vae_compression=8, unique_id=None): + h, w = samples['samples'].shape[-2:] + w, h = w * vae_compression, h * vae_compression + + if temp_prefix == "": + temp_prefix = None + + if step_mode == "geometric": + upscale_factor_unit = pow(upscale_factor, 1.0/steps) + else: # simple + upscale_factor_unit = max(0, (upscale_factor - 1.0) / steps) + + current_latent = samples + noise_mask = current_latent.get('noise_mask') + scale = 1 + + for i in range(steps-1): + if step_mode == "geometric": + scale *= upscale_factor_unit + else: # simple + scale += upscale_factor_unit + + new_w = w*scale + new_h = h*scale + core.update_node_status(unique_id, f"{i+1}/{steps} steps | x{scale:.2f}", (i+1)/steps) + logging.info(f"IterativeLatentUpscale[{i+1}/{steps}]: {new_w:.1f}x{new_h:.1f} (scale:{scale:.2f}) ") + step_info = i, steps + current_latent = upscaler.upscale_shape(step_info, current_latent, new_w, new_h, temp_prefix) + if noise_mask is not None: + current_latent['noise_mask'] = noise_mask + + if scale < upscale_factor: + new_w = w*upscale_factor + new_h = h*upscale_factor + core.update_node_status(unique_id, f"Final step | x{upscale_factor:.2f}", 1.0) + logging.info(f"IterativeLatentUpscale[Final]: {new_w:.1f}x{new_h:.1f} (scale:{upscale_factor:.2f}) ") + step_info = steps-1, steps + current_latent = upscaler.upscale_shape(step_info, current_latent, new_w, new_h, temp_prefix) + + core.update_node_status(unique_id, "", None) + + return current_latent, upscaler.vae + + +class IterativeImageUpscale: + @classmethod + def INPUT_TYPES(s): + return {"required": { + "pixels": ("IMAGE", ), + "upscale_factor": ("FLOAT", {"default": 1.5, "min": 1, "max": 10000, "step": 0.1}), + "steps": ("INT", {"default": 3, "min": 1, "max": 10000, "step": 1}), + "temp_prefix": ("STRING", {"default": ""}), + "upscaler": ("UPSCALER",), + "vae": ("VAE",), + "step_mode": (["simple", "geometric"], {"default": "simple"}), + "vae_compression": ("INT", {"default": 8, "min": 0, "max": 256, "step": 8}) + }, + "hidden": {"unique_id": "UNIQUE_ID"} + } + + RETURN_TYPES = ("IMAGE",) + RETURN_NAMES = ("image",) + FUNCTION = "doit" + + CATEGORY = "ImpactPack/Upscale" + + def doit(self, pixels, upscale_factor, steps, temp_prefix, upscaler, vae, step_mode="simple", vae_compression=8, unique_id=None): + if temp_prefix == "": + temp_prefix = None + + core.update_node_status(unique_id, "VAEEncode (first)", 0) + if upscaler.is_tiled: + encoder = nodes.VAEEncodeTiled() + if 'overlap' in inspect.signature(encoder.encode).parameters: + latent = encoder.encode(vae, pixels, upscaler.tile_size, overlap=upscaler.overlap)[0] + else: + latent = encoder.encode(vae, pixels, upscaler.tile_size)[0] + else: + latent = nodes.VAEEncode().encode(vae, pixels)[0] + + 
refined_latent = IterativeLatentUpscale().doit(latent, upscale_factor, steps, temp_prefix, upscaler, step_mode, vae_compression, unique_id) + + core.update_node_status(unique_id, "VAEDecode (final)", 1.0) + if upscaler.is_tiled: + pixels = nodes.VAEDecodeTiled().decode(vae, refined_latent[0], upscaler.tile_size)[0] + else: + pixels = nodes.VAEDecode().decode(vae, refined_latent[0])[0] + + core.update_node_status(unique_id, "", None) + + return (pixels, ) + + +class FaceDetailerPipe: + @classmethod + def INPUT_TYPES(s): + return {"required": { + "image": ("IMAGE", ), + "detailer_pipe": ("DETAILER_PIPE", {"tooltip": "If the `ImpactDummyInput` is connected to the model in the detailer_pipe, the inference stage is skipped."}), + "guide_size": ("FLOAT", {"default": 512, "min": 64, "max": nodes.MAX_RESOLUTION, "step": 8}), + "guide_size_for": ("BOOLEAN", {"default": True, "label_on": "bbox", "label_off": "crop_region"}), + "max_size": ("FLOAT", {"default": 1024, "min": 64, "max": nodes.MAX_RESOLUTION, "step": 8}), + "seed": ("INT", {"default": 0, "min": 0, "max": 0xffffffffffffffff}), + "steps": ("INT", {"default": 20, "min": 1, "max": 10000}), + "cfg": ("FLOAT", {"default": 8.0, "min": 0.0, "max": 100.0}), + "sampler_name": (comfy.samplers.KSampler.SAMPLERS,), + "scheduler": (core.get_schedulers(),), + "denoise": ("FLOAT", {"default": 0.5, "min": 0.0001, "max": 1.0, "step": 0.01}), + "feather": ("INT", {"default": 5, "min": 0, "max": 100, "step": 1}), + "noise_mask": ("BOOLEAN", {"default": True, "label_on": "enabled", "label_off": "disabled"}), + "force_inpaint": ("BOOLEAN", {"default": True, "label_on": "enabled", "label_off": "disabled"}), + + "bbox_threshold": ("FLOAT", {"default": 0.5, "min": 0.0, "max": 1.0, "step": 0.01}), + "bbox_dilation": ("INT", {"default": 10, "min": -512, "max": 512, "step": 1}), + "bbox_crop_factor": ("FLOAT", {"default": 3.0, "min": 1.0, "max": 10, "step": 0.1}), + + "sam_detection_hint": (["center-1", "horizontal-2", "vertical-2", "rect-4", "diamond-4", "mask-area", "mask-points", "mask-point-bbox", "none"],), + "sam_dilation": ("INT", {"default": 0, "min": -512, "max": 512, "step": 1}), + "sam_threshold": ("FLOAT", {"default": 0.93, "min": 0.0, "max": 1.0, "step": 0.01}), + "sam_bbox_expansion": ("INT", {"default": 0, "min": 0, "max": 1000, "step": 1}), + "sam_mask_hint_threshold": ("FLOAT", {"default": 0.7, "min": 0.0, "max": 1.0, "step": 0.01}), + "sam_mask_hint_use_negative": (["False", "Small", "Outter"],), + + "drop_size": ("INT", {"min": 1, "max": MAX_RESOLUTION, "step": 1, "default": 10}), + "refiner_ratio": ("FLOAT", {"default": 0.2, "min": 0.0, "max": 1.0}), + + "cycle": ("INT", {"default": 1, "min": 1, "max": 10, "step": 1}), + }, + "optional": { + "inpaint_model": ("BOOLEAN", {"default": False, "label_on": "enabled", "label_off": "disabled"}), + "noise_mask_feather": ("INT", {"default": 20, "min": 0, "max": 100, "step": 1}), + "scheduler_func_opt": ("SCHEDULER_FUNC",), + "tiled_encode": ("BOOLEAN", {"default": False, "label_on": "enabled", "label_off": "disabled"}), + "tiled_decode": ("BOOLEAN", {"default": False, "label_on": "enabled", "label_off": "disabled"}), + } + } + + RETURN_TYPES = ("IMAGE", "IMAGE", "IMAGE", "MASK", "DETAILER_PIPE", "IMAGE") + RETURN_NAMES = ("image", "cropped_refined", "cropped_enhanced_alpha", "mask", "detailer_pipe", "cnet_images") + OUTPUT_IS_LIST = (False, True, True, False, False, True) + FUNCTION = "doit" + + CATEGORY = "ImpactPack/Simple" + + DESCRIPTION = FaceDetailer.DESCRIPTION + + def doit(self, image, 
detailer_pipe, guide_size, guide_size_for, max_size, seed, steps, cfg, sampler_name, scheduler, + denoise, feather, noise_mask, force_inpaint, bbox_threshold, bbox_dilation, bbox_crop_factor, + sam_detection_hint, sam_dilation, sam_threshold, sam_bbox_expansion, + sam_mask_hint_threshold, sam_mask_hint_use_negative, drop_size, refiner_ratio=None, + cycle=1, inpaint_model=False, noise_mask_feather=0, scheduler_func_opt=None, + tiled_encode=False, tiled_decode=False): + + result_img = None + result_mask = None + result_cropped_enhanced = [] + result_cropped_enhanced_alpha = [] + result_cnet_images = [] + + if len(image) > 1: + logging.warning("[Impact Pack] WARN: FaceDetailer is not a node designed for video detailing. If you intend to perform video detailing, please use Detailer For AnimateDiff.") + + model, clip, vae, positive, negative, wildcard, bbox_detector, segm_detector, sam_model_opt, detailer_hook, \ + refiner_model, refiner_clip, refiner_positive, refiner_negative = detailer_pipe + + for i, single_image in enumerate(image): + enhanced_img, cropped_enhanced, cropped_enhanced_alpha, mask, cnet_pil_list = FaceDetailer.enhance_face( + single_image.unsqueeze(0), model, clip, vae, guide_size, guide_size_for, max_size, seed + i, steps, cfg, sampler_name, scheduler, + positive, negative, denoise, feather, noise_mask, force_inpaint, + bbox_threshold, bbox_dilation, bbox_crop_factor, + sam_detection_hint, sam_dilation, sam_threshold, sam_bbox_expansion, sam_mask_hint_threshold, + sam_mask_hint_use_negative, drop_size, bbox_detector, segm_detector, sam_model_opt, wildcard, detailer_hook, + refiner_ratio=refiner_ratio, refiner_model=refiner_model, + refiner_clip=refiner_clip, refiner_positive=refiner_positive, refiner_negative=refiner_negative, + cycle=cycle, inpaint_model=inpaint_model, noise_mask_feather=noise_mask_feather, scheduler_func_opt=scheduler_func_opt, + tiled_encode=tiled_encode, tiled_decode=tiled_decode) + + result_img = torch.cat((result_img, enhanced_img), dim=0) if result_img is not None else enhanced_img + result_mask = torch.cat((result_mask, mask), dim=0) if result_mask is not None else mask + result_cropped_enhanced.extend(cropped_enhanced) + result_cropped_enhanced_alpha.extend(cropped_enhanced_alpha) + result_cnet_images.extend(cnet_pil_list) + + if len(result_cropped_enhanced) == 0: + result_cropped_enhanced = [utils.empty_pil_tensor()] + + if len(result_cropped_enhanced_alpha) == 0: + result_cropped_enhanced_alpha = [utils.empty_pil_tensor()] + + if len(result_cnet_images) == 0: + result_cnet_images = [utils.empty_pil_tensor()] + + return result_img, result_cropped_enhanced, result_cropped_enhanced_alpha, result_mask, detailer_pipe, result_cnet_images + + +class MaskDetailerPipe: + @classmethod + def INPUT_TYPES(s): + return {"required": { + "image": ("IMAGE", ), + "mask": ("MASK", ), + "basic_pipe": ("BASIC_PIPE",), + + "guide_size": ("FLOAT", {"default": 512, "min": 64, "max": nodes.MAX_RESOLUTION, "step": 8}), + "guide_size_for": ("BOOLEAN", {"default": True, "label_on": "mask bbox", "label_off": "crop region"}), + "max_size": ("FLOAT", {"default": 1024, "min": 64, "max": nodes.MAX_RESOLUTION, "step": 8}), + "mask_mode": ("BOOLEAN", {"default": True, "label_on": "masked only", "label_off": "whole"}), + + "seed": ("INT", {"default": 0, "min": 0, "max": 0xffffffffffffffff}), + "steps": ("INT", {"default": 20, "min": 1, "max": 10000}), + "cfg": ("FLOAT", {"default": 8.0, "min": 0.0, "max": 100.0}), + "sampler_name": (comfy.samplers.KSampler.SAMPLERS,), + "scheduler": 
(core.get_schedulers(),), + "denoise": ("FLOAT", {"default": 0.5, "min": 0.0001, "max": 1.0, "step": 0.01}), + + "feather": ("INT", {"default": 5, "min": 0, "max": 100, "step": 1}), + "crop_factor": ("FLOAT", {"default": 3.0, "min": 1.0, "max": 10, "step": 0.1}), + "drop_size": ("INT", {"min": 1, "max": MAX_RESOLUTION, "step": 1, "default": 10}), + "refiner_ratio": ("FLOAT", {"default": 0.2, "min": 0.0, "max": 1.0}), + "batch_size": ("INT", {"default": 1, "min": 1, "max": 100}), + + "cycle": ("INT", {"default": 1, "min": 1, "max": 10, "step": 1}), + }, + "optional": { + "refiner_basic_pipe_opt": ("BASIC_PIPE", ), + "detailer_hook": ("DETAILER_HOOK",), + "inpaint_model": ("BOOLEAN", {"default": False, "label_on": "enabled", "label_off": "disabled"}), + "noise_mask_feather": ("INT", {"default": 20, "min": 0, "max": 100, "step": 1}), + "bbox_fill": ("BOOLEAN", {"default": False, "label_on": "enabled", "label_off": "disabled"}), + "contour_fill": ("BOOLEAN", {"default": True, "label_on": "enabled", "label_off": "disabled"}), + "scheduler_func_opt": ("SCHEDULER_FUNC",), + } + } + + RETURN_TYPES = ("IMAGE", "IMAGE", "IMAGE", "BASIC_PIPE", "BASIC_PIPE") + RETURN_NAMES = ("image", "cropped_refined", "cropped_enhanced_alpha", "basic_pipe", "refiner_basic_pipe_opt") + OUTPUT_IS_LIST = (False, True, True, False, False) + FUNCTION = "doit" + + CATEGORY = "ImpactPack/Detailer" + + DESCRIPTION = "" + + def doit(self, image, mask, basic_pipe, guide_size, guide_size_for, max_size, mask_mode, + seed, steps, cfg, sampler_name, scheduler, denoise, + feather, crop_factor, drop_size, refiner_ratio, batch_size, cycle=1, + refiner_basic_pipe_opt=None, detailer_hook=None, inpaint_model=False, noise_mask_feather=0, + bbox_fill=False, contour_fill=True, scheduler_func_opt=None): + + if len(image) > 1: + raise Exception('[Impact Pack] ERROR: MaskDetailer does not allow image batches.\nPlease refer to https://github.com/ltdrdata/ComfyUI-extension-tutorials/blob/Main/ComfyUI-Impact-Pack/tutorial/batching-detailer.md for more information.') + + model, clip, vae, positive, negative = basic_pipe + + if refiner_basic_pipe_opt is None: + refiner_model, refiner_clip, refiner_positive, refiner_negative = None, None, None, None + else: + refiner_model, refiner_clip, _, refiner_positive, refiner_negative = refiner_basic_pipe_opt + + # create segs + if mask is not None: + mask = utils.make_2d_mask(mask) + segs = core.mask_to_segs(mask, False, crop_factor, bbox_fill, drop_size, is_contour=contour_fill) + else: + segs = ((image.shape[1], image.shape[2]), []) + + enhanced_img_batch = None + cropped_enhanced_list = [] + cropped_enhanced_alpha_list = [] + + for i in range(batch_size): + if mask is not None: + enhanced_img, _, cropped_enhanced, cropped_enhanced_alpha, _, _ = \ + DetailerForEach.do_detail(image, segs, model, clip, vae, guide_size, guide_size_for, max_size, seed+i, steps, + cfg, sampler_name, scheduler, positive, negative, denoise, feather, mask_mode, + force_inpaint=True, wildcard_opt=None, detailer_hook=detailer_hook, + refiner_ratio=refiner_ratio, refiner_model=refiner_model, refiner_clip=refiner_clip, + refiner_positive=refiner_positive, refiner_negative=refiner_negative, + cycle=cycle, inpaint_model=inpaint_model, noise_mask_feather=noise_mask_feather, scheduler_func_opt=scheduler_func_opt) + else: + enhanced_img, cropped_enhanced, cropped_enhanced_alpha = image, [], [] + + if enhanced_img_batch is None: + enhanced_img_batch = enhanced_img + else: + enhanced_img_batch = torch.cat((enhanced_img_batch, 
enhanced_img), dim=0) + + cropped_enhanced_list += cropped_enhanced + cropped_enhanced_alpha_list += cropped_enhanced_alpha + + # set fallback image + if len(cropped_enhanced_list) == 0: + cropped_enhanced_list = [utils.empty_pil_tensor()] + + if len(cropped_enhanced_alpha_list) == 0: + cropped_enhanced_alpha_list = [utils.empty_pil_tensor()] + + return enhanced_img_batch, cropped_enhanced_list, cropped_enhanced_alpha_list, basic_pipe, refiner_basic_pipe_opt + + +class DetailerForEachTest(DetailerForEach): + RETURN_TYPES = ("IMAGE", "IMAGE", "IMAGE", "IMAGE", "IMAGE") + RETURN_NAMES = ("image", "cropped", "cropped_refined", "cropped_refined_alpha", "cnet_images") + OUTPUT_IS_LIST = (False, True, True, True, True) + + FUNCTION = "doit" + + CATEGORY = "ImpactPack/Detailer" + + def doit(self, image, segs, model, clip, vae, guide_size, guide_size_for, max_size, seed, steps, cfg, sampler_name, + scheduler, positive, negative, denoise, feather, noise_mask, force_inpaint, wildcard, detailer_hook=None, + cycle=1, inpaint_model=False, noise_mask_feather=0, scheduler_func_opt=None, tiled_encode=False, tiled_decode=False): + + if len(image) > 1: + raise Exception('[Impact Pack] ERROR: DetailerForEach does not allow image batches.\nPlease refer to https://github.com/ltdrdata/ComfyUI-extension-tutorials/blob/Main/ComfyUI-Impact-Pack/tutorial/batching-detailer.md for more information.') + + enhanced_img, cropped, cropped_enhanced, cropped_enhanced_alpha, cnet_pil_list, new_segs = \ + DetailerForEach.do_detail(image, segs, model, clip, vae, guide_size, guide_size_for, max_size, seed, steps, + cfg, sampler_name, scheduler, positive, negative, denoise, feather, noise_mask, + force_inpaint, wildcard, detailer_hook, + cycle=cycle, inpaint_model=inpaint_model, noise_mask_feather=noise_mask_feather, + scheduler_func_opt=scheduler_func_opt, tiled_encode=tiled_encode, tiled_decode=tiled_decode) + + # set fallback image + if len(cropped) == 0: + cropped = [utils.empty_pil_tensor()] + + if len(cropped_enhanced) == 0: + cropped_enhanced = [utils.empty_pil_tensor()] + + if len(cropped_enhanced_alpha) == 0: + cropped_enhanced_alpha = [utils.empty_pil_tensor()] + + if len(cnet_pil_list) == 0: + cnet_pil_list = [utils.empty_pil_tensor()] + + return enhanced_img, cropped, cropped_enhanced, cropped_enhanced_alpha, cnet_pil_list + + +class DetailerForEachTestPipe(DetailerForEachPipe): + RETURN_TYPES = ("IMAGE", "SEGS", "BASIC_PIPE", "IMAGE", "IMAGE", "IMAGE", "IMAGE", ) + RETURN_NAMES = ("image", "segs", "basic_pipe", "cropped", "cropped_refined", "cropped_refined_alpha", 'cnet_images') + OUTPUT_IS_LIST = (False, False, False, True, True, True, True) + + FUNCTION = "doit" + + CATEGORY = "ImpactPack/Detailer" + + DESCRIPTION = DetailerForEach.DESCRIPTION + + def doit(self, image, segs, guide_size, guide_size_for, max_size, seed, steps, cfg, sampler_name, scheduler, + denoise, feather, noise_mask, force_inpaint, basic_pipe, wildcard, cycle=1, + refiner_ratio=None, detailer_hook=None, refiner_basic_pipe_opt=None, inpaint_model=False, noise_mask_feather=0, + scheduler_func_opt=None, tiled_encode=False, tiled_decode=False): + + if len(image) > 1: + raise Exception('[Impact Pack] ERROR: DetailerForEach does not allow image batches.\nPlease refer to https://github.com/ltdrdata/ComfyUI-extension-tutorials/blob/Main/ComfyUI-Impact-Pack/tutorial/batching-detailer.md for more information.') + + model, clip, vae, positive, negative = basic_pipe + + if refiner_basic_pipe_opt is None: + refiner_model, refiner_clip, refiner_positive, 
refiner_negative = None, None, None, None + else: + refiner_model, refiner_clip, _, refiner_positive, refiner_negative = refiner_basic_pipe_opt + + enhanced_img, cropped, cropped_enhanced, cropped_enhanced_alpha, cnet_pil_list, new_segs = \ + DetailerForEach.do_detail(image, segs, model, clip, vae, guide_size, guide_size_for, max_size, seed, steps, cfg, + sampler_name, scheduler, positive, negative, denoise, feather, noise_mask, + force_inpaint, wildcard, detailer_hook, + refiner_ratio=refiner_ratio, refiner_model=refiner_model, + refiner_clip=refiner_clip, refiner_positive=refiner_positive, + refiner_negative=refiner_negative, + cycle=cycle, inpaint_model=inpaint_model, noise_mask_feather=noise_mask_feather, + scheduler_func_opt=scheduler_func_opt, tiled_encode=tiled_encode, tiled_decode=tiled_decode) + + # set fallback image + if len(cropped) == 0: + cropped = [utils.empty_pil_tensor()] + + if len(cropped_enhanced) == 0: + cropped_enhanced = [utils.empty_pil_tensor()] + + if len(cropped_enhanced_alpha) == 0: + cropped_enhanced_alpha = [utils.empty_pil_tensor()] + + if len(cnet_pil_list) == 0: + cnet_pil_list = [utils.empty_pil_tensor()] + + return enhanced_img, new_segs, basic_pipe, cropped, cropped_enhanced, cropped_enhanced_alpha, cnet_pil_list + + +class SegsBitwiseAndMask: + @classmethod + def INPUT_TYPES(s): + return {"required": { + "segs": ("SEGS",), + "mask": ("MASK",), + } + } + + RETURN_TYPES = ("SEGS",) + FUNCTION = "doit" + + CATEGORY = "ImpactPack/Operation" + + def doit(self, segs, mask): + return (core.segs_bitwise_and_mask(segs, mask), ) + + +class SegsBitwiseAndMaskForEach: + @classmethod + def INPUT_TYPES(s): + return {"required": { + "segs": ("SEGS",), + "masks": ("MASK",), + } + } + + RETURN_TYPES = ("SEGS",) + FUNCTION = "doit" + + CATEGORY = "ImpactPack/Operation" + + def doit(self, segs, masks): + return (core.apply_mask_to_each_seg(segs, masks), ) + + +class BitwiseAndMaskForEach: + @classmethod + def INPUT_TYPES(s): + return {"required": + { + "base_segs": ("SEGS",), + "mask_segs": ("SEGS",), + } + } + + RETURN_TYPES = ("SEGS",) + FUNCTION = "doit" + + CATEGORY = "ImpactPack/Operation" + + DESCRIPTION = "Retains only the overlapping areas between the masks included in base_segs and the mask regions of mask_segs. SEGS with no overlapping mask areas are filtered out." + + def doit(self, base_segs, mask_segs): + mask = core.segs_to_combined_mask(mask_segs) + mask = utils.make_3d_mask(mask) + + return SegsBitwiseAndMask().doit(base_segs, mask) + + +class SubtractMaskForEach: + @classmethod + def INPUT_TYPES(s): + return {"required": { + "base_segs": ("SEGS",), + "mask_segs": ("SEGS",), + } + } + + RETURN_TYPES = ("SEGS",) + FUNCTION = "doit" + + CATEGORY = "ImpactPack/Operation" + + DESCRIPTION = "Removes only the overlapping areas between the masks included in base_segs and the mask regions of mask_segs. SEGS with no overlapping mask areas are filtered out." 
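# An illustrative sketch of the mask arithmetic that the AND/subtract SEGS
# nodes above delegate to the core helpers: AND keeps only overlapping regions,
# subtract removes them from the base. The helper names below are hypothetical,
# not the Impact Pack API; only the semantics mirror the descriptions.
import torch

def intersect_masks_sketch(a: torch.Tensor, b: torch.Tensor) -> torch.Tensor:
    # Treat any value > 0 as "on"; the result is 1.0 where both masks overlap.
    return ((a > 0) & (b > 0)).float()

def subtract_masks_sketch(a: torch.Tensor, b: torch.Tensor) -> torch.Tensor:
    # Keep regions of `a` that do not overlap `b`.
    return ((a > 0) & ~(b > 0)).float()

base = torch.zeros(4, 4); base[1:3, 1:3] = 1.0
other = torch.zeros(4, 4); other[2:4, 2:4] = 1.0
assert intersect_masks_sketch(base, other).sum() == 1  # single overlapping cell at (2, 2)
assert subtract_masks_sketch(base, other).sum() == 3   # the rest of `base` survives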
+
+    def doit(self, base_segs, mask_segs):
+        mask = core.segs_to_combined_mask(mask_segs)
+        mask = utils.make_3d_mask(mask)
+        return (core.segs_bitwise_subtract_mask(base_segs, mask), )
+
+
+class ToBinaryMask:
+    @classmethod
+    def INPUT_TYPES(s):
+        return {"required": {
+                    "mask": ("MASK",),
+                    "threshold": ("INT", {"default": 20, "min": 1, "max": 255}),
+                    }
+                }
+
+    RETURN_TYPES = ("MASK",)
+    FUNCTION = "doit"
+
+    CATEGORY = "ImpactPack/Operation"
+
+    def doit(self, mask, threshold):
+        mask = utils.to_binary_mask(mask, threshold/255.0)
+        return (mask,)
+
+
+class FlattenMask:
+    @classmethod
+    def INPUT_TYPES(s):
+        return {"required": {
+                    "masks": ("MASK",),
+                    }
+                }
+
+    RETURN_TYPES = ("MASK",)
+    FUNCTION = "doit"
+
+    CATEGORY = "ImpactPack/Operation"
+
+    def doit(self, masks):
+        masks = utils.make_3d_mask(masks)
+        masks = utils.flatten_mask(masks)
+        return (masks,)
+
+
+class BitwiseAndMask:
+    @classmethod
+    def INPUT_TYPES(s):
+        return {"required": {
+                    "mask1": ("MASK",),
+                    "mask2": ("MASK",),
+                    }
+                }
+
+    RETURN_TYPES = ("MASK",)
+    FUNCTION = "doit"
+
+    CATEGORY = "ImpactPack/Operation"
+
+    def doit(self, mask1, mask2):
+        mask = utils.bitwise_and_masks(mask1, mask2)
+        return (mask,)
+
+
+class SubtractMask:
+    @classmethod
+    def INPUT_TYPES(s):
+        return {"required": {
+                    "mask1": ("MASK", ),
+                    "mask2": ("MASK", ),
+                    }
+                }
+
+    RETURN_TYPES = ("MASK",)
+    FUNCTION = "doit"
+
+    CATEGORY = "ImpactPack/Operation"
+
+    def doit(self, mask1, mask2):
+        mask = utils.subtract_masks(mask1, mask2)
+        return (mask,)
+
+
+class AddMask:
+    @classmethod
+    def INPUT_TYPES(s):
+        return {"required": {
+                    "mask1": ("MASK",),
+                    "mask2": ("MASK",),
+                    }
+                }
+
+    RETURN_TYPES = ("MASK",)
+    FUNCTION = "doit"
+
+    CATEGORY = "ImpactPack/Operation"
+
+    def doit(self, mask1, mask2):
+        mask = utils.add_masks(mask1, mask2)
+        return (mask,)
+
+
+def get_image_hash(arr):
+    split_index1 = arr.shape[0] // 2
+    split_index2 = arr.shape[1] // 2
+    part1 = arr[:split_index1, :split_index2]
+    part2 = arr[:split_index1, split_index2:]
+    part3 = arr[split_index1:, :split_index2]
+    part4 = arr[split_index1:, split_index2:]
+
+    # sum each quadrant
+    sum1 = np.sum(part1)
+    sum2 = np.sum(part2)
+    sum3 = np.sum(part3)
+    sum4 = np.sum(part4)
+
+    return hash((sum1, sum2, sum3, sum4))
+
+
+def get_file_item(base_type, path):
+    path_type = base_type
+
+    if path.endswith("[output]"):
+        path_type = "output"
+        path = path[:-9]
+    elif path.endswith("[input]"):
+        path_type = "input"
+        path = path[:-8]
+    elif path.endswith("[temp]"):
+        path_type = "temp"
+        path = path[:-7]
+
+    subfolder = os.path.dirname(path)
+    filename = os.path.basename(path)
+
+    return {
+        "filename": filename,
+        "subfolder": subfolder,
+        "type": path_type
+    }
+
+
+class MaskRectArea:
+    # Creates a rectangle mask from percentage coordinates.
+    def __init__(self):
+        pass
+
+    @classmethod
+    def INPUT_TYPES(cls):
+        return {
+            "required": {
+                # Added typed INT inputs so this node can be driven by other INT nodes.
+ "x": ("INT", {"default": 0, "min": 0, "max": 100, "step": 1}), + "y": ("INT", {"default": 0, "min": 0, "max": 100, "step": 1}), + "width": ("INT", {"default": 50, "min": 0, "max": 100, "step": 1}), + "height": ("INT", {"default": 50, "min": 0, "max": 100, "step": 1}), + "blur_radius": ("INT", {"default": 0, "min": 0, "step": 1}) + }, + "hidden": {"extra_pnginfo": "EXTRA_PNGINFO", "unique_id": "UNIQUE_ID"} + } + + RETURN_TYPES = ("MASK",) + + CATEGORY = "ImpactPack/Operation" + FUNCTION = "create_mask" + + def create_mask(self, x, y, width, height, blur_radius, extra_pnginfo, unique_id): + # Backward-compat: if node properties exist in workflow, prefer them. + try: + for node in extra_pnginfo["workflow"]["nodes"]: + if str(node["id"]) == str(unique_id): + props = node.get("properties", {}) + x = int(props.get("x", x)) + y = int(props.get("y", y)) + width = int(props.get("w", width)) + height = int(props.get("h", height)) + blur_radius = int(props.get("blur_radius", blur_radius)) + break + except Exception: + pass + + # Clamp percent inputs + if x < 0: + x = 0 + if y < 0: + y = 0 + if width < 0: + width = 0 + if height < 0: + height = 0 + if x > 100: + x = 100 + if y > 100: + y = 100 + if width > 100: + width = 100 + if height > 100: + height = 100 + + # Convert percent to ratio + min_x = x / 100.0 + min_y = y / 100.0 + w_ratio = width / 100.0 + h_ratio = height / 100.0 + + # Create a mask with standard resolution (e.g., 512x512) + resolution = 512 + mask = torch.zeros((resolution, resolution), dtype=torch.float32) + + # Calculate pixel coordinates + min_x_px = int(min_x * resolution) + min_y_px = int(min_y * resolution) + max_x_px = int((min_x + w_ratio) * resolution) + max_y_px = int((min_y + h_ratio) * resolution) + + # Clamp pixel bounds + if min_x_px < 0: + min_x_px = 0 + if min_y_px < 0: + min_y_px = 0 + if max_x_px > resolution: + max_x_px = resolution + if max_y_px > resolution: + max_y_px = resolution + + # Draw the rectangle on the mask + if max_x_px > min_x_px and max_y_px > min_y_px: + mask[min_y_px:max_y_px, min_x_px:max_x_px] = 1.0 + + # Apply blur if the radii are greater than 0 + if blur_radius > 0: + dx = blur_radius * 2 + 1 + dy = blur_radius * 2 + 1 + + # Convert the mask to a format compatible with OpenCV (numpy array) + mask_np = mask.cpu().numpy().astype("float32") + + # Apply Gaussian Blur + blurred_mask = cv2.GaussianBlur(mask_np, (dx, dy), 0) + + # Convert back to tensor + mask = torch.from_numpy(blurred_mask) + + # Return the mask as a tensor with an additional channel + return (mask.unsqueeze(0),) + + +class MaskRectAreaAdvanced: + # Creates a rectangle mask using pixels relative to image size. 
+ def __init__(self): + pass + + @classmethod + def INPUT_TYPES(cls): + return { + "required": { + "x": ("INT", {"default": 0, "min": 0, "step": 1}), + "y": ("INT", {"default": 0, "min": 0, "step": 1}), + "width": ("INT", {"default": 256, "min": 0, "step": 1}), + "height": ("INT", {"default": 320, "min": 0, "step": 1}), + "image_width": ("INT", {"default": 512, "min": 1, "step": 1}), + "image_height": ("INT", {"default": 320, "min": 1, "step": 1}), + "blur_radius": ("INT", {"default": 0, "min": 0, "step": 1}) + }, + "hidden": {"extra_pnginfo": "EXTRA_PNGINFO", "unique_id": "UNIQUE_ID"} + } + + RETURN_TYPES = ("MASK",) + + CATEGORY = "ImpactPack/Operation" + FUNCTION = "create_mask_advanced" + + def create_mask_advanced(self, x, y, width, height, image_width, image_height, blur_radius, extra_pnginfo, unique_id): + # Backward-compat fallback: if node properties exist in workflow, prefer them + try: + for node in extra_pnginfo["workflow"]["nodes"]: + if node["id"] == int(unique_id): + props = node.get("properties", {}) + x = int(props.get("x", x)) + y = int(props.get("y", y)) + width = int(props.get("w", width)) + height = int(props.get("h", height)) + image_width = int(props.get("width", image_width)) + image_height = int(props.get("height", image_height)) + blur_radius = int(props.get("blur_radius", blur_radius)) + break + except Exception: + pass + + # Clamp to safe bounds + if image_width < 1: + image_width = 1 + if image_height < 1: + image_height = 1 + if width < 0: + width = 0 + if height < 0: + height = 0 + if x < 0: + x = 0 + if y < 0: + y = 0 + + max_x = min(x + width, image_width) + max_y = min(y + height, image_height) + + mask = torch.zeros((image_height, image_width), dtype=torch.float32) + + if max_x > x and max_y > y: + mask[y:max_y, x:max_x] = 1.0 + + # Apply blur if the radii are greater than 0 + if blur_radius > 0: + k = blur_radius * 2 + 1 + mask_np = mask.cpu().numpy().astype("float32") + blurred_mask = cv2.GaussianBlur(mask_np, (k, k), 0) + mask = torch.from_numpy(blurred_mask) + + # Return the mask as a tensor with an additional channel + return (mask.unsqueeze(0),) + + +class ImageReceiver: + @classmethod + def INPUT_TYPES(s): + input_dir = folder_paths.get_input_directory() + files = [f for f in os.listdir(input_dir) if os.path.isfile(os.path.join(input_dir, f))] + return {"required": { + "image": (sorted(files), ), + "link_id": ("INT", {"default": 0, "min": 0, "max": sys.maxsize, "step": 1}), + "save_to_workflow": ("BOOLEAN", {"default": False}), + "image_data": ("STRING", {"multiline": False}), + "trigger_always": ("BOOLEAN", {"default": False, "label_on": "enable", "label_off": "disable"}), + }, + } + + FUNCTION = "doit" + + RETURN_TYPES = ("IMAGE", "MASK") + + CATEGORY = "ImpactPack/Util" + + def doit(self, image, link_id, save_to_workflow, image_data, trigger_always): + if save_to_workflow: + try: + image_data = base64.b64decode(image_data.split(",")[1]) + i = Image.open(BytesIO(image_data)) + i = ImageOps.exif_transpose(i) + image = i.convert("RGB") + image = np.array(image).astype(np.float32) / 255.0 + image = torch.from_numpy(image)[None,] + if 'A' in i.getbands(): + mask = np.array(i.getchannel('A')).astype(np.float32) / 255.0 + mask = 1. 
- torch.from_numpy(mask) + else: + mask = torch.zeros((64, 64), dtype=torch.float32, device="cpu") + return image, mask.unsqueeze(0) + except Exception: + logging.warning("[WARN] ComfyUI-Impact-Pack: ImageReceiver - invalid 'image_data'") + mask = torch.zeros((64, 64), dtype=torch.float32, device="cpu") + return utils.empty_pil_tensor(64, 64), mask + else: + return nodes.LoadImage().load_image(image) + + @classmethod + def VALIDATE_INPUTS(s, image, link_id, save_to_workflow, image_data, trigger_always): + if image != '#DATA' and not folder_paths.exists_annotated_filepath(image) or image.startswith("/") or ".." in image: + return "Invalid image file: {}".format(image) + + return True + + @classmethod + def IS_CHANGED(s, image, link_id, save_to_workflow, image_data, trigger_always): + if trigger_always: + return float("NaN") + else: + if save_to_workflow: + return hash(image_data) + else: + return hash(image) + + +from server import PromptServer + +class ImageSender(nodes.PreviewImage): + @classmethod + def INPUT_TYPES(s): + return {"required": { + "images": ("IMAGE", ), + "filename_prefix": ("STRING", {"default": "ImgSender"}), + "link_id": ("INT", {"default": 0, "min": 0, "max": sys.maxsize, "step": 1}), }, + "hidden": {"prompt": "PROMPT", "extra_pnginfo": "EXTRA_PNGINFO"}, + } + + OUTPUT_NODE = True + + FUNCTION = "doit" + + CATEGORY = "ImpactPack/Util" + + def doit(self, images, filename_prefix="ImgSender", link_id=0, prompt=None, extra_pnginfo=None): + result = nodes.PreviewImage().save_images(images, filename_prefix, prompt, extra_pnginfo) + PromptServer.instance.send_sync("img-send", {"link_id": link_id, "images": result['ui']['images']}) + return result + + +class LatentReceiver: + def __init__(self): + self.input_dir = folder_paths.get_input_directory() + self.type = "input" + + @classmethod + def INPUT_TYPES(s): + def check_file_extension(x): + return x.endswith(".latent") or x.endswith(".latent.png") + + input_dir = folder_paths.get_input_directory() + files = [f for f in os.listdir(input_dir) if os.path.isfile(os.path.join(input_dir, f)) and check_file_extension(f)] + return {"required": { + "latent": (sorted(files), ), + "link_id": ("INT", {"default": 0, "min": 0, "max": sys.maxsize, "step": 1}), + "trigger_always": ("BOOLEAN", {"default": False, "label_on": "enable", "label_off": "disable"}), + }, + } + + FUNCTION = "doit" + + CATEGORY = "ImpactPack/Util" + + RETURN_TYPES = ("LATENT",) + + @staticmethod + def load_preview_latent(image_path): + if not os.path.exists(image_path): + return None + + image = Image.open(image_path) + exif_data = piexif.load(image.info["exif"]) + + if piexif.ExifIFD.UserComment in exif_data["Exif"]: + compressed_data = exif_data["Exif"][piexif.ExifIFD.UserComment] + compressed_data_io = BytesIO(compressed_data) + with zipfile.ZipFile(compressed_data_io, mode='r') as archive: + tensor_bytes = archive.read("latent") + tensor = safetensors.torch.load(tensor_bytes) + return {"samples": tensor['latent_tensor']} + return None + + def parse_filename(self, filename): + pattern = r"^(.*)/(.*?)\[(.*)\]\s*$" + match = re.match(pattern, filename) + if match: + subfolder = match.group(1) + filename = match.group(2).rstrip() + file_type = match.group(3) + else: + subfolder = '' + file_type = self.type + + return {'filename': filename, 'subfolder': subfolder, 'type': file_type} + + def doit(self, **kwargs): + if 'latent' not in kwargs: + return (torch.zeros([1, 4, 8, 8]), ) + + latent = kwargs['latent'] + + latent_name = latent + latent_path = 
folder_paths.get_annotated_filepath(latent_name) + + if latent.endswith(".latent"): + latent = safetensors.torch.load_file(latent_path, device="cpu") + multiplier = 1.0 + if "latent_format_version_0" not in latent: + multiplier = 1.0 / 0.18215 + samples = {"samples": latent["latent_tensor"].float() * multiplier} + else: + samples = LatentReceiver.load_preview_latent(latent_path) + + if samples is None: + samples = {'samples': torch.zeros([1, 4, 8, 8])} + + preview = self.parse_filename(latent_name) + + return { + 'ui': {"images": [preview]}, + 'result': (samples, ) + } + + @classmethod + def IS_CHANGED(s, latent, link_id, trigger_always): + if trigger_always: + return float("NaN") + else: + image_path = folder_paths.get_annotated_filepath(latent) + m = hashlib.sha256() + with open(image_path, 'rb') as f: + m.update(f.read()) + return m.digest().hex() + + @classmethod + def VALIDATE_INPUTS(s, latent, link_id, trigger_always): + if not folder_paths.exists_annotated_filepath(latent) or latent.startswith("/") or ".." in latent: + return "Invalid latent file: {}".format(latent) + return True + + +class LatentSender(nodes.SaveLatent): + def __init__(self): + super().__init__() + self.output_dir = folder_paths.get_temp_directory() + self.type = "temp" + + @classmethod + def INPUT_TYPES(s): + return {"required": { + "samples": ("LATENT", ), + "filename_prefix": ("STRING", {"default": "latents/LatentSender"}), + "link_id": ("INT", {"default": 0, "min": 0, "max": sys.maxsize, "step": 1}), + "preview_method": (["Latent2RGB-FLUX.1", + "Latent2RGB-SDXL", "Latent2RGB-SD15", "Latent2RGB-SD3", + "Latent2RGB-SD-X4", "Latent2RGB-Playground-2.5", + "Latent2RGB-SC-Prior", "Latent2RGB-SC-B", + "Latent2RGB-LTXV", + "TAEF1", "TAESDXL", "TAESD15", "TAESD3"],) + }, + "hidden": {"prompt": "PROMPT", "extra_pnginfo": "EXTRA_PNGINFO"}, + } + + OUTPUT_NODE = True + + RETURN_TYPES = () + + FUNCTION = "doit" + + CATEGORY = "ImpactPack/Util" + + @staticmethod + def save_to_file(tensor_bytes, prompt, extra_pnginfo, image, image_path): + compressed_data = BytesIO() + with zipfile.ZipFile(compressed_data, mode='w') as archive: + archive.writestr("latent", tensor_bytes) + image = image.copy() + exif_data = {"Exif": {piexif.ExifIFD.UserComment: compressed_data.getvalue()}} + + metadata = PngInfo() + if prompt is not None: + metadata.add_text("prompt", json.dumps(prompt)) + if extra_pnginfo is not None: + for x in extra_pnginfo: + metadata.add_text(x, json.dumps(extra_pnginfo[x])) + + exif_bytes = piexif.dump(exif_data) + image.save(image_path, format='png', exif=exif_bytes, pnginfo=metadata, optimize=True) + + @staticmethod + def prepare_preview(latent_tensor, preview_method): + from comfy.cli_args import LatentPreviewMethod + import comfy.latent_formats as latent_formats + + lower_bound = 128 + upper_bound = 256 + + if preview_method == "Latent2RGB-SD15": + latent_format = latent_formats.SD15() + method = LatentPreviewMethod.Latent2RGB + elif preview_method == "Latent2RGB-SDXL": + latent_format = latent_formats.SDXL() + method = LatentPreviewMethod.Latent2RGB + elif preview_method == "Latent2RGB-SD3": + latent_format = latent_formats.SD3() + method = LatentPreviewMethod.Latent2RGB + elif preview_method == "Latent2RGB-SD-X4": + latent_format = latent_formats.SD_X4() + method = LatentPreviewMethod.Latent2RGB + elif preview_method == "Latent2RGB-Playground-2.5": + latent_format = latent_formats.SDXL_Playground_2_5() + method = LatentPreviewMethod.Latent2RGB + elif preview_method == "Latent2RGB-SC-Prior": + latent_format = 
latent_formats.SC_Prior()
+            method = LatentPreviewMethod.Latent2RGB
+        elif preview_method == "Latent2RGB-SC-B":
+            latent_format = latent_formats.SC_B()
+            method = LatentPreviewMethod.Latent2RGB
+        elif preview_method == "Latent2RGB-FLUX.1":
+            latent_format = latent_formats.Flux()
+            method = LatentPreviewMethod.Latent2RGB
+        elif preview_method == "Latent2RGB-LTXV":
+            latent_format = latent_formats.LTXV()
+            method = LatentPreviewMethod.Latent2RGB
+        else:
+            logging.warning(f"[Impact Pack] LatentSender: '{preview_method}' is an unsupported preview method.")
+            latent_format = latent_formats.SD15()
+            method = LatentPreviewMethod.Latent2RGB
+
+        previewer = core.get_previewer("cpu", latent_format=latent_format, force=True, method=method)
+
+        image = previewer.decode_latent_to_preview(latent_tensor)
+        min_size = min(image.size[0], image.size[1])
+        max_size = max(image.size[0], image.size[1])
+
+        scale_factor = 1
+        if max_size > upper_bound:
+            scale_factor = upper_bound/max_size
+
+        # prevent the preview from becoming too small
+        if min_size*scale_factor < lower_bound:
+            scale_factor = lower_bound/min_size
+
+        w = int(image.size[0] * scale_factor)
+        h = int(image.size[1] * scale_factor)
+
+        image = image.resize((w, h), resample=Image.NEAREST)
+
+        return LatentSender.attach_format_text(image)
+
+    @staticmethod
+    def attach_format_text(image):
+        width_a, height_a = image.size
+
+        letter_image = Image.open(latent_letter_path)
+        width_b, height_b = letter_image.size
+
+        new_width = max(width_a, width_b)
+        new_height = height_a + height_b
+
+        new_image = Image.new('RGB', (new_width, new_height), (0, 0, 0))
+
+        offset_x = (new_width - width_b) // 2
+        offset_y = (height_a + (new_height - height_a - height_b) // 2)
+        new_image.paste(letter_image, (offset_x, offset_y))
+
+        new_image.paste(image, (0, 0))
+
+        return new_image
+
+    def doit(self, samples, filename_prefix="latents/LatentSender", link_id=0, preview_method="Latent2RGB-SDXL", prompt=None, extra_pnginfo=None):
+        full_output_folder, filename, counter, subfolder, filename_prefix = folder_paths.get_save_image_path(filename_prefix, self.output_dir)
+
+        # generate the preview image
+        preview = LatentSender.prepare_preview(samples['samples'], preview_method)
+
+        # save metadata to support latent sharing
+        file = f"{filename}_{counter:05}_.latent.png"
+        fullpath = os.path.join(full_output_folder, file)
+
+        output = {"latent_tensor": samples["samples"]}
+
+        tensor_bytes = safetensors.torch.save(output)
+        LatentSender.save_to_file(tensor_bytes, prompt, extra_pnginfo, preview, fullpath)
+
+        latent_path = {
+            'filename': file,
+            'subfolder': subfolder,
+            'type': self.type
+        }
+
+        PromptServer.instance.send_sync("latent-send", {"link_id": link_id, "images": [latent_path]})
+
+        return {'ui': {'images': [latent_path]}}
+
+
+class ImpactWildcardProcessor:
+    @classmethod
+    def INPUT_TYPES(s):
+        return {"required": {
+                        "wildcard_text": ("STRING", {"multiline": True, "dynamicPrompts": False, "tooltip": "Enter a prompt using wildcard syntax."}),
+                        "populated_text": ("STRING", {"multiline": True, "dynamicPrompts": False, "tooltip": "The actual value passed during the execution of 'ImpactWildcardProcessor' is what is shown here. The behavior varies slightly depending on the mode. Wildcard syntax can also be used in 'populated_text'."}),
+                        "mode": (["populate", "fixed", "reproduce"], {"default": "populate", "tooltip":
+                            "populate: Before running the workflow, it overwrites the existing value of 'populated_text' with the prompt processed from 'wildcard_text'.
In this mode, 'populated_text' cannot be edited.\n"
+                            "fixed: Ignores wildcard_text and keeps 'populated_text' as is. You can edit 'populated_text' in this mode.\n"
+                            "reproduce: This mode operates as 'fixed' mode only once for reproduction, and then it switches to 'populate' mode."
+                            }),
+                        "seed": ("INT", {"default": 0, "min": 0, "max": 0xffffffffffffffff, "tooltip": "Determines the random seed to be used for wildcard processing."}),
+                        "Select to add Wildcard": (["Select the Wildcard to add to the text"],),
+                    },
+                }
+
+    CATEGORY = "ImpactPack/Prompt"
+
+    DESCRIPTION = ("The 'ImpactWildcardProcessor' processes text prompts written in wildcard syntax and outputs the processed text prompt.\n\n"
+                   "TIP: Before the workflow is executed, the processing result of 'wildcard_text' is displayed in 'populated_text', and the populated text is saved along with the workflow. If you want to use a seed converted as input, write the prompt directly in 'populated_text' instead of 'wildcard_text', and set the mode to 'fixed'.")
+
+    RETURN_TYPES = ("STRING", )
+    RETURN_NAMES = ("processed text",)
+    FUNCTION = "doit"
+
+    @staticmethod
+    def process(**kwargs):
+        return impact.wildcards.process(**kwargs)
+
+    def doit(self, *args, **kwargs):
+        populated_text = ImpactWildcardProcessor.process(text=kwargs['populated_text'], seed=kwargs['seed'])
+        return (populated_text, )
+
+
+class ImpactWildcardEncode:
+    @classmethod
+    def INPUT_TYPES(s):
+        return {"required": {
+                        "model": ("MODEL",),
+                        "clip": ("CLIP",),
+                        "wildcard_text": ("STRING", {"multiline": True, "dynamicPrompts": False, "tooltip": "Enter a prompt using wildcard syntax."}),
+                        "populated_text": ("STRING", {"multiline": True, "dynamicPrompts": False, "tooltip": "The actual value passed during the execution of 'ImpactWildcardEncode' is what is shown here. The behavior varies slightly depending on the mode. Wildcard syntax can also be used in 'populated_text'."}),
+                        "mode": (["populate", "fixed", "reproduce"], {"tooltip":
+                            "populate: Before running the workflow, it overwrites the existing value of 'populated_text' with the prompt processed from 'wildcard_text'. In this mode, 'populated_text' cannot be edited.\n"
+                            "fixed: Ignores wildcard_text and keeps 'populated_text' as is. You can edit 'populated_text' in this mode.\n"
+                            "reproduce: This mode operates as 'fixed' mode only once for reproduction, and then it switches to 'populate' mode."}),
+                        "Select to add LoRA": (["Select the LoRA to add to the text"] + folder_paths.get_filename_list("loras"), ),
+                        "Select to add Wildcard": (["Select the Wildcard to add to the text"], ),
+                        "seed": ("INT", {"default": 0, "min": 0, "max": 0xffffffffffffffff, "tooltip": "Determines the random seed to be used for wildcard processing."}),
+                    },
+                }
+
+    CATEGORY = "ImpactPack/Prompt"
+
+    DESCRIPTION = ("The 'ImpactWildcardEncode' node processes text prompts written in wildcard syntax and outputs them as conditioning. It also supports LoRA syntax, with the applied LoRA reflected in the model's output.\n\n"
+                   "TIP1: Before the workflow is executed, the processing result of 'wildcard_text' is displayed in 'populated_text', and the populated text is saved along with the workflow.
If you want to use a seed converted as input, write the prompt directly in 'populated_text' instead of 'wildcard_text', and set the mode to 'fixed'.\n" + "TIP2: If the 'Inspire Pack' is installed, LBW(LoRA Block Weight) syntax can also be applied.") + + RETURN_TYPES = ("MODEL", "CLIP", "CONDITIONING", "STRING") + RETURN_NAMES = ("model", "clip", "conditioning", "populated_text") + FUNCTION = "doit" + + @staticmethod + def process_with_loras(**kwargs): + return impact.wildcards.process_with_loras(**kwargs) + + @staticmethod + def get_wildcard_list(): + return impact.wildcards.get_wildcard_list() + + def doit(self, *args, **kwargs): + populated = kwargs['populated_text'] + processed = [] + model, clip, conditioning = impact.wildcards.process_with_loras(wildcard_opt=populated, model=kwargs['model'], clip=kwargs['clip'], seed=kwargs['seed'], processed=processed) + return model, clip, conditioning, processed[0] + + +class ImpactSchedulerAdapter: + @classmethod + def INPUT_TYPES(s): + return {"required": { + "scheduler": (comfy.samplers.KSampler.SCHEDULERS, {"defaultInput": True, }), + "extra_scheduler": (['None', 'AYS SDXL', 'AYS SD1', 'AYS SVD', 'GITS[coeff=1.2]', 'LTXV[default]', 'OSS FLUX', 'OSS Wan', 'OSS Chroma'],), + }} + + CATEGORY = "ImpactPack/Util" + + RETURN_TYPES = (core.get_schedulers(),) + RETURN_NAMES = ("scheduler",) + + FUNCTION = "doit" + + def doit(self, scheduler, extra_scheduler): + if extra_scheduler != 'None': + return (extra_scheduler,) + + return (scheduler,) diff --git a/zavodik/nodes/ComfyUI-Impact-Pack/modules/impact/impact_sampling.py b/zavodik/nodes/ComfyUI-Impact-Pack/modules/impact/impact_sampling.py new file mode 100644 index 0000000000000000000000000000000000000000..1fcb70c0d38a24ba11bd5c3325728ea940211a45 --- /dev/null +++ b/zavodik/nodes/ComfyUI-Impact-Pack/modules/impact/impact_sampling.py @@ -0,0 +1,323 @@ +import logging + +import nodes +from comfy.k_diffusion import sampling as k_diffusion_sampling +from comfy import samplers +from comfy_extras import nodes_custom_sampler +import latent_preview +import comfy +import torch +import math +import comfy.model_management as mm + + +try: + from comfy_extras.nodes_custom_sampler import Noise_EmptyNoise, Noise_RandomNoise + import node_helpers +except Exception: + logging.warning("\n#############################################\n[Impact Pack] ComfyUI is an outdated version.\n#############################################\n") + raise Exception("[Impact Pack] ComfyUI is an outdated version.") + + +def calculate_sigmas(model, sampler, scheduler, steps): + discard_penultimate_sigma = False + if sampler in ['dpm_2', 'dpm_2_ancestral', 'uni_pc', 'uni_pc_bh2']: + steps += 1 + discard_penultimate_sigma = True + + if scheduler.startswith('AYS'): + sigmas = nodes.NODE_CLASS_MAPPINGS['AlignYourStepsScheduler']().get_sigmas(scheduler[4:], steps, denoise=1.0)[0] + elif scheduler.startswith('GITS[coeff='): + sigmas = nodes.NODE_CLASS_MAPPINGS['GITSScheduler']().execute(float(scheduler[11:-1]), steps, denoise=1.0)[0] + elif scheduler == 'LTXV[default]': + sigmas = nodes.NODE_CLASS_MAPPINGS['LTXVScheduler']().execute(20, 2.05, 0.95, True, 0.1)[0] + elif scheduler.startswith('OSS'): + sigmas = nodes.NODE_CLASS_MAPPINGS['OptimalStepsScheduler']().execute(scheduler[4:], steps, denoise=1.0)[0] + else: + sigmas = samplers.calculate_sigmas(model.get_model_object("model_sampling"), scheduler, steps) + + if discard_penultimate_sigma: + sigmas = torch.cat([sigmas[:-2], sigmas[-1:]]) + return sigmas + + +def get_noise_sampler(x, cpu, 
total_sigmas, **kwargs): + if 'extra_args' in kwargs and 'seed' in kwargs['extra_args']: + sigma_min, sigma_max = total_sigmas[total_sigmas > 0].min(), total_sigmas.max() + seed = kwargs['extra_args'].get("seed", None) + return k_diffusion_sampling.BrownianTreeNoiseSampler(x, sigma_min, sigma_max, seed=seed, cpu=cpu) + return None + + +def ksampler(sampler_name, total_sigmas, extra_options={}, inpaint_options={}): + if sampler_name in ["dpmpp_sde", "dpmpp_sde_gpu", "dpmpp_2m_sde", "dpmpp_2m_sde_gpu", "dpmpp_3m_sde", "dpmpp_3m_sde_gpu"]: + if sampler_name == "dpmpp_sde": + orig_sampler_function = k_diffusion_sampling.sample_dpmpp_sde + elif sampler_name == "dpmpp_sde_gpu": + orig_sampler_function = k_diffusion_sampling.sample_dpmpp_sde_gpu + elif sampler_name == "dpmpp_2m_sde": + orig_sampler_function = k_diffusion_sampling.sample_dpmpp_2m_sde + elif sampler_name == "dpmpp_2m_sde_gpu": + orig_sampler_function = k_diffusion_sampling.sample_dpmpp_2m_sde_gpu + elif sampler_name == "dpmpp_3m_sde": + orig_sampler_function = k_diffusion_sampling.sample_dpmpp_3m_sde + elif sampler_name == "dpmpp_3m_sde_gpu": + orig_sampler_function = k_diffusion_sampling.sample_dpmpp_3m_sde_gpu + + def sampler_function_wrapper(model, x, sigmas, **kwargs): + if 'noise_sampler' not in kwargs: + kwargs['noise_sampler'] = get_noise_sampler(x, 'gpu' not in sampler_name, total_sigmas, **kwargs) + + return orig_sampler_function(model, x, sigmas, **kwargs) + + sampler_function = sampler_function_wrapper + + else: + return comfy.samplers.sampler_object(sampler_name) + + return samplers.KSAMPLER(sampler_function, extra_options, inpaint_options) + + +# modified version of SamplerCustom.sample +def sample_with_custom_noise(model, add_noise, noise_seed, cfg, positive, negative, sampler, sigmas, latent_image, noise=None, callback=None): + latent = latent_image + latent_image = latent["samples"] + + if hasattr(comfy.sample, 'fix_empty_latent_channels'): + latent_image = comfy.sample.fix_empty_latent_channels(model, latent_image) + + out = latent.copy() + out['samples'] = latent_image + + if noise is None: + if not add_noise: + noise = Noise_EmptyNoise().generate_noise(out) + else: + noise = Noise_RandomNoise(noise_seed).generate_noise(out) + + noise_mask = None + if "noise_mask" in latent: + noise_mask = latent["noise_mask"] + + x0_output = {} + preview_callback = latent_preview.prepare_callback(model, sigmas.shape[-1] - 1, x0_output) + + if callback is not None: + def touched_callback(step, x0, x, total_steps): + callback(step, x0, x, total_steps) + preview_callback(step, x0, x, total_steps) + else: + touched_callback = preview_callback + + disable_pbar = not comfy.utils.PROGRESS_BAR_ENABLED + + device = mm.get_torch_device() + + noise = noise.to(device) + latent_image = latent_image.to(device) + if noise_mask is not None: + noise_mask = noise_mask.to(device) + + if negative != 'NegativePlaceholder': + # This way is incompatible with Advanced ControlNet, yet. 
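# A small illustration of the sigma-range extraction that get_noise_sampler()
# performs above: BrownianTreeNoiseSampler needs the positive sigma range of
# the full schedule, so the trailing 0.0 sigma must be excluded before taking
# the minimum. The schedule values below are made up for demonstration.
import torch

total_sigmas = torch.tensor([14.61, 8.15, 4.03, 1.92, 0.80, 0.0])
positive_sigmas = total_sigmas[total_sigmas > 0]
sigma_min, sigma_max = positive_sigmas.min(), positive_sigmas.max()
# sigma_min ≈ 0.80, sigma_max ≈ 14.61; naively taking total_sigmas.min()
# would return 0.0 and break the Brownian tree noise sampler.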
+ # guider = comfy.samplers.CFGGuider(model) + # guider.set_conds(positive, negative) + # guider.set_cfg(cfg) + samples = comfy.sample.sample_custom(model, noise, cfg, sampler, sigmas, positive, negative, latent_image, + noise_mask=noise_mask, callback=touched_callback, + disable_pbar=disable_pbar, seed=noise_seed) + else: + guider = nodes_custom_sampler.Guider_Basic(model) + positive = node_helpers.conditioning_set_values(positive, {"guidance": cfg}) + guider.set_conds(positive) + samples = guider.sample(noise, latent_image, sampler, sigmas, denoise_mask=noise_mask, callback=touched_callback, disable_pbar=disable_pbar, seed=noise_seed) + + samples = samples.to(comfy.model_management.intermediate_device()) + + out["samples"] = samples + if "x0" in x0_output: + out_denoised = latent.copy() + out_denoised["samples"] = model.model.process_latent_out(x0_output["x0"].cpu()) + else: + out_denoised = out + return out, out_denoised + + +# When sampling one step at a time, it mitigates the problem. (especially for _sde series samplers) +def separated_sample(model, add_noise, seed, steps, cfg, sampler_name, scheduler, positive, negative, + latent_image, start_at_step, end_at_step, return_with_leftover_noise, sigma_ratio=1.0, sampler_opt=None, noise=None, callback=None, scheduler_func=None): + + if scheduler_func is not None: + total_sigmas = scheduler_func(model, sampler_name, steps) + else: + if sampler_opt is None: + total_sigmas = calculate_sigmas(model, sampler_name, scheduler, steps) + else: + total_sigmas = calculate_sigmas(model, "", scheduler, steps) + + sigmas = total_sigmas + + if end_at_step is not None and end_at_step < (len(total_sigmas) - 1): + sigmas = total_sigmas[:end_at_step + 1] + if not return_with_leftover_noise: + sigmas[-1] = 0 + + if start_at_step is not None: + if start_at_step < (len(sigmas) - 1): + sigmas = sigmas[start_at_step:] * sigma_ratio + else: + if latent_image is not None: + return latent_image + else: + return {'samples': torch.zeros_like(noise)} + + if sampler_opt is None: + impact_sampler = ksampler(sampler_name, total_sigmas) + else: + impact_sampler = sampler_opt + + if len(sigmas) == 0 or (len(sigmas) == 1 and sigmas[0] == 0): + return latent_image + + res = sample_with_custom_noise(model, add_noise, seed, cfg, positive, negative, impact_sampler, sigmas, latent_image, noise=noise, callback=callback) + + if return_with_leftover_noise: + return res[0] + else: + return res[1] + + +def impact_sample(model, seed, steps, cfg, sampler_name, scheduler, positive, negative, latent_image, denoise=1.0, sigma_ratio=1.0, sampler_opt=None, noise=None, scheduler_func=None): + advanced_steps = math.floor(steps / denoise) + start_at_step = advanced_steps - steps + end_at_step = start_at_step + steps + return separated_sample(model, True, seed, advanced_steps, cfg, sampler_name, scheduler, positive, negative, latent_image, + start_at_step, end_at_step, False, scheduler_func=scheduler_func) + + +def ksampler_wrapper(model, seed, steps, cfg, sampler_name, scheduler, positive, negative, latent_image, denoise, + refiner_ratio=None, refiner_model=None, refiner_clip=None, refiner_positive=None, refiner_negative=None, sigma_factor=1.0, noise=None, scheduler_func=None, sampler_opt=None): + + if refiner_ratio is None or refiner_model is None or refiner_clip is None or refiner_positive is None or refiner_negative is None: + # Use separated_sample instead of KSampler for `AYS scheduler` + # refined_latent = nodes.KSampler().sample(model, seed, steps, cfg, sampler_name, scheduler, 
positive, negative, latent_image, denoise * sigma_factor)[0] + + advanced_steps = math.floor(steps / denoise) + start_at_step = advanced_steps - steps + end_at_step = start_at_step + steps + + refined_latent = separated_sample(model, True, seed, advanced_steps, cfg, sampler_name, scheduler, + positive, negative, latent_image, start_at_step, end_at_step, False, + sigma_ratio=sigma_factor, sampler_opt=sampler_opt, noise=noise, scheduler_func=scheduler_func) + else: + advanced_steps = math.floor(steps / denoise) + start_at_step = advanced_steps - steps + end_at_step = start_at_step + math.floor(steps * (1.0 - refiner_ratio)) + + # print(f"pre: {start_at_step} .. {end_at_step} / {advanced_steps}") + temp_latent = separated_sample(model, True, seed, advanced_steps, cfg, sampler_name, scheduler, + positive, negative, latent_image, start_at_step, end_at_step, True, + sigma_ratio=sigma_factor, sampler_opt=sampler_opt, noise=noise, scheduler_func=scheduler_func) + + if 'noise_mask' in latent_image: + # noise_latent = \ + # impact_sampling.separated_sample(refiner_model, "enable", seed, advanced_steps, cfg, sampler_name, + # scheduler, refiner_positive, refiner_negative, latent_image, end_at_step, + # end_at_step, "enable") + + latent_compositor = nodes.NODE_CLASS_MAPPINGS['LatentCompositeMasked']() + temp_latent = latent_compositor.composite(latent_image, temp_latent, 0, 0, False, latent_image['noise_mask'])[0] + + # print(f"post: {end_at_step} .. {advanced_steps + 1} / {advanced_steps}") + refined_latent = separated_sample(refiner_model, False, seed, advanced_steps, cfg, sampler_name, scheduler, + refiner_positive, refiner_negative, temp_latent, end_at_step, advanced_steps + 1, False, + sigma_ratio=sigma_factor, sampler_opt=sampler_opt, scheduler_func=scheduler_func) + + return refined_latent + + +class KSamplerAdvancedWrapper: + params = None + + def __init__(self, model, cfg, sampler_name, scheduler, positive, negative, sampler_opt=None, sigma_factor=1.0, scheduler_func=None): + self.params = model, cfg, sampler_name, scheduler, positive, negative, sigma_factor + self.sampler_opt = sampler_opt + self.scheduler_func = scheduler_func + + def clone_with_conditionings(self, positive, negative): + model, cfg, sampler_name, scheduler, _, _, _ = self.params + return KSamplerAdvancedWrapper(model, cfg, sampler_name, scheduler, positive, negative, self.sampler_opt) + + def sample_advanced(self, add_noise, seed, steps, latent_image, start_at_step, end_at_step, return_with_leftover_noise, hook=None, + recovery_mode="ratio additional", recovery_sampler="AUTO", recovery_sigma_ratio=1.0, noise=None): + + model, cfg, sampler_name, scheduler, positive, negative, sigma_factor = self.params + # steps, start_at_step, end_at_step = self.compensate_denoise(steps, start_at_step, end_at_step) + + if hook is not None: + model, seed, steps, cfg, sampler_name, scheduler, positive, negative, upscaled_latent = hook.pre_ksample_advanced(model, add_noise, seed, steps, cfg, sampler_name, scheduler, + positive, negative, latent_image, start_at_step, end_at_step, + return_with_leftover_noise) + + if recovery_mode != 'DISABLE' and sampler_name in ['uni_pc', 'uni_pc_bh2', 'dpmpp_sde', 'dpmpp_sde_gpu', 'dpmpp_2m_sde', 'dpmpp_2m_sde_gpu', 'dpmpp_3m_sde', 'dpmpp_3m_sde_gpu']: + base_image = latent_image.copy() + if recovery_mode == "ratio between": + sigma_ratio = 1.0 - recovery_sigma_ratio + else: + sigma_ratio = 1.0 + else: + base_image = None + sigma_ratio = 1.0 + + try: + if sigma_ratio > 0: + latent_image = 
separated_sample(model, add_noise, seed, steps, cfg, sampler_name, scheduler, + positive, negative, latent_image, start_at_step, end_at_step, + return_with_leftover_noise, sigma_ratio=sigma_ratio * sigma_factor, + sampler_opt=self.sampler_opt, noise=noise, scheduler_func=self.scheduler_func) + except ValueError as e: + if str(e) == 'sigma_min and sigma_max must not be 0': + logging.warning("\nWARN: sampling skipped - sigma_min and sigma_max are 0") + return latent_image + + if (recovery_sigma_ratio > 0 and recovery_mode != 'DISABLE' and + sampler_name in ['uni_pc', 'uni_pc_bh2', 'dpmpp_sde', 'dpmpp_sde_gpu', 'dpmpp_2m_sde', 'dpmpp_2m_sde_gpu', 'dpmpp_3m_sde', 'dpmpp_3m_sde_gpu']): + compensate = 0 if sampler_name in ['uni_pc', 'uni_pc_bh2', 'dpmpp_sde', 'dpmpp_sde_gpu', 'dpmpp_2m_sde', 'dpmpp_2m_sde_gpu', 'dpmpp_3m_sde', 'dpmpp_3m_sde_gpu'] else 2 + if recovery_sampler == "AUTO": + recovery_sampler = 'dpm_fast' if sampler_name in ['uni_pc', 'uni_pc_bh2', 'dpmpp_sde', 'dpmpp_sde_gpu'] else 'dpmpp_2m' + + latent_compositor = nodes.NODE_CLASS_MAPPINGS['LatentCompositeMasked']() + + noise_mask = latent_image['noise_mask'] + + if len(noise_mask.shape) == 4: + noise_mask = noise_mask.squeeze(0).squeeze(0) + + latent_image = latent_compositor.composite(base_image, latent_image, 0, 0, False, noise_mask)[0] + + try: + latent_image = separated_sample(model, add_noise, seed, steps, cfg, recovery_sampler, scheduler, + positive, negative, latent_image, start_at_step-compensate, end_at_step, return_with_leftover_noise, + sigma_ratio=recovery_sigma_ratio * sigma_factor, sampler_opt=self.sampler_opt, scheduler_func=self.scheduler_func) + except ValueError as e: + if str(e) == 'sigma_min and sigma_max must not be 0': + logging.warning("\nWARN: sampling skipped - sigma_min and sigma_max are 0") + + return latent_image + + +class KSamplerWrapper: + params = None + + def __init__(self, model, seed, steps, cfg, sampler_name, scheduler, positive, negative, denoise, scheduler_func=None): + self.params = model, seed, steps, cfg, sampler_name, scheduler, positive, negative, denoise + self.scheduler_func = scheduler_func + + def sample(self, latent_image, hook=None): + model, seed, steps, cfg, sampler_name, scheduler, positive, negative, denoise = self.params + + if hook is not None: + model, seed, steps, cfg, sampler_name, scheduler, positive, negative, upscaled_latent, denoise = \ + hook.pre_ksample(model, seed, steps, cfg, sampler_name, scheduler, positive, negative, latent_image, denoise) + + return impact_sample(model, seed, steps, cfg, sampler_name, scheduler, positive, negative, latent_image, denoise, scheduler_func=self.scheduler_func) diff --git a/zavodik/nodes/ComfyUI-Impact-Pack/modules/impact/impact_server.py b/zavodik/nodes/ComfyUI-Impact-Pack/modules/impact/impact_server.py new file mode 100644 index 0000000000000000000000000000000000000000..dceed0af22cc5382f4d99b2de71ffebd2b307cf6 --- /dev/null +++ b/zavodik/nodes/ComfyUI-Impact-Pack/modules/impact/impact_server.py @@ -0,0 +1,619 @@ +import io +import logging +import os +import random +import threading +import traceback +from io import BytesIO + +import comfy +import folder_paths +import impact +import impact.core as core +import impact.impact_pack as impact_pack +import impact.utils as utils +import nodes +import numpy as np +import torchvision +from aiohttp import web +from impact.utils import to_tensor +from PIL import Image +from segment_anything import SamPredictor, sam_model_registry +from server import PromptServer + +sam_predictor = None 
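# A hedged sketch of the load-on-demand, lock-guarded global pattern used by
# the SAM server module below: a single predictor instance is shared across
# aiohttp handlers, with all prepare/detect/release paths serialized by a lock
# so they never race. The names here (get_model/release_model/loader) are
# illustrative only; the module itself uses a threading.Condition plus a
# background thread for the expensive load.
import threading

_model = None
_model_lock = threading.Lock()

def get_model(loader):
    """Return the shared model, loading it on first use under the lock."""
    global _model
    with _model_lock:
        if _model is None:
            _model = loader()  # expensive; runs at most once until released
        return _model

def release_model():
    """Drop the shared reference so the model can be garbage-collected."""
    global _model
    with _model_lock:
        _model = None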
+default_sam_model_name = os.path.join(impact_pack.model_path, "sams", "sam_vit_b_01ec64.pth")
+
+sam_lock = threading.Condition()
+
+last_prepare_data = None
+
+
+def async_prepare_sam(image_dir, model_name, filename):
+    with sam_lock:
+        global sam_predictor
+
+        if 'vit_h' in model_name:
+            model_kind = 'vit_h'
+        elif 'vit_l' in model_name:
+            model_kind = 'vit_l'
+        else:
+            model_kind = 'vit_b'
+
+        sam_model = sam_model_registry[model_kind](checkpoint=model_name)
+        sam_predictor = SamPredictor(sam_model)
+
+        image_path = os.path.join(image_dir, filename)
+        image = nodes.LoadImage().load_image(image_path)[0]
+        image = np.clip(255. * image.cpu().numpy().squeeze(), 0, 255).astype(np.uint8)
+
+        if impact.config.get_config()['sam_editor_cpu']:
+            device = 'cpu'
+        else:
+            device = comfy.model_management.get_torch_device()
+
+        sam_predictor.model.to(device=device)
+        sam_predictor.set_image(image, "RGB")
+        sam_predictor.model.cpu()
+
+
+@PromptServer.instance.routes.post("/sam/prepare")
+async def sam_prepare(request):
+    global sam_predictor
+    global last_prepare_data
+    data = await request.json()
+
+    with sam_lock:
+        if last_prepare_data is not None and last_prepare_data == data:
+            # already loaded: skip -- prevent redundant loading
+            return web.Response(status=200)
+
+        last_prepare_data = data
+
+        model_name = 'sam_vit_b_01ec64.pth'
+        if data['sam_model_name'] == 'auto':
+            model_name = impact.config.get_config()['sam_editor_model']
+
+        model_path = folder_paths.get_full_path("sams", model_name)
+
+        if model_path is None:
+            logging.error(f"[Impact Pack] The '{model_name}' model file cannot be found in any sams model path.")
+            return web.Response(status=400)
+
+        logging.info(f"[Impact Pack] Loading SAM model '{model_path}'")
+
+        filename, image_dir = folder_paths.annotated_filepath(data["filename"])
+
+        if image_dir is None:
+            typ = data['type'] if data['type'] != '' else 'output'
+            image_dir = folder_paths.get_directory_by_type(typ)
+            if data['subfolder'] is not None and data['subfolder'] != '':
+                image_dir += f"/{data['subfolder']}"
+
+        if image_dir is None:
+            return web.Response(status=400)
+
+        thread = threading.Thread(target=async_prepare_sam, args=(image_dir, model_path, filename,))
+        thread.start()
+
+        logging.info("[Impact Pack] SAM model loading started in the background.")
+        return web.Response(status=200)
+
+
+@PromptServer.instance.routes.post("/sam/release")
+async def release_sam(request):
+    global sam_predictor
+
+    with sam_lock:
+        sam_predictor = None
+
+    logging.info("[Impact Pack] unloading SAM model")
+
+
+@PromptServer.instance.routes.post("/sam/detect")
+async def sam_detect(request):
+    global sam_predictor
+    with sam_lock:
+        if sam_predictor is not None:
+            if impact.config.get_config()['sam_editor_cpu']:
+                device = 'cpu'
+            else:
+                device = comfy.model_management.get_torch_device()
+
+            sam_predictor.model.to(device=device)
+            try:
+                data = await request.json()
+
+                positive_points = data['positive_points']
+                negative_points = data['negative_points']
+                threshold = data['threshold']
+
+                points = []
+                plabs = []
+
+                for p in positive_points:
+                    points.append(p)
+                    plabs.append(1)
+
+                for p in negative_points:
+                    points.append(p)
+                    plabs.append(0)
+
+                detected_masks = core.sam_predict(sam_predictor, points, plabs, None, threshold)
+                mask = utils.combine_masks2(detected_masks)
+
+                if mask is None:
+                    return web.Response(status=400)
+
+                image = mask.reshape((-1, 1, mask.shape[-2], mask.shape[-1])).movedim(1, -1).expand(-1, -1, -1, 3)
+                i = 255.
* image.cpu().numpy() + + img = Image.fromarray(np.clip(i[0], 0, 255).astype(np.uint8)) + + img_buffer = io.BytesIO() + img.save(img_buffer, format='png') + + headers = {'Content-Type': 'image/png'} + finally: + sam_predictor.model.to(device="cpu") + + return web.Response(body=img_buffer.getvalue(), headers=headers) + + else: + return web.Response(status=400) + + +@PromptServer.instance.routes.get("/impact/wildcards/refresh") +async def wildcards_refresh(request): + impact.wildcards.wildcard_load() + return web.Response(status=200) + + +@PromptServer.instance.routes.get("/impact/wildcards/list") +async def wildcards_list(request): + data = {'data': impact.wildcards.get_wildcard_list()} + return web.json_response(data) + + +@PromptServer.instance.routes.get("/impact/wildcards/list/loaded") +async def wildcards_list_loaded(request): + """ + Get list of actually loaded wildcards (progressive loading in on-demand mode). + + Returns: + - In on-demand mode: only wildcards that have been loaded into memory + - In full cache mode: same as /wildcards/list (all wildcards) + """ + data = { + 'data': impact.wildcards.get_loaded_wildcard_list(), + 'on_demand_mode': impact.wildcards.is_on_demand_mode(), + 'total_available': len(impact.wildcards.available_wildcards) if impact.wildcards.is_on_demand_mode() else len(impact.wildcards.wildcard_dict) + } + return web.json_response(data) + + +@PromptServer.instance.routes.post("/impact/wildcards") +async def populate_wildcards(request): + data = await request.json() + populated = impact.wildcards.process(data['text'], data.get('seed', None)) + return web.json_response({"text": populated}) + + +segs_picker_map = {} + +@PromptServer.instance.routes.get("/impact/segs/picker/count") +async def segs_picker_count(request): + node_id = request.rel_url.query.get('id', '') + + if node_id in segs_picker_map: + res = len(segs_picker_map[node_id]) + return web.Response(status=200, text=str(res)) + + return web.Response(status=400) + + +@PromptServer.instance.routes.get("/impact/segs/picker/view") +async def segs_picker(request): + node_id = request.rel_url.query.get('id', '') + idx = int(request.rel_url.query.get('idx', '')) + + if node_id in segs_picker_map and idx < len(segs_picker_map[node_id]): + img = to_tensor(segs_picker_map[node_id][idx]).permute(0, 3, 1, 2).squeeze(0) + pil = torchvision.transforms.ToPILImage('RGB')(img) + + image_bytes = BytesIO() + pil.save(image_bytes, format="PNG") + image_bytes.seek(0) + return web.Response(status=200, body=image_bytes, content_type='image/png', headers={"Content-Disposition": f"filename={node_id}{idx}.png"}) + + return web.Response(status=400) + + +@PromptServer.instance.routes.get("/view/validate") +async def view_validate(request): + if "filename" in request.rel_url.query: + filename = request.rel_url.query["filename"] + subfolder = request.rel_url.query["subfolder"] + filename, base_dir = folder_paths.annotated_filepath(filename) + + if filename == '' or filename[0] == '/' or '..' 
in filename: + return web.Response(status=400) + + if base_dir is None: + base_dir = folder_paths.get_input_directory() + + file = os.path.join(base_dir, subfolder, filename) + + if os.path.isfile(file): + return web.Response(status=200) + + return web.Response(status=400) + + +@PromptServer.instance.routes.get("/impact/validate/pb_id_image") +async def view_pb_id_image(request): + if "id" in request.rel_url.query: + pb_id = request.rel_url.query["id"] + + if pb_id not in core.preview_bridge_image_id_map: + return web.Response(status=400) + + file = core.preview_bridge_image_id_map[pb_id] + if os.path.isfile(file): + return web.Response(status=200) + + return web.Response(status=400) + + +@PromptServer.instance.routes.get("/impact/set/pb_id_image") +async def set_previewbridge_image(request): + try: + if "filename" in request.rel_url.query: + node_id = request.rel_url.query["node_id"] + filename = request.rel_url.query["filename"] + path_type = request.rel_url.query["type"] + subfolder = request.rel_url.query["subfolder"] + filename, output_dir = folder_paths.annotated_filepath(filename) + + if filename == '' or filename[0] == '/' or '..' in filename: + return web.Response(status=400) + + if output_dir is None: + if path_type == 'input': + output_dir = folder_paths.get_input_directory() + elif path_type == 'output': + output_dir = folder_paths.get_output_directory() + else: + output_dir = folder_paths.get_temp_directory() + + file = os.path.join(output_dir, subfolder, filename) + item = { + 'filename': filename, + 'type': path_type, + 'subfolder': subfolder, + } + pb_id = core.set_previewbridge_image(node_id, file, item) + + return web.Response(status=200, text=pb_id) + except Exception: + traceback.print_exc() + + return web.Response(status=400) + + +@PromptServer.instance.routes.get("/impact/get/pb_id_image") +async def get_previewbridge_image(request): + if "id" in request.rel_url.query: + pb_id = request.rel_url.query["id"] + + if pb_id in core.preview_bridge_image_id_map: + _, path_item = core.preview_bridge_image_id_map[pb_id] + return web.json_response(path_item) + + return web.Response(status=400) + + +@PromptServer.instance.routes.get("/impact/view/pb_id_image") +async def view_previewbridge_image(request): + if "id" in request.rel_url.query: + pb_id = request.rel_url.query["id"] + + if pb_id in core.preview_bridge_image_id_map: + file = core.preview_bridge_image_id_map[pb_id] + + with Image.open(file): + filename = os.path.basename(file) + return web.FileResponse(file, headers={"Content-Disposition": f"filename=\"{filename}\""}) + + return web.Response(status=400) + + +def onprompt_for_switch(json_data): + inversed_switch_info = {} + onprompt_switch_info = {} + onprompt_cond_branch_info = {} + disabled_switch = set() + + + for k, v in json_data['prompt'].items(): + if 'class_type' not in v: + continue + + cls = v['class_type'] + if cls == 'ImpactInversedSwitch': + # if 'sel_mode' is 'select_on_prompt' + if 'sel_mode' in v['inputs'] and v['inputs']['sel_mode'] and 'select' in v['inputs']: + select_input = v['inputs']['select'] + # if 'select' is converted input + if isinstance(select_input, list) and len(select_input) == 2: + input_node = json_data['prompt'][select_input[0]] + if input_node['class_type'] == 'ImpactInt' and 'inputs' in input_node and 'value' in input_node['inputs']: + inversed_switch_info[k] = input_node['inputs']['value'] + else: + logging.warning(f"\n##### ##### #####\n[Impact Pack] {cls}: For the 'select' operation, only 'select_index' of the 
'ImpactInversedSwitch', which is not an input, or 'ImpactInt' and 'Primitive' are allowed as inputs if 'select_on_prompt' is selected.\n##### ##### #####\n")
+                else:
+                    inversed_switch_info[k] = select_input
+
+        elif cls in ['ImpactSwitch', 'LatentSwitch', 'SEGSSwitch', 'ImpactMakeImageList']:
+            # if 'sel_mode' is 'select_on_prompt'
+            if 'sel_mode' in v['inputs'] and v['inputs']['sel_mode'] and 'select' in v['inputs']:
+                select_input = v['inputs']['select']
+                # if 'select' is converted input
+                if isinstance(select_input, list) and len(select_input) == 2:
+                    input_node = json_data['prompt'][select_input[0]]
+                    if input_node['class_type'] == 'ImpactInt' and 'inputs' in input_node and 'value' in input_node['inputs']:
+                        onprompt_switch_info[k] = input_node['inputs']['value']
+                    if input_node['class_type'] == 'ImpactSwitch' and 'inputs' in input_node and 'select' in input_node['inputs']:
+                        if isinstance(input_node['inputs']['select'], int):
+                            onprompt_switch_info[k] = input_node['inputs']['select']
+                        else:
+                            logging.warning(f"\n##### ##### #####\n[Impact Pack] {cls}: For the 'select' operation, only 'select_index' of the 'ImpactSwitch', which is not an input, or 'ImpactInt' and 'Primitive' are allowed as inputs if 'select_on_prompt' is selected.\n##### ##### #####\n")
+                else:
+                    onprompt_switch_info[k] = select_input
+
+            if k in onprompt_switch_info and f'input{onprompt_switch_info[k]}' not in v['inputs']:
+                # disconnect output
+                disabled_switch.add(k)
+
+        elif cls == 'ImpactConditionalBranchSelMode':
+            if 'sel_mode' in v['inputs'] and v['inputs']['sel_mode'] and 'cond' in v['inputs']:
+                cond_input = v['inputs']['cond']
+                if isinstance(cond_input, list) and len(cond_input) == 2:
+                    input_node = json_data['prompt'][cond_input[0]]
+                    if (input_node['class_type'] == 'ImpactValueReceiver' and 'inputs' in input_node
+                            and 'value' in input_node['inputs'] and 'typ' in input_node['inputs']):
+                        if 'BOOLEAN' == input_node['inputs']['typ']:
+                            try:
+                                onprompt_cond_branch_info[k] = input_node['inputs']['value'].lower() == "true"
+                            except Exception:
+                                pass
+                else:
+                    onprompt_cond_branch_info[k] = cond_input
+
+    for k, v in json_data['prompt'].items():
+        disable_targets = set()
+
+        for kk, vv in v['inputs'].items():
+            if isinstance(vv, list) and len(vv) == 2:
+                if vv[0] in inversed_switch_info:
+                    if vv[1] + 1 != inversed_switch_info[vv[0]]:
+                        disable_targets.add(kk)
+                    else:
+                        # the selected output of this inversed switch is consumed; keep it connected
+                        del inversed_switch_info[vv[0]]
+
+                if vv[0] in disabled_switch:
+                    disable_targets.add(kk)
+
+        if k in onprompt_switch_info:
+            selected_slot_name = f"input{onprompt_switch_info[k]}"
+            for kk, vv in v['inputs'].items():
+                if kk != selected_slot_name and kk.startswith('input'):
+                    disable_targets.add(kk)
+
+        if k in onprompt_cond_branch_info:
+            selected_slot_name = "tt_value" if onprompt_cond_branch_info[k] else "ff_value"
+            for kk, vv in v['inputs'].items():
+                if kk in ['tt_value', 'ff_value'] and kk != selected_slot_name:
+                    disable_targets.add(kk)
+
+        for kk in disable_targets:
+            del v['inputs'][kk]
+
+    # any inversed switch still recorded here selected an out-of-range output; disconnect its input
+    for target in inversed_switch_info.keys():
+        del json_data['prompt'][target]['inputs']['input']
+
+
+def onprompt_for_pickers(json_data):
+    detected_pickers = set()
+
+    for k, v in json_data['prompt'].items():
+        if 'class_type' not in v:
+            continue
+
+        cls = v['class_type']
+        if cls == 'ImpactSEGSPicker':
+            detected_pickers.add(k)
+
+    # garbage collection
+    keys_to_remove = [key for key in segs_picker_map if key not in detected_pickers]
+    for key in keys_to_remove:
+        del segs_picker_map[key]
+
+
+def
gc_preview_bridge_cache(json_data): + prompt_keys = json_data['prompt'].keys() + + for key in list(core.preview_bridge_cache.keys()): + if key not in prompt_keys: + # print(f"key deleted [PB]: {key}") + del core.preview_bridge_cache[key] + + for key in list(core.preview_bridge_last_mask_cache.keys()): + if key not in prompt_keys: + # print(f"key deleted [PB_last_mask]: {key}") + del core.preview_bridge_last_mask_cache[key] + + +def workflow_imagereceiver_update(json_data): + prompt = json_data['prompt'] + + for v in prompt.values(): + if 'class_type' in v and v['class_type'] == 'ImageReceiver': + if v['inputs']['save_to_workflow']: + v['inputs']['image'] = "#DATA" + + +def regional_sampler_seed_update(json_data): + prompt = json_data['prompt'] + + for k, v in prompt.items(): + if 'class_type' in v and v['class_type'] == 'RegionalSampler': + seed_2nd_mode = v['inputs']['seed_2nd_mode'] + + new_seed = None + if seed_2nd_mode == 'increment': + new_seed = v['inputs']['seed_2nd']+1 + if new_seed > 1125899906842624: + new_seed = 0 + elif seed_2nd_mode == 'decrement': + new_seed = v['inputs']['seed_2nd']-1 + if new_seed < 0: + new_seed = 1125899906842624 + elif seed_2nd_mode == 'randomize': + new_seed = random.randint(0, 1125899906842624) + + if new_seed is not None: + PromptServer.instance.send_sync("impact-node-feedback", {"node_id": k, "widget_name": "seed_2nd", "type": "INT", "value": new_seed}) + + +def find_input_value(input_node, prompt, input_type=int, input_keys=('value',)): + input_val = None + + try: + for n in input_keys: + input_val = input_node['inputs'].get(n, None) + if isinstance(input_val, input_type): + break + elif isinstance(input_val, list) and len(input_val): + input_val = find_input_value(prompt[input_val[0]], prompt=prompt, input_type=input_type, input_keys=input_keys) + if input_val is not None: + break + + except Exception as e : + logging.warning(f"[Impact Pack] Error encountered on find {input_type} value - {e}") + + return input_val + + +def onprompt_populate_wildcards(json_data): + prompt = json_data['prompt'] + + updated_widget_values = {} + for k, v in prompt.items(): + if 'class_type' in v and (v['class_type'] == 'ImpactWildcardEncode' or v['class_type'] == 'ImpactWildcardProcessor'): + inputs = v['inputs'] + + # legacy adapter + if isinstance(inputs['mode'], bool): + if inputs['mode']: + new_mode = 'populate' + else: + new_mode = 'fixed' + + inputs['mode'] = new_mode + + if inputs['mode'] == 'populate' and isinstance(inputs['populated_text'], str): + if isinstance(inputs['seed'], list): + try: + input_node = prompt[inputs['seed'][0]] + if input_node['class_type'] == 'ImpactInt': + input_seed = int(input_node['inputs']['value']) + if not isinstance(input_seed, int): + continue + elif input_node['class_type'] == 'Seed (rgthree)': + input_seed = int(input_node['inputs']['seed']) + if not isinstance(input_seed, int): + continue + else: + input_seed = find_input_value(input_node, prompt=prompt, input_type=int, input_keys=('int', 'seed', 'value')) + if input_seed is None: + logging.info(f"[Impact Pack] Only `ImpactInt`, `Seed (rgthree)` and `Primitive` Node are allowed as the seed for '{v['class_type']}'. It will be ignored. 
") + continue + except Exception: + continue + else: + input_seed = int(inputs['seed']) + + inputs['populated_text'] = impact.wildcards.process(inputs['wildcard_text'], input_seed) + inputs['mode'] = 'reproduce' + + PromptServer.instance.send_sync("impact-node-feedback", {"node_id": k, "widget_name": "populated_text", "type": "STRING", "value": inputs['populated_text']}) + updated_widget_values[k] = inputs['populated_text'] + + if inputs['mode'] == 'reproduce': + PromptServer.instance.send_sync("impact-node-feedback", {"node_id": k, "widget_name": "mode", "type": "STRING", "value": 'populate'}) + + + + match json_data: + case {"extra_data": {"extra_pnginfo": {"workflow": {"nodes": nodes}}}}: + for node in nodes: + match node: + case {"id": id, "widgets_values": widgets_values}: + key = str(id) + if key in updated_widget_values: + widgets_values[1] = updated_widget_values[key] + widgets_values[2] = "reproduce" + + +def onprompt_for_remote(json_data): + prompt = json_data['prompt'] + + for v in prompt.values(): + if 'class_type' in v: + cls = v['class_type'] + if cls == 'ImpactRemoteBoolean' or cls == 'ImpactRemoteInt': + inputs = v['inputs'] + node_id = str(inputs['node_id']) + + if node_id not in prompt: + continue + + target_inputs = prompt[node_id]['inputs'] + + widget_name = inputs['widget_name'] + if widget_name in target_inputs: + widget_type = None + if cls == 'ImpactRemoteBoolean' and isinstance(target_inputs[widget_name], bool): + widget_type = 'BOOLEAN' + + elif cls == 'ImpactRemoteInt' and (isinstance(target_inputs[widget_name], int) or isinstance(target_inputs[widget_name], float)): + widget_type = 'INT' + + if widget_type is None: + break + + target_inputs[widget_name] = inputs['value'] + PromptServer.instance.send_sync("impact-node-feedback", {"node_id": node_id, "widget_name": widget_name, "type": widget_type, "value": inputs['value']}) + + +def onprompt(json_data): + try: + onprompt_for_remote(json_data) # NOTE: top priority + onprompt_for_switch(json_data) + onprompt_for_pickers(json_data) + onprompt_populate_wildcards(json_data) + gc_preview_bridge_cache(json_data) + workflow_imagereceiver_update(json_data) + regional_sampler_seed_update(json_data) + core.current_prompt = json_data + except Exception: + logging.exception("[Impact Pack] ComfyUI-Impact-Pack: Error on prompt - several features will not work.") + + return json_data + + +PromptServer.instance.add_on_prompt_handler(onprompt) diff --git a/zavodik/nodes/ComfyUI-Impact-Pack/modules/impact/logics.py b/zavodik/nodes/ComfyUI-Impact-Pack/modules/impact/logics.py new file mode 100644 index 0000000000000000000000000000000000000000..0f425be549b3842864e4ae671f3f8919acc3e971 --- /dev/null +++ b/zavodik/nodes/ComfyUI-Impact-Pack/modules/impact/logics.py @@ -0,0 +1,779 @@ +import sys +import time + +import execution +import impact.impact_server +from server import PromptServer +from impact.utils import any_typ +import impact.core as core +import re +import nodes +import logging + + +class ImpactCompare: + @classmethod + def INPUT_TYPES(cls): + return { + "required": { + "cmp": (['a = b', 'a <> b', 'a > b', 'a < b', 'a >= b', 'a <= b', 'tt', 'ff'],), + "a": (any_typ, ), + "b": (any_typ, ), + }, + } + + FUNCTION = "doit" + CATEGORY = "ImpactPack/Logic" + + RETURN_TYPES = ("BOOLEAN", ) + + def doit(self, cmp, a, b): + if cmp == "a = b": + return (a == b, ) + elif cmp == "a <> b": + return (a != b, ) + elif cmp == "a > b": + return (a > b, ) + elif cmp == "a < b": + return (a < b, ) + elif cmp == "a >= b": + return (a >= b, ) + 
elif cmp == "a <= b": + return (a <= b, ) + elif cmp == 'tt': + return (True, ) + else: + return (False, ) + + +class ImpactNotEmptySEGS: + @classmethod + def INPUT_TYPES(cls): + return {"required": {"segs": ("SEGS",)}} + + FUNCTION = "doit" + CATEGORY = "ImpactPack/Logic" + + RETURN_TYPES = ("BOOLEAN", ) + + def doit(self, segs): + return (segs[1] != [], ) + + +class ImpactConditionalBranch: + @classmethod + def INPUT_TYPES(cls): + return { + "required": { + "cond": ("BOOLEAN",), + "tt_value": (any_typ,{"lazy": True}), + "ff_value": (any_typ,{"lazy": True}), + }, + } + + FUNCTION = "doit" + CATEGORY = "ImpactPack/Logic" + + RETURN_TYPES = (any_typ, ) + + def check_lazy_status(self, cond, tt_value=None, ff_value=None): + if cond and tt_value is None: + return ["tt_value"] + if not cond and ff_value is None: + return ["ff_value"] + + def doit(self, cond, tt_value=None, ff_value=None): + if cond: + return (tt_value,) + else: + return (ff_value,) + + +class ImpactConditionalBranchSelMode: + @classmethod + def INPUT_TYPES(cls): + if not core.is_execution_model_version_supported(): + required_inputs = { + "cond": ("BOOLEAN",), + "sel_mode": ("BOOLEAN", {"default": True, "label_on": "select_on_prompt", "label_off": "select_on_execution"}), + } + else: + required_inputs = { + "cond": ("BOOLEAN",), + } + + return { + "required": required_inputs, + "optional": { + "tt_value": (any_typ,), + "ff_value": (any_typ,), + }, + } + + FUNCTION = "doit" + CATEGORY = "ImpactPack/Logic" + + RETURN_TYPES = (any_typ, ) + + def doit(self, cond, tt_value=None, ff_value=None, **kwargs): + if cond: + return (tt_value,) + else: + return (ff_value,) + + +class ImpactConvertDataType: + def __init__(self): + pass + + @classmethod + def INPUT_TYPES(cls): + return {"required": {"value": (any_typ,)}} + + RETURN_TYPES = ("STRING", "FLOAT", "INT", "BOOLEAN") + FUNCTION = "doit" + + CATEGORY = "ImpactPack/Logic" + + @staticmethod + def is_number(string): + pattern = re.compile(r'^[-+]?[0-9]*\.?[0-9]+$') + return bool(pattern.match(string)) + + def doit(self, value): + if self.is_number(str(value)): + num = value + else: + if str.lower(str(value)) != "false": + num = 1 + else: + num = 0 + return (str(value), float(num), int(float(num)), bool(float(num)), ) + + +class ImpactIfNone: + def __init__(self): + pass + + @classmethod + def INPUT_TYPES(cls): + return { + "required": {}, + "optional": {"signal": (any_typ,), "any_input": (any_typ,), } + } + + RETURN_TYPES = (any_typ, "BOOLEAN") + RETURN_NAMES = ("signal_opt", "bool") + FUNCTION = "doit" + + CATEGORY = "ImpactPack/Logic" + + def doit(self, signal=None, any_input=None): + if any_input is None: + return (signal, False, ) + else: + return (signal, True, ) + + +class ImpactLogicalOperators: + @classmethod + def INPUT_TYPES(cls): + return { + "required": { + "operator": (['and', 'or', 'xor'],), + "bool_a": ("BOOLEAN", {"forceInput": True}), + "bool_b": ("BOOLEAN", {"forceInput": True}), + }, + } + + FUNCTION = "doit" + CATEGORY = "ImpactPack/Logic" + + RETURN_TYPES = ("BOOLEAN", ) + + def doit(self, operator, bool_a, bool_b): + if operator == "and": + return (bool_a and bool_b, ) + elif operator == "or": + return (bool_a or bool_b, ) + else: + return (bool_a != bool_b, ) + + +class ImpactConditionalStopIteration: + @classmethod + def INPUT_TYPES(cls): + return { + "required": { "cond": ("BOOLEAN", {"forceInput": True}), }, + } + + FUNCTION = "doit" + CATEGORY = "ImpactPack/Logic" + + RETURN_TYPES = () + + OUTPUT_NODE = True + + def doit(self, cond): + if cond: + 
PromptServer.instance.send_sync("stop-iteration", {}) + return {} + + +class ImpactNeg: + @classmethod + def INPUT_TYPES(cls): + return { + "required": { "value": ("BOOLEAN", {"forceInput": True}), }, + } + + FUNCTION = "doit" + CATEGORY = "ImpactPack/Logic" + + RETURN_TYPES = ("BOOLEAN", ) + + def doit(self, value): + return (not value, ) + + +class ImpactInt: + @classmethod + def INPUT_TYPES(cls): + return { + "required": { + "value": ("INT", {"default": 0, "min": 0, "max": sys.maxsize, "step": 1}), + }, + } + + FUNCTION = "doit" + CATEGORY = "ImpactPack/Logic" + + RETURN_TYPES = ("INT", ) + + def doit(self, value): + return (value, ) + + +class ImpactFloat: + @classmethod + def INPUT_TYPES(cls): + return { + "required": { + "value": ("FLOAT", {"default": 1.0, "min": -3.402823466e+38, "max": 3.402823466e+38}), + }, + } + + FUNCTION = "doit" + CATEGORY = "ImpactPack/Logic" + + RETURN_TYPES = ("FLOAT", ) + + def doit(self, value): + return (value, ) + + +class ImpactBoolean: + @classmethod + def INPUT_TYPES(cls): + return { + "required": { + "value": ("BOOLEAN", {"default": False}), + }, + } + + FUNCTION = "doit" + CATEGORY = "ImpactPack/Logic" + + RETURN_TYPES = ("BOOLEAN", ) + + def doit(self, value): + return (value, ) + + +class ImpactValueSender: + @classmethod + def INPUT_TYPES(cls): + return {"required": { + "value": (any_typ, ), + "link_id": ("INT", {"default": 0, "min": 0, "max": sys.maxsize, "step": 1}), + }, + "optional": { + "signal_opt": (any_typ,), + } + } + + OUTPUT_NODE = True + + FUNCTION = "doit" + + CATEGORY = "ImpactPack/Logic" + + RETURN_TYPES = (any_typ, ) + RETURN_NAMES = ("signal", ) + + def doit(self, value, link_id=0, signal_opt=None): + PromptServer.instance.send_sync("value-send", {"link_id": link_id, "value": value}) + return (signal_opt, ) + + +class ImpactIntConstSender: + @classmethod + def INPUT_TYPES(cls): + return {"required": { + "signal": (any_typ, ), + "value": ("INT", {"default": 0, "min": 0, "max": sys.maxsize, "step": 1}), + "link_id": ("INT", {"default": 0, "min": 0, "max": sys.maxsize, "step": 1}), + }, + } + + OUTPUT_NODE = True + + FUNCTION = "doit" + + CATEGORY = "ImpactPack/Logic" + + RETURN_TYPES = () + + def doit(self, signal, value, link_id=0): + PromptServer.instance.send_sync("value-send", {"link_id": link_id, "value": value}) + return {} + + +class ImpactValueReceiver: + @classmethod + def INPUT_TYPES(cls): + return {"required": { + "typ": (["STRING", "INT", "FLOAT", "BOOLEAN"], ), + "value": ("STRING", {"default": ""}), + "link_id": ("INT", {"default": 0, "min": 0, "max": sys.maxsize, "step": 1}), + }, + } + + FUNCTION = "doit" + + CATEGORY = "ImpactPack/Logic" + + RETURN_TYPES = (any_typ, ) + + def doit(self, typ, value, link_id=0): + if typ == "INT": + return (int(value), ) + elif typ == "FLOAT": + return (float(value), ) + elif typ == "BOOLEAN": + return (value.lower() == "true", ) + else: + return (value, ) + + +class ImpactImageInfo: + @classmethod + def INPUT_TYPES(cls): + return {"required": { + "value": ("IMAGE", ), + }, + } + + FUNCTION = "doit" + + CATEGORY = "ImpactPack/Logic/_for_test" + + RETURN_TYPES = ("INT", "INT", "INT", "INT") + RETURN_NAMES = ("batch", "height", "width", "channel") + + def doit(self, value): + return (value.shape[0], value.shape[1], value.shape[2], value.shape[3]) + + +class ImpactLatentInfo: + @classmethod + def INPUT_TYPES(cls): + return {"required": { + "value": ("LATENT", ), + }, + } + + FUNCTION = "doit" + + CATEGORY = "ImpactPack/Logic/_for_test" + + RETURN_TYPES = ("INT", "INT", "INT", "INT") + 
RETURN_NAMES = ("batch", "height", "width", "channel") + + def doit(self, value): + shape = value['samples'].shape + return (shape[0], shape[2] * 8, shape[3] * 8, shape[1]) + + +class ImpactMinMax: + @classmethod + def INPUT_TYPES(cls): + return {"required": { + "mode": ("BOOLEAN", {"default": True, "label_on": "max", "label_off": "min"}), + "a": (any_typ,), + "b": (any_typ,), + }, + } + + FUNCTION = "doit" + + CATEGORY = "ImpactPack/Logic/_for_test" + + RETURN_TYPES = ("INT", ) + + def doit(self, mode, a, b): + if mode: + return (max(a, b), ) + else: + return (min(a, b),) + + +class ImpactQueueTrigger: + @classmethod + def INPUT_TYPES(cls): + return {"required": { + "signal": (any_typ,), + "mode": ("BOOLEAN", {"default": True, "label_on": "Trigger", "label_off": "Don't trigger"}), + } + } + + FUNCTION = "doit" + + CATEGORY = "ImpactPack/Logic/_for_test" + RETURN_TYPES = (any_typ,) + RETURN_NAMES = ("signal_opt",) + OUTPUT_NODE = True + + def doit(self, signal, mode): + if(mode): + PromptServer.instance.send_sync("impact-add-queue", {}) + + return (signal,) + + +class ImpactQueueTriggerCountdown: + @classmethod + def INPUT_TYPES(cls): + return {"required": { + "count": ("INT", {"default": 0, "min": 0, "max": 0xffffffffffffffff}), + "total": ("INT", {"default": 10, "min": 1, "max": 0xffffffffffffffff}), + "mode": ("BOOLEAN", {"default": True, "label_on": "Trigger", "label_off": "Don't trigger"}), + }, + "optional": {"signal": (any_typ,),}, + "hidden": {"unique_id": "UNIQUE_ID"} + } + + FUNCTION = "doit" + + CATEGORY = "ImpactPack/Logic/_for_test" + RETURN_TYPES = (any_typ, "INT", "INT") + RETURN_NAMES = ("signal_opt", "count", "total") + OUTPUT_NODE = True + + def doit(self, count, total, mode, unique_id, signal=None): + if (mode): + if count < total - 1: + PromptServer.instance.send_sync("impact-node-feedback", + {"node_id": unique_id, "widget_name": "count", "type": "int", "value": count+1}) + PromptServer.instance.send_sync("impact-add-queue", {}) + if count >= total - 1: + PromptServer.instance.send_sync("impact-node-feedback", + {"node_id": unique_id, "widget_name": "count", "type": "int", "value": 0}) + + return (signal, count, total) + + + +class ImpactSetWidgetValue: + @classmethod + def INPUT_TYPES(cls): + return {"required": { + "signal": (any_typ,), + "node_id": ("INT", {"default": 0, "min": 0, "max": 0xffffffffffffffff}), + "widget_name": ("STRING", {"multiline": False}), + }, + "optional": { + "boolean_value": ("BOOLEAN", {"forceInput": True}), + "int_value": ("INT", {"forceInput": True}), + "float_value": ("FLOAT", {"forceInput": True}), + "string_value": ("STRING", {"forceInput": True}), + } + } + + FUNCTION = "doit" + + CATEGORY = "ImpactPack/Logic/_for_test" + RETURN_TYPES = (any_typ,) + RETURN_NAMES = ("signal_opt",) + OUTPUT_NODE = True + + def doit(self, signal, node_id, widget_name, boolean_value=None, int_value=None, float_value=None, string_value=None, ): + kind = None + if boolean_value is not None: + value = boolean_value + kind = "BOOLEAN" + elif int_value is not None: + value = int_value + kind = "INT" + elif float_value is not None: + value = float_value + kind = "FLOAT" + elif string_value is not None: + value = string_value + kind = "STRING" + else: + value = None + + if value is not None: + PromptServer.instance.send_sync("impact-node-feedback", + {"node_id": node_id, "widget_name": widget_name, "type": kind, "value": value}) + + return (signal,) + + +class ImpactNodeSetMuteState: + @classmethod + def INPUT_TYPES(cls): + return {"required": { + "signal": 
(any_typ,), + "node_id": ("INT", {"default": 0, "min": 0, "max": 0xffffffffffffffff}), + "set_state": ("BOOLEAN", {"default": True, "label_on": "active", "label_off": "mute"}), + } + } + + FUNCTION = "doit" + + CATEGORY = "ImpactPack/Logic/_for_test" + RETURN_TYPES = (any_typ,) + RETURN_NAMES = ("signal_opt",) + OUTPUT_NODE = True + + def doit(self, signal, node_id, set_state): + PromptServer.instance.send_sync("impact-node-mute-state", {"node_id": node_id, "is_active": set_state}) + return (signal,) + + +class ImpactSleep: + @classmethod + def INPUT_TYPES(cls): + return {"required": { + "signal": (any_typ,), + "seconds": ("FLOAT", {"default": 0.5, "min": 0, "max": 3600}), + } + } + + FUNCTION = "doit" + + CATEGORY = "ImpactPack/Logic/_for_test" + RETURN_TYPES = (any_typ,) + RETURN_NAMES = ("signal_opt",) + OUTPUT_NODE = True + + def doit(self, signal, seconds): + time.sleep(seconds) + return (signal,) + + +def workflow_to_map(workflow): + nodes = {} + links = {} + for link in workflow['links']: + links[link[0]] = link[1:] + for node in workflow['nodes']: + nodes[str(node['id'])] = node + + return nodes, links + + +class ImpactRemoteBoolean: + @classmethod + def INPUT_TYPES(cls): + return {"required": { + "node_id": ("INT", {"default": 0, "min": 0, "max": 0xffffffffffffffff}), + "widget_name": ("STRING", {"multiline": False}), + "value": ("BOOLEAN", {"default": True, "label_on": "True", "label_off": "False"}), + }} + + FUNCTION = "doit" + + CATEGORY = "ImpactPack/Logic/_for_test" + RETURN_TYPES = () + OUTPUT_NODE = True + + def doit(self, **kwargs): + return {} + + +class ImpactRemoteInt: + @classmethod + def INPUT_TYPES(cls): + return {"required": { + "node_id": ("INT", {"default": 0, "min": 0, "max": 0xffffffffffffffff}), + "widget_name": ("STRING", {"multiline": False}), + "value": ("INT", {"default": 0, "min": -0xffffffffffffffff, "max": 0xffffffffffffffff}), + }} + + FUNCTION = "doit" + + CATEGORY = "ImpactPack/Logic/_for_test" + RETURN_TYPES = () + OUTPUT_NODE = True + + def doit(self, **kwargs): + return {} + +class ImpactControlBridge: + @classmethod + def INPUT_TYPES(cls): + return {"required": { + "value": (any_typ,), + "mode": ("BOOLEAN", {"default": True, "label_on": "Active", "label_off": "Stop/Mute/Bypass"}), + "behavior": (["Stop", "Mute", "Bypass"], ), + }, + "hidden": {"unique_id": "UNIQUE_ID", "prompt": "PROMPT", "extra_pnginfo": "EXTRA_PNGINFO"} + } + + FUNCTION = "doit" + + CATEGORY = "ImpactPack/Logic" + RETURN_TYPES = (any_typ,) + RETURN_NAMES = ("value",) + OUTPUT_NODE = True + + DESCRIPTION = ("When behavior is Stop and mode is active, the input value is passed directly to the output.\n" + "When behavior is Mute/Bypass and mode is active, the node connected to the output is changed to active state.\n" + "When behavior is Stop and mode is Stop/Mute/Bypass, the workflow execution of the current node is halted.\n" + "When behavior is Mute/Bypass and mode is Stop/Mute/Bypass, the node connected to the output is changed to Mute/Bypass state.") + + @classmethod + def IS_CHANGED(self, value, mode, behavior="Stop", unique_id=None, prompt=None, extra_pnginfo=None): + if behavior == "Stop": + return value, mode, behavior + else: + # NOTE: extra_pnginfo is not populated for IS_CHANGED. 
+            # so it cannot be consulted here; fall back to core.current_prompt
+            try:
+                workflow = core.current_prompt['extra_data']['extra_pnginfo']['workflow']
+            except Exception:
+                logging.info("[Impact Pack] ImpactControlBridge: workflow info is missing from core.current_prompt; change detection is skipped")
+                return 0
+
+            nodes, links = workflow_to_map(workflow)
+            next_nodes = []
+
+            for link in nodes[unique_id]['outputs'][0]['links']:
+                node_id = str(links[link][2])
+                impact.utils.collect_non_reroute_nodes(nodes, links, next_nodes, node_id)
+
+            return next_nodes
+
+    def doit(self, value, mode, behavior="Stop", unique_id=None, prompt=None, extra_pnginfo=None):
+        if core.is_execution_model_version_supported():
+            from comfy_execution.graph import ExecutionBlocker
+        else:
+            logging.info("[Impact Pack] ImpactControlBridge: ComfyUI is outdated. The 'Stop' behavior cannot function properly.")
+
+        if behavior == "Stop":
+            if mode:
+                return (value, )
+            else:
+                return (ExecutionBlocker(None), )
+        elif extra_pnginfo is None:
+            logging.warning(f"[Impact Pack] limitation: '{behavior}' behavior cannot be used in API execution.")
+            return (value,)
+        else:
+            workflow_nodes, links = workflow_to_map(extra_pnginfo['workflow'])
+
+            active_nodes = []
+            mute_nodes = []
+            bypass_nodes = []
+
+            for link in workflow_nodes[unique_id]['outputs'][0]['links']:
+                node_id = str(links[link][2])
+
+                next_nodes = []
+                impact.utils.collect_non_reroute_nodes(workflow_nodes, links, next_nodes, node_id)
+
+                for next_node_id in next_nodes:
+                    node_mode = workflow_nodes[next_node_id]['mode']
+
+                    if node_mode == 0:
+                        active_nodes.append(next_node_id)
+                    elif node_mode == 2:
+                        mute_nodes.append(next_node_id)
+                    elif node_mode == 4:
+                        bypass_nodes.append(next_node_id)
+
+            if mode:
+                # active
+                should_be_active_nodes = mute_nodes + bypass_nodes
+                if len(should_be_active_nodes) > 0:
+                    PromptServer.instance.send_sync("impact-bridge-continue", {"node_id": unique_id, 'actives': list(should_be_active_nodes)})
+                    nodes.interrupt_processing()
+
+            elif behavior == "Mute" or behavior == True:  # noqa: E712
+                # mute; comparing against True keeps legacy workflows working where 'behavior' was stored as a boolean
+                should_be_mute_nodes = active_nodes + bypass_nodes
+                if len(should_be_mute_nodes) > 0:
+                    PromptServer.instance.send_sync("impact-bridge-continue", {"node_id": unique_id, 'mutes': list(should_be_mute_nodes)})
+                    nodes.interrupt_processing()
+
+            else:
+                # bypass
+                should_be_bypass_nodes = active_nodes + mute_nodes
+                if len(should_be_bypass_nodes) > 0:
+                    PromptServer.instance.send_sync("impact-bridge-continue", {"node_id": unique_id, 'bypasses': list(should_be_bypass_nodes)})
+                    nodes.interrupt_processing()
+
+        return (value, )
+
+
+class ImpactExecutionOrderController:
+    @classmethod
+    def INPUT_TYPES(cls):
+        return {"required": {
+                    "signal": (any_typ,),
+                    "value": (any_typ,),
+                    }}
+
+    FUNCTION = "doit"
+
+    CATEGORY = "ImpactPack/Util"
+    RETURN_TYPES = (any_typ, any_typ)
+    RETURN_NAMES = ("signal", "value")
+
+    def doit(self, signal, value):
+        return signal, value
+
+
+class ImpactListBridge:
+    @classmethod
+    def INPUT_TYPES(cls):
+        return {"required": {
+                    "list_input": (any_typ,),
+                    }}
+
+    FUNCTION = "doit"
+
+    DESCRIPTION = "Passing a list output through this node collects and organizes the accumulated items before forwarding them, which ensures that the previous stage's sub-workflow has completed."
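+    # Reviewer note (illustrative; not part of the original patch): the
+    # INPUT_IS_LIST / OUTPUT_IS_LIST flags declared just below make ComfyUI
+    # hand doit() the whole accumulated list in one call instead of invoking
+    # the node once per element, so every upstream per-element sub-workflow
+    # must have finished before anything downstream of list_output runs --
+    # the ordering guarantee the DESCRIPTION above refers to.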
+
+    CATEGORY = "ImpactPack/Util"
+    RETURN_TYPES = (any_typ, )
+    RETURN_NAMES = ("list_output", )
+
+    INPUT_IS_LIST = True
+    OUTPUT_IS_LIST = (True, )
+
+    @staticmethod
+    def doit(list_input):
+        return (list_input,)
+
+
+original_handle_execution = execution.PromptExecutor.handle_execution_error
+
+
+def handle_execution_error(**kwargs):
+    # delegate to the handler captured above; calling the class attribute again
+    # would recurse onto this wrapper if it were ever installed as a replacement
+    original_handle_execution(**kwargs)
+
diff --git a/zavodik/nodes/ComfyUI-Impact-Pack/modules/impact/pipe.py b/zavodik/nodes/ComfyUI-Impact-Pack/modules/impact/pipe.py
new file mode 100644
index 0000000000000000000000000000000000000000..29d1f0e486f98a072c05f258638f932fd0f3d15c
--- /dev/null
+++ b/zavodik/nodes/ComfyUI-Impact-Pack/modules/impact/pipe.py
@@ -0,0 +1,440 @@
+import folder_paths
+from impact.utils import any_typ
+
+
+class ToDetailerPipe:
+    @classmethod
+    def INPUT_TYPES(s):
+        return {"required": {
+                    "model": ("MODEL",),
+                    "clip": ("CLIP",),
+                    "vae": ("VAE",),
+                    "positive": ("CONDITIONING",),
+                    "negative": ("CONDITIONING",),
+                    "bbox_detector": ("BBOX_DETECTOR", ),
+                    "wildcard": ("STRING", {"multiline": True, "dynamicPrompts": False}),
+                    "Select to add LoRA": (["Select the LoRA to add to the text"] + folder_paths.get_filename_list("loras"),),
+                    "Select to add Wildcard": (["Select the Wildcard to add to the text"], ),
+                    },
+                "optional": {
+                    "sam_model_opt": ("SAM_MODEL",),
+                    "segm_detector_opt": ("SEGM_DETECTOR",),
+                    "detailer_hook": ("DETAILER_HOOK",),
+                    }}
+
+    RETURN_TYPES = ("DETAILER_PIPE", )
+    RETURN_NAMES = ("detailer_pipe", )
+    FUNCTION = "doit"
+
+    CATEGORY = "ImpactPack/Pipe"
+
+    def doit(self, *args, **kwargs):
+        pipe = (kwargs['model'], kwargs['clip'], kwargs['vae'], kwargs['positive'], kwargs['negative'], kwargs['wildcard'], kwargs['bbox_detector'],
+                kwargs.get('segm_detector_opt', None), kwargs.get('sam_model_opt', None), kwargs.get('detailer_hook', None),
+                kwargs.get('refiner_model', None), kwargs.get('refiner_clip', None),
+                kwargs.get('refiner_positive', None), kwargs.get('refiner_negative', None))
+        return (pipe, )
+
+
+class ToDetailerPipeSDXL(ToDetailerPipe):
+    @classmethod
+    def INPUT_TYPES(s):
+        return {"required": {
+                    "model": ("MODEL",),
+                    "clip": ("CLIP",),
+                    "vae": ("VAE",),
+                    "positive": ("CONDITIONING",),
+                    "negative": ("CONDITIONING",),
+                    "refiner_model": ("MODEL",),
+                    "refiner_clip": ("CLIP",),
+                    "refiner_positive": ("CONDITIONING",),
+                    "refiner_negative": ("CONDITIONING",),
+                    "bbox_detector": ("BBOX_DETECTOR", ),
+                    "wildcard": ("STRING", {"multiline": True, "dynamicPrompts": False}),
+                    "Select to add LoRA": (["Select the LoRA to add to the text"] + folder_paths.get_filename_list("loras"),),
+                    "Select to add Wildcard": (["Select the Wildcard to add to the text"],),
+                    },
+                "optional": {
+                    "sam_model_opt": ("SAM_MODEL",),
+                    "segm_detector_opt": ("SEGM_DETECTOR",),
+                    "detailer_hook": ("DETAILER_HOOK",),
+                    }}
+
+
+class FromDetailerPipe:
+    @classmethod
+    def INPUT_TYPES(s):
+        return {"required": {"detailer_pipe": ("DETAILER_PIPE",), }, }
+
+    RETURN_TYPES = ("MODEL", "CLIP", "VAE", "CONDITIONING", "CONDITIONING", "BBOX_DETECTOR", "SAM_MODEL", "SEGM_DETECTOR", "DETAILER_HOOK")
+    RETURN_NAMES = ("model", "clip", "vae", "positive", "negative", "bbox_detector", "sam_model_opt", "segm_detector_opt", "detailer_hook")
+    FUNCTION = "doit"
+
+    CATEGORY = "ImpactPack/Pipe"
+
+    def doit(self, detailer_pipe):
+        model, clip, vae, positive, negative, wildcard, bbox_detector, segm_detector_opt, sam_model_opt, detailer_hook, _, _, _, _ = detailer_pipe
+        return model, clip, vae, positive, negative, bbox_detector,
sam_model_opt, segm_detector_opt, detailer_hook + + +class FromDetailerPipe_v2: + @classmethod + def INPUT_TYPES(s): + return {"required": {"detailer_pipe": ("DETAILER_PIPE",), }, } + + RETURN_TYPES = ("DETAILER_PIPE", "MODEL", "CLIP", "VAE", "CONDITIONING", "CONDITIONING", "BBOX_DETECTOR", "SAM_MODEL", "SEGM_DETECTOR", "DETAILER_HOOK") + RETURN_NAMES = ("detailer_pipe", "model", "clip", "vae", "positive", "negative", "bbox_detector", "sam_model_opt", "segm_detector_opt", "detailer_hook") + FUNCTION = "doit" + + CATEGORY = "ImpactPack/Pipe" + + def doit(self, detailer_pipe): + model, clip, vae, positive, negative, wildcard, bbox_detector, segm_detector_opt, sam_model_opt, detailer_hook, _, _, _, _ = detailer_pipe + return detailer_pipe, model, clip, vae, positive, negative, bbox_detector, sam_model_opt, segm_detector_opt, detailer_hook + + +class FromDetailerPipe_SDXL: + @classmethod + def INPUT_TYPES(s): + return {"required": {"detailer_pipe": ("DETAILER_PIPE",), }, } + + RETURN_TYPES = ("DETAILER_PIPE", "MODEL", "CLIP", "VAE", "CONDITIONING", "CONDITIONING", "BBOX_DETECTOR", "SAM_MODEL", "SEGM_DETECTOR", "DETAILER_HOOK", "MODEL", "CLIP", "CONDITIONING", "CONDITIONING") + RETURN_NAMES = ("detailer_pipe", "model", "clip", "vae", "positive", "negative", "bbox_detector", "sam_model_opt", "segm_detector_opt", "detailer_hook", "refiner_model", "refiner_clip", "refiner_positive", "refiner_negative") + FUNCTION = "doit" + + CATEGORY = "ImpactPack/Pipe" + + def doit(self, detailer_pipe): + model, clip, vae, positive, negative, wildcard, bbox_detector, segm_detector_opt, sam_model_opt, detailer_hook, refiner_model, refiner_clip, refiner_positive, refiner_negative = detailer_pipe + return detailer_pipe, model, clip, vae, positive, negative, bbox_detector, sam_model_opt, segm_detector_opt, detailer_hook, refiner_model, refiner_clip, refiner_positive, refiner_negative + + +class AnyPipeToBasic: + @classmethod + def INPUT_TYPES(s): + return { + "required": {"any_pipe": (any_typ,)}, + } + + RETURN_TYPES = ("BASIC_PIPE", ) + RETURN_NAMES = ("basic_pipe", ) + FUNCTION = "doit" + + CATEGORY = "ImpactPack/Pipe" + + def doit(self, any_pipe): + return (any_pipe[:5], ) + + +class ToBasicPipe: + @classmethod + def INPUT_TYPES(s): + return {"required": { + "model": ("MODEL",), + "clip": ("CLIP",), + "vae": ("VAE",), + "positive": ("CONDITIONING",), + "negative": ("CONDITIONING",), + }, + } + + RETURN_TYPES = ("BASIC_PIPE", ) + RETURN_NAMES = ("basic_pipe", ) + FUNCTION = "doit" + + CATEGORY = "ImpactPack/Pipe" + + def doit(self, model, clip, vae, positive, negative): + pipe = (model, clip, vae, positive, negative) + return (pipe, ) + + +class FromBasicPipe: + @classmethod + def INPUT_TYPES(s): + return {"required": {"basic_pipe": ("BASIC_PIPE",), }, } + + RETURN_TYPES = ("MODEL", "CLIP", "VAE", "CONDITIONING", "CONDITIONING") + RETURN_NAMES = ("model", "clip", "vae", "positive", "negative") + FUNCTION = "doit" + + CATEGORY = "ImpactPack/Pipe" + + def doit(self, basic_pipe): + model, clip, vae, positive, negative = basic_pipe + return model, clip, vae, positive, negative + + +class FromBasicPipe_v2: + @classmethod + def INPUT_TYPES(s): + return {"required": {"basic_pipe": ("BASIC_PIPE",), }, } + + RETURN_TYPES = ("BASIC_PIPE", "MODEL", "CLIP", "VAE", "CONDITIONING", "CONDITIONING") + RETURN_NAMES = ("basic_pipe", "model", "clip", "vae", "positive", "negative") + FUNCTION = "doit" + + CATEGORY = "ImpactPack/Pipe" + + def doit(self, basic_pipe): + model, clip, vae, positive, negative = basic_pipe + return 
basic_pipe, model, clip, vae, positive, negative + + +class BasicPipeToDetailerPipe: + @classmethod + def INPUT_TYPES(s): + return {"required": {"basic_pipe": ("BASIC_PIPE",), + "bbox_detector": ("BBOX_DETECTOR", ), + "wildcard": ("STRING", {"multiline": True, "dynamicPrompts": False}), + "Select to add LoRA": (["Select the LoRA to add to the text"] + folder_paths.get_filename_list("loras"),), + "Select to add Wildcard": (["Select the Wildcard to add to the text"],), + }, + "optional": { + "sam_model_opt": ("SAM_MODEL", ), + "segm_detector_opt": ("SEGM_DETECTOR",), + "detailer_hook": ("DETAILER_HOOK",), + }, + } + + RETURN_TYPES = ("DETAILER_PIPE", ) + RETURN_NAMES = ("detailer_pipe", ) + FUNCTION = "doit" + + CATEGORY = "ImpactPack/Pipe" + + def doit(self, *args, **kwargs): + basic_pipe = kwargs['basic_pipe'] + bbox_detector = kwargs['bbox_detector'] + wildcard = kwargs['wildcard'] + sam_model_opt = kwargs.get('sam_model_opt', None) + segm_detector_opt = kwargs.get('segm_detector_opt', None) + detailer_hook = kwargs.get('detailer_hook', None) + + model, clip, vae, positive, negative = basic_pipe + pipe = model, clip, vae, positive, negative, wildcard, bbox_detector, segm_detector_opt, sam_model_opt, detailer_hook, None, None, None, None + return (pipe, ) + + +class BasicPipeToDetailerPipeSDXL: + @classmethod + def INPUT_TYPES(s): + return {"required": {"base_basic_pipe": ("BASIC_PIPE",), + "refiner_basic_pipe": ("BASIC_PIPE",), + "bbox_detector": ("BBOX_DETECTOR", ), + "wildcard": ("STRING", {"multiline": True, "dynamicPrompts": False}), + "Select to add LoRA": (["Select the LoRA to add to the text"] + folder_paths.get_filename_list("loras"),), + "Select to add Wildcard": (["Select the Wildcard to add to the text"],), + }, + "optional": { + "sam_model_opt": ("SAM_MODEL", ), + "segm_detector_opt": ("SEGM_DETECTOR",), + "detailer_hook": ("DETAILER_HOOK",), + }, + } + + RETURN_TYPES = ("DETAILER_PIPE", ) + RETURN_NAMES = ("detailer_pipe", ) + FUNCTION = "doit" + + CATEGORY = "ImpactPack/Pipe" + + def doit(self, *args, **kwargs): + base_basic_pipe = kwargs['base_basic_pipe'] + refiner_basic_pipe = kwargs['refiner_basic_pipe'] + bbox_detector = kwargs['bbox_detector'] + wildcard = kwargs['wildcard'] + sam_model_opt = kwargs.get('sam_model_opt', None) + segm_detector_opt = kwargs.get('segm_detector_opt', None) + detailer_hook = kwargs.get('detailer_hook', None) + + model, clip, vae, positive, negative = base_basic_pipe + refiner_model, refiner_clip, refiner_vae, refiner_positive, refiner_negative = refiner_basic_pipe + pipe = model, clip, vae, positive, negative, wildcard, bbox_detector, segm_detector_opt, sam_model_opt, detailer_hook, refiner_model, refiner_clip, refiner_positive, refiner_negative + return (pipe, ) + + +class DetailerPipeToBasicPipe: + @classmethod + def INPUT_TYPES(s): + return {"required": {"detailer_pipe": ("DETAILER_PIPE",), }} + + RETURN_TYPES = ("BASIC_PIPE", "BASIC_PIPE") + RETURN_NAMES = ("base_basic_pipe", "refiner_basic_pipe") + FUNCTION = "doit" + + CATEGORY = "ImpactPack/Pipe" + + def doit(self, detailer_pipe): + model, clip, vae, positive, negative, _, _, _, _, _, refiner_model, refiner_clip, refiner_positive, refiner_negative = detailer_pipe + pipe = model, clip, vae, positive, negative + refiner_pipe = refiner_model, refiner_clip, vae, refiner_positive, refiner_negative + return (pipe, refiner_pipe) + + +class EditBasicPipe: + @classmethod + def INPUT_TYPES(s): + return { + "required": {"basic_pipe": ("BASIC_PIPE",), }, + "optional": { + "model": ("MODEL",), + 
"clip": ("CLIP",), + "vae": ("VAE",), + "positive": ("CONDITIONING",), + "negative": ("CONDITIONING",), + }, + } + + RETURN_TYPES = ("BASIC_PIPE", ) + RETURN_NAMES = ("basic_pipe", ) + FUNCTION = "doit" + + CATEGORY = "ImpactPack/Pipe" + + def doit(self, basic_pipe, model=None, clip=None, vae=None, positive=None, negative=None): + res_model, res_clip, res_vae, res_positive, res_negative = basic_pipe + + if model is not None: + res_model = model + + if clip is not None: + res_clip = clip + + if vae is not None: + res_vae = vae + + if positive is not None: + res_positive = positive + + if negative is not None: + res_negative = negative + + pipe = res_model, res_clip, res_vae, res_positive, res_negative + + return (pipe, ) + + +class EditDetailerPipe: + @classmethod + def INPUT_TYPES(s): + return { + "required": { + "detailer_pipe": ("DETAILER_PIPE",), + "wildcard": ("STRING", {"multiline": True, "dynamicPrompts": False}), + "Select to add LoRA": (["Select the LoRA to add to the text"] + folder_paths.get_filename_list("loras"),), + "Select to add Wildcard": (["Select the Wildcard to add to the text"],), + }, + "optional": { + "model": ("MODEL",), + "clip": ("CLIP",), + "vae": ("VAE",), + "positive": ("CONDITIONING",), + "negative": ("CONDITIONING",), + "bbox_detector": ("BBOX_DETECTOR",), + "sam_model": ("SAM_MODEL",), + "segm_detector": ("SEGM_DETECTOR",), + "detailer_hook": ("DETAILER_HOOK",), + }, + } + + RETURN_TYPES = ("DETAILER_PIPE",) + RETURN_NAMES = ("detailer_pipe",) + FUNCTION = "doit" + + CATEGORY = "ImpactPack/Pipe" + + def doit(self, *args, **kwargs): + detailer_pipe = kwargs['detailer_pipe'] + wildcard = kwargs['wildcard'] + model = kwargs.get('model', None) + clip = kwargs.get('clip', None) + vae = kwargs.get('vae', None) + positive = kwargs.get('positive', None) + negative = kwargs.get('negative', None) + bbox_detector = kwargs.get('bbox_detector', None) + sam_model = kwargs.get('sam_model', None) + segm_detector = kwargs.get('segm_detector', None) + detailer_hook = kwargs.get('detailer_hook', None) + refiner_model = kwargs.get('refiner_model', None) + refiner_clip = kwargs.get('refiner_clip', None) + refiner_positive = kwargs.get('refiner_positive', None) + refiner_negative = kwargs.get('refiner_negative', None) + + res_model, res_clip, res_vae, res_positive, res_negative, res_wildcard, res_bbox_detector, res_segm_detector, res_sam_model, res_detailer_hook, res_refiner_model, res_refiner_clip, res_refiner_positive, res_refiner_negative = detailer_pipe + + if model is not None: + res_model = model + + if clip is not None: + res_clip = clip + + if vae is not None: + res_vae = vae + + if positive is not None: + res_positive = positive + + if negative is not None: + res_negative = negative + + if bbox_detector is not None: + res_bbox_detector = bbox_detector + + if segm_detector is not None: + res_segm_detector = segm_detector + + if wildcard != "": + res_wildcard = wildcard + + if sam_model is not None: + res_sam_model = sam_model + + if detailer_hook is not None: + res_detailer_hook = detailer_hook + + if refiner_model is not None: + res_refiner_model = refiner_model + + if refiner_clip is not None: + res_refiner_clip = refiner_clip + + if refiner_positive is not None: + res_refiner_positive = refiner_positive + + if refiner_negative is not None: + res_refiner_negative = refiner_negative + + pipe = (res_model, res_clip, res_vae, res_positive, res_negative, res_wildcard, + res_bbox_detector, res_segm_detector, res_sam_model, res_detailer_hook, + res_refiner_model, 
res_refiner_clip, res_refiner_positive, res_refiner_negative) + + return (pipe, ) + + +class EditDetailerPipeSDXL(EditDetailerPipe): + @classmethod + def INPUT_TYPES(s): + return { + "required": { + "detailer_pipe": ("DETAILER_PIPE",), + "wildcard": ("STRING", {"multiline": True, "dynamicPrompts": False}), + "Select to add LoRA": (["Select the LoRA to add to the text"] + folder_paths.get_filename_list("loras"),), + "Select to add Wildcard": (["Select the Wildcard to add to the text"],), + }, + "optional": { + "model": ("MODEL",), + "clip": ("CLIP",), + "vae": ("VAE",), + "positive": ("CONDITIONING",), + "negative": ("CONDITIONING",), + "refiner_model": ("MODEL",), + "refiner_clip": ("CLIP",), + "refiner_positive": ("CONDITIONING",), + "refiner_negative": ("CONDITIONING",), + "bbox_detector": ("BBOX_DETECTOR",), + "sam_model": ("SAM_MODEL",), + "segm_detector": ("SEGM_DETECTOR",), + "detailer_hook": ("DETAILER_HOOK",), + }, + } diff --git a/zavodik/nodes/ComfyUI-Impact-Pack/modules/impact/segs_nodes.py b/zavodik/nodes/ComfyUI-Impact-Pack/modules/impact/segs_nodes.py new file mode 100644 index 0000000000000000000000000000000000000000..f8cd024d50a6a48605c1ff014c9124d20f805f38 --- /dev/null +++ b/zavodik/nodes/ComfyUI-Impact-Pack/modules/impact/segs_nodes.py @@ -0,0 +1,2029 @@ +import os +import sys + +import impact.impact_server +from nodes import MAX_RESOLUTION + +from . import core +from .core import SEG +import impact.utils as utils +from . import defs +from . import segs_upscaler +from comfy.cli_args import args +import math +from PIL import Image +import comfy +import numpy as np +import torch +import folder_paths +import logging + + +from typing import Callable, Union + +try: + from comfy_extras import nodes_differential_diffusion +except Exception: + logging.info("\n#############################################\n[Impact Pack] ComfyUI is an outdated version.\n#############################################\n") + raise Exception("[Impact Pack] ComfyUI is an outdated version.") + + +class SEGSDetailer: + @classmethod + def INPUT_TYPES(s): + return {"required": { + "image": ("IMAGE", ), + "segs": ("SEGS", ), + "guide_size": ("FLOAT", {"default": 512, "min": 64, "max": MAX_RESOLUTION, "step": 8}), + "guide_size_for": ("BOOLEAN", {"default": True, "label_on": "bbox", "label_off": "crop_region"}), + "max_size": ("FLOAT", {"default": 768, "min": 64, "max": MAX_RESOLUTION, "step": 8}), + "seed": ("INT", {"default": 0, "min": 0, "max": 0xffffffffffffffff}), + "steps": ("INT", {"default": 20, "min": 1, "max": 10000}), + "cfg": ("FLOAT", {"default": 8.0, "min": 0.0, "max": 100.0}), + "sampler_name": (comfy.samplers.KSampler.SAMPLERS,), + "scheduler": (core.get_schedulers(),), + "denoise": ("FLOAT", {"default": 0.5, "min": 0.0001, "max": 1.0, "step": 0.01}), + "noise_mask": ("BOOLEAN", {"default": True, "label_on": "enabled", "label_off": "disabled"}), + "force_inpaint": ("BOOLEAN", {"default": True, "label_on": "enabled", "label_off": "disabled"}), + "basic_pipe": ("BASIC_PIPE", {"tooltip": "If the `ImpactDummyInput` is connected to the model in the basic_pipe, the inference stage is skipped."}), + "refiner_ratio": ("FLOAT", {"default": 0.2, "min": 0.0, "max": 1.0}), + "batch_size": ("INT", {"default": 1, "min": 1, "max": 100}), + + "cycle": ("INT", {"default": 1, "min": 1, "max": 10, "step": 1}), + }, + "optional": { + "refiner_basic_pipe_opt": ("BASIC_PIPE",), + "inpaint_model": ("BOOLEAN", {"default": False, "label_on": "enabled", "label_off": "disabled"}), + "noise_mask_feather": ("INT", 
{"default": 20, "min": 0, "max": 100, "step": 1}), + "scheduler_func_opt": ("SCHEDULER_FUNC",), + } + } + + RETURN_TYPES = ("SEGS", "IMAGE") + RETURN_NAMES = ("segs", "cnet_images") + OUTPUT_IS_LIST = (False, True) + + FUNCTION = "doit" + + CATEGORY = "ImpactPack/Detailer" + + DESCRIPTION = "This node enhances details by inpainting each region within the detected area bundle (SEGS) after enlarging them based on the guide size.\nThis node is applied specifically to SEGS rather than the entire image. To apply it to the entire image, use the 'SEGS Paste' node." + + @staticmethod + def do_detail(image, segs, guide_size, guide_size_for, max_size, seed, steps, cfg, sampler_name, scheduler, + denoise, noise_mask, force_inpaint, basic_pipe, refiner_ratio=None, batch_size=1, cycle=1, + refiner_basic_pipe_opt=None, inpaint_model=False, noise_mask_feather=0, scheduler_func_opt=None): + + model, clip, vae, positive, negative = basic_pipe + if refiner_basic_pipe_opt is None: + refiner_model, refiner_clip, refiner_positive, refiner_negative = None, None, None, None + else: + refiner_model, refiner_clip, _, refiner_positive, refiner_negative = refiner_basic_pipe_opt + + segs = core.segs_scale_match(segs, image.shape) + + new_segs = [] + cnet_pil_list = [] + + if not (isinstance(model, str) and model == "DUMMY") and noise_mask_feather > 0 and 'denoise_mask_function' not in model.model_options: + model = nodes_differential_diffusion.DifferentialDiffusion().execute(model)[0] + + for i in range(batch_size): + seed += 1 + for seg in segs[1]: + cropped_image = seg.cropped_image if seg.cropped_image is not None \ + else utils.crop_ndarray4(image.numpy(), seg.crop_region) + cropped_image = utils.to_tensor(cropped_image) + + is_mask_all_zeros = (seg.cropped_mask == 0).all().item() + if is_mask_all_zeros: + logging.info("Detailer: segment skip [empty mask]") + new_segs.append(seg) + continue + + if noise_mask: + cropped_mask = seg.cropped_mask + else: + cropped_mask = None + + cropped_positive = [ + [condition, { + k: core.crop_condition_mask(v, image, seg.crop_region) if k == "mask" else v + for k, v in details.items() + }] + for condition, details in positive + ] + + cropped_negative = [ + [condition, { + k: core.crop_condition_mask(v, image, seg.crop_region) if k == "mask" else v + for k, v in details.items() + }] + for condition, details in negative + ] + + if not (isinstance(model, str) and model == "DUMMY"): + enhanced_image, cnet_pils = core.enhance_detail(cropped_image, model, clip, vae, guide_size, guide_size_for, max_size, + seg.bbox, seed, steps, cfg, sampler_name, scheduler, + cropped_positive, cropped_negative, denoise, cropped_mask, force_inpaint, + refiner_ratio=refiner_ratio, refiner_model=refiner_model, + refiner_clip=refiner_clip, refiner_positive=refiner_positive, refiner_negative=refiner_negative, + control_net_wrapper=seg.control_net_wrapper, cycle=cycle, + inpaint_model=inpaint_model, noise_mask_feather=noise_mask_feather, scheduler_func=scheduler_func_opt) + else: + enhanced_image = cropped_image + cnet_pils = None + + if cnet_pils is not None: + cnet_pil_list.extend(cnet_pils) + + if enhanced_image is None: + new_cropped_image = cropped_image + else: + new_cropped_image = enhanced_image + + new_seg = SEG(utils.to_numpy(new_cropped_image), seg.cropped_mask, seg.confidence, seg.crop_region, seg.bbox, seg.label, None) + new_segs.append(new_seg) + + return (segs[0], new_segs), cnet_pil_list + + def doit(self, image, segs, guide_size, guide_size_for, max_size, seed, steps, cfg, sampler_name, 
scheduler,
+             denoise, noise_mask, force_inpaint, basic_pipe, refiner_ratio=None, batch_size=1, cycle=1,
+             refiner_basic_pipe_opt=None, inpaint_model=False, noise_mask_feather=0, scheduler_func_opt=None):
+
+        if len(image) > 1:
+            raise Exception('[Impact Pack] ERROR: SEGSDetailer does not allow image batches.\nPlease refer to https://github.com/ltdrdata/ComfyUI-extension-tutorials/blob/Main/ComfyUI-Impact-Pack/tutorial/batching-detailer.md for more information.')
+
+        segs, cnet_pil_list = SEGSDetailer.do_detail(image, segs, guide_size, guide_size_for, max_size, seed, steps, cfg, sampler_name,
+                                                     scheduler, denoise, noise_mask, force_inpaint, basic_pipe, refiner_ratio, batch_size, cycle=cycle,
+                                                     refiner_basic_pipe_opt=refiner_basic_pipe_opt,
+                                                     inpaint_model=inpaint_model, noise_mask_feather=noise_mask_feather, scheduler_func_opt=scheduler_func_opt)
+
+        # set fallback image
+        if len(cnet_pil_list) == 0:
+            cnet_pil_list = [utils.empty_pil_tensor()]
+
+        return segs, cnet_pil_list
+
+
+
+class SEGSPaste:
+    @classmethod
+    def INPUT_TYPES(s):
+        return {"required": {
+                    "image": ("IMAGE", ),
+                    "segs": ("SEGS", ),
+                    "feather": ("INT", {"default": 5, "min": 0, "max": 100, "step": 1}),
+                    "alpha": ("INT", {"default": 255, "min": 0, "max": 255, "step": 1}),
+                    },
+                "optional": {"ref_image_opt": ("IMAGE", ), }
+                }
+
+    RETURN_TYPES = ("IMAGE", )
+    FUNCTION = "doit"
+
+    CATEGORY = "ImpactPack/Detailer"
+
+    DESCRIPTION = "This node pastes the SEGS enhanced by the SEGS detailer back onto the original image."
+
+    @staticmethod
+    def doit(image, segs, feather, alpha=255, ref_image_opt=None):
+        # Optimized SEGS paste: preallocates the result and avoids repeated concat.
+        segs = core.segs_scale_match(segs, image.shape)
+
+        batch_size, _, _, _ = image.shape
+        result = torch.empty_like(image)
+
+        with torch.no_grad():
+            for i in range(batch_size):
+                # paste into a per-image working copy so the input batch stays untouched
+                image_i = image[i].unsqueeze(0).clone()
+
+                for seg in segs[1]:
+                    ref_image = None
+
+                    # ref_image handling
+                    if ref_image_opt is None and seg.cropped_image is not None:
+                        cropped_image = seg.cropped_image
+                        if isinstance(cropped_image, np.ndarray):
+                            cropped_image = torch.from_numpy(cropped_image)
+                        ref_image = cropped_image[i].unsqueeze(0)
+                    elif ref_image_opt is not None:
+                        ref_tensor = ref_image_opt[i].unsqueeze(0)
+                        ref_image = utils.crop_image(ref_tensor, seg.crop_region)
+
+                    if ref_image is None:
+                        continue
+
+                    # mask handling
+                    cmask = seg.cropped_mask
+                    if cmask.ndim == 3 and len(cmask) == batch_size:
+                        mask = cmask[i]
+                    elif cmask.ndim == 3 and len(cmask) > 1:
+                        # vectorized instead of an OR loop over the mask batch
+                        mask = torch.any(cmask > 0.1, dim=0).float()
+                    else:  # ndim == 2
+                        mask = cmask
+
+                    # blur + alpha
+                    mask = utils.tensor_gaussian_blur_mask(mask, feather) * (alpha / 255.0)
+
+                    # ensure same device
+                    mask = mask.to(image_i.device)
+                    ref_image = ref_image.to(image_i.device)
+
+                    x, y, *_ = seg.crop_region
+                    utils.tensor_paste(image_i, ref_image, (x, y), mask)
+
+                result[i] = image_i[0]
+
+        if not args.highvram and not args.gpu_only:
+            result = result.cpu()
+
+        return (result,)
+
+
+class SEGSPreviewCNet:
+    def __init__(self):
+        self.output_dir = folder_paths.get_temp_directory()
+        self.type = "temp"
+
+    @classmethod
+    def INPUT_TYPES(s):
+        return {"required": {"segs": ("SEGS", ), }, }
+
+    RETURN_TYPES = ("IMAGE", )
+    OUTPUT_IS_LIST = (True, )
+    FUNCTION = "doit"
+
+    CATEGORY = "ImpactPack/Util"
+
+    OUTPUT_NODE = True
+
+    def doit(self, segs):
+        full_output_folder, filename, counter, subfolder,
filename_prefix = \ + folder_paths.get_save_image_path("impact_seg_preview", self.output_dir, segs[0][1], segs[0][0]) + + results = list() + result_image_list = [] + + for seg in segs[1]: + file = f"{filename}_{counter:05}_.webp" + + if seg.control_net_wrapper is not None and seg.control_net_wrapper.control_image is not None: + cnet_image = seg.control_net_wrapper.control_image + result_image_list.append(cnet_image) + else: + cnet_image = utils.empty_pil_tensor(64, 64) + + cnet_pil = utils.tensor2pil(cnet_image) + cnet_pil.save(os.path.join(full_output_folder, file)) + + results.append({ + "filename": file, + "subfolder": subfolder, + "type": self.type + }) + + counter += 1 + + return {"ui": {"images": results}, "result": (result_image_list,)} + + +class SEGSPreview: + def __init__(self): + self.output_dir = folder_paths.get_temp_directory() + self.type = "temp" + + @classmethod + def INPUT_TYPES(s): + return {"required": { + "segs": ("SEGS", ), + "alpha_mode": ("BOOLEAN", {"default": True, "label_on": "enable", "label_off": "disable"}), + "min_alpha": ("FLOAT", {"default": 0.2, "min": 0.0, "max": 1.0, "step": 0.01}), + }, + "optional": { + "fallback_image_opt": ("IMAGE", ), + } + } + + RETURN_TYPES = ("IMAGE", ) + OUTPUT_IS_LIST = (True, ) + FUNCTION = "doit" + + CATEGORY = "ImpactPack/Util" + + OUTPUT_NODE = True + + def doit(self, segs, alpha_mode=True, min_alpha=0.0, fallback_image_opt=None): + full_output_folder, filename, counter, subfolder, filename_prefix = \ + folder_paths.get_save_image_path("impact_seg_preview", self.output_dir, segs[0][1], segs[0][0]) + + results = list() + result_image_list = [] + + if fallback_image_opt is not None: + segs = core.segs_scale_match(segs, fallback_image_opt.shape) + + if min_alpha != 0: + min_alpha = int(255 * min_alpha) + + if len(segs[1]) > 0: + if segs[1][0].cropped_image is not None: + batch_count = len(segs[1][0].cropped_image) + elif fallback_image_opt is not None: + batch_count = len(fallback_image_opt) + else: + return {"ui": {"images": results}} + + for seg in segs[1]: + result_image_batch = None + cached_mask = None + + def get_combined_mask(): + nonlocal cached_mask + + if cached_mask is not None: + return cached_mask + else: + if isinstance(seg.cropped_mask, np.ndarray): + masks = torch.tensor(seg.cropped_mask) + else: + masks = seg.cropped_mask + + cached_mask = (masks[0] * 255).to(torch.uint8) + for x in masks[1:]: + cached_mask |= (x * 255).to(torch.uint8) + cached_mask = (cached_mask/255.0).to(torch.float32) + cached_mask = utils.to_binary_mask(cached_mask, 0.1) + cached_mask = cached_mask.numpy() + + return cached_mask + + def stack_image(image, mask=None): + nonlocal result_image_batch + + if isinstance(image, np.ndarray): + image = torch.from_numpy(image) + + if mask is not None: + image *= torch.tensor(mask)[None, ..., None] + + if result_image_batch is None: + result_image_batch = image + else: + result_image_batch = torch.concat((result_image_batch, image), dim=0) + + for i in range(batch_count): + cropped_image = None + + if seg.cropped_image is not None: + cropped_image = seg.cropped_image[i, None] + elif fallback_image_opt is not None: + # take from original image + ref_image = fallback_image_opt[i].unsqueeze(0) + cropped_image = utils.crop_image(ref_image, seg.crop_region) + + if cropped_image is not None: + if isinstance(cropped_image, np.ndarray): + cropped_image = torch.from_numpy(cropped_image) + + cropped_image = cropped_image.clone() + cropped_pil = utils.to_pil(cropped_image) + + if alpha_mode: + if 
isinstance(seg.cropped_mask, np.ndarray): + cropped_mask = seg.cropped_mask + else: + if seg.cropped_image is not None and len(seg.cropped_image) != len(seg.cropped_mask): + cropped_mask = get_combined_mask() + else: + cropped_mask = seg.cropped_mask[i].numpy() + + mask_array = (cropped_mask * 255).astype(np.uint8) + + if min_alpha != 0: + mask_array[mask_array < min_alpha] = min_alpha + + mask_pil = Image.fromarray(mask_array, mode='L').resize(cropped_pil.size) + cropped_pil.putalpha(mask_pil) + stack_image(cropped_image, cropped_mask) + else: + stack_image(cropped_image) + + file = f"{filename}_{counter:05}_.webp" + cropped_pil.save(os.path.join(full_output_folder, file)) + results.append({ + "filename": file, + "subfolder": subfolder, + "type": self.type + }) + + counter += 1 + + if result_image_batch is not None: + result_image_list.append(result_image_batch) + + return {"ui": {"images": results}, "result": (result_image_list,) } + + +class SEGSLabelFilter: + @classmethod + def INPUT_TYPES(s): + return {"required": { + "segs": ("SEGS", ), + "preset": (['all'] + defs.detection_labels, ), + "labels": ("STRING", {"multiline": True, "placeholder": "List the types of segments to be allowed, separated by commas"}), + }, + } + + RETURN_TYPES = ("SEGS", "SEGS",) + RETURN_NAMES = ("filtered_SEGS", "remained_SEGS",) + FUNCTION = "doit" + + CATEGORY = "ImpactPack/Util" + + @staticmethod + def filter(segs, labels): + labels = set([label.strip() for label in labels]) + + if 'all' in labels: + return (segs, (segs[0], []), ) + else: + res_segs = [] + remained_segs = [] + + for x in segs[1]: + if x.label in labels: + res_segs.append(x) + elif 'eyes' in labels and x.label in ['left_eye', 'right_eye']: + res_segs.append(x) + elif 'eyebrows' in labels and x.label in ['left_eyebrow', 'right_eyebrow']: + res_segs.append(x) + elif 'pupils' in labels and x.label in ['left_pupil', 'right_pupil']: + res_segs.append(x) + else: + remained_segs.append(x) + + return ((segs[0], res_segs), (segs[0], remained_segs), ) + + def doit(self, segs, preset, labels): + labels = labels.split(',') + return SEGSLabelFilter.filter(segs, labels) + + +class SEGSLabelAssign: + @classmethod + def INPUT_TYPES(s): + return {"required": { + "segs": ("SEGS", ), + "labels": ("STRING", {"multiline": True, "placeholder": "List the label to be assigned in order of segs, separated by commas"}), + }, + } + + RETURN_TYPES = ("SEGS",) + RETURN_NAMES = ("SEGS",) + FUNCTION = "doit" + + CATEGORY = "ImpactPack/Util" + + @staticmethod + def assign(segs, labels): + labels = [label.strip() for label in labels] + + if len(labels) != len(segs[1]): + logging.warning(f'[Impact Pack] SEGSLabelAssign: length of labels ({len(labels)}) != length of segs ({len(segs[1])})') + + labeled_segs = [] + + idx = 0 + for x in segs[1]: + if len(labels) > idx: + x = x._replace(label=labels[idx]) + labeled_segs.append(x) + idx += 1 + + return ((segs[0], labeled_segs), ) + + def doit(self, segs, labels): + labels = labels.split(',') + return SEGSLabelAssign.assign(segs, labels) + + +class SEGSOrderedFilter: + @classmethod + def INPUT_TYPES(s): + return {"required": { + "segs": ("SEGS", ), + "target": (["area(=w*h)", "width", "height", "x1", "y1", "x2", "y2", "confidence", "none"],), + "order": ("BOOLEAN", {"default": True, "label_on": "descending", "label_off": "ascending"}), + "take_start": ("INT", {"default": 0, "min": 0, "max": sys.maxsize, "step": 1}), + "take_count": ("INT", {"default": 1, "min": 0, "max": sys.maxsize, "step": 1}), + }, + } + + RETURN_TYPES = 
("SEGS", "SEGS",) + RETURN_NAMES = ("filtered_SEGS", "remained_SEGS",) + FUNCTION = "doit" + + CATEGORY = "ImpactPack/Util" + + @staticmethod + def get_sort_key_fn(target: str) -> Union[Callable, None]: + if target == "none": + return None + + def sort_key_fn(seg): + x1, y1, x2, y2 = seg.crop_region + if target == "confidence": return seg.confidence + if target == "area(=w*h)": return (x2 - x1) * (y2 - y1) + if target == "width": return x2 - x1 + if target == "height": return y2 - y1 + if target == "x1": return x1 + if target == "y1": return y1 + if target == "x2": return x2 + if target == "y2": return y2 + raise Exception(f"[Impact Pack] SEGSOrderedFilter - Unexpected target '{target}'") + + return sort_key_fn + + def doit(self, segs, target, order, take_start, take_count): + sort_key_fn = SEGSOrderedFilter.get_sort_key_fn(target) + + sorted_list = list(segs[1]) # make a shallow copy, so it does not mutate the original list when sort + if sort_key_fn is not None: + sorted_list.sort(key=sort_key_fn, reverse=order) + + take_stop = take_start + take_count + return (segs[0], sorted_list[take_start:take_stop]), \ + (segs[0], sorted_list[:take_start] + sorted_list[take_stop:]), + + +class SEGSRangeFilter: + @classmethod + def INPUT_TYPES(s): + return {"required": { + "segs": ("SEGS", ), + "target": (["area(=w*h)", "width", "height", "x1", "y1", "x2", "y2", "length_percent", "confidence(0-100)"],), + "mode": ("BOOLEAN", {"default": True, "label_on": "inside", "label_off": "outside"}), + "min_value": ("INT", {"default": 0, "min": 0, "max": sys.maxsize, "step": 1}), + "max_value": ("INT", {"default": 67108864, "min": 0, "max": sys.maxsize, "step": 1}), + }, + } + + RETURN_TYPES = ("SEGS", "SEGS",) + RETURN_NAMES = ("filtered_SEGS", "remained_SEGS",) + FUNCTION = "doit" + + CATEGORY = "ImpactPack/Util" + + def doit(self, segs, target, mode, min_value, max_value): + new_segs = [] + remained_segs = [] + + for seg in segs[1]: + x1 = seg.crop_region[0] + y1 = seg.crop_region[1] + x2 = seg.crop_region[2] + y2 = seg.crop_region[3] + + if target == "area(=w*h)": + value = (y2 - y1) * (x2 - x1) + elif target == "length_percent": + h = y2 - y1 + w = x2 - x1 + value = max(h/w, w/h)*100 + elif target == "width": + value = x2 - x1 + elif target == "height": + value = y2 - y1 + elif target == "x1": + value = x1 + elif target == "x2": + value = x2 + elif target == "y1": + value = y1 + elif target == "y2": + value = y2 + elif target == "confidence(0-100)": + value = seg.confidence*100 + else: + raise Exception(f"[Impact Pack] SEGSRangeFilter - Unexpected target '{target}'") + + if mode and min_value <= value <= max_value: + logging.info(f"[in] value={value} / {mode}, {min_value}, {max_value}") + new_segs.append(seg) + elif not mode and (value < min_value or value > max_value): + logging.info(f"[out] value={value} / {mode}, {min_value}, {max_value}") + new_segs.append(seg) + else: + remained_segs.append(seg) + logging.info(f"[filter] value={value} / {mode}, {min_value}, {max_value}") + + return (segs[0], new_segs), (segs[0], remained_segs), + + +class SEGSIntersectionFilter: + @classmethod + def INPUT_TYPES(s): + return {"required": { + "segs1": ("SEGS", ), + "segs2": ("SEGS", ), + "ioa_threshold": ("FLOAT", {"default": 0.5, "min": 0.0, "max": 1.0, "step": 0.01}), + }, + } + + RETURN_TYPES = ("SEGS",) + RETURN_NAMES = ("filtered_SEGS",) + FUNCTION = "doit" + + CATEGORY = "ImpactPack/Util" + + def compute_ioa(self, mask1, mask2): + """Compute Intersection over Area (IoA) between two boxes.""" + inter_mask = 
utils.bitwise_and_masks(mask1, mask2) + + inter_area = (inter_mask > 0).sum() + area1 = (mask1 > 0).sum() + + return inter_area / area1 if area1 > 0 else 0 + + def doit(self, segs1, segs2, ioa_threshold): + """Remove segments from segs1 if their IoA with any segment in segs2 exceeds the threshold.""" + # Extract bounding boxes for all segments in segs1 and segs2 + keep = [] + + # Iterate over all segments in segs1 + for idx1, seg1 in enumerate(segs1[1]): + keep_segment = True # Assume the segment should be kept + mask1 = core.segs_to_combined_mask((segs1[0], [seg1])) + + # Compare with every segment in segs2 + for seg2 in segs2[1]: + mask2 = core.segs_to_combined_mask((segs2[0], [seg2])) + ioa = self.compute_ioa(mask1, mask2) # IoA between segment 1 and segment 2 + + if ioa > ioa_threshold: # If IoA exceeds the threshold, mark the segment for removal + keep_segment = False + break # If one overlap exceeds threshold, break early and mark for removal + + # Keep the segment if it did not exceed the threshold with any other segment + if keep_segment: + keep.append(segs1[1][idx1]) + + return (segs1[0], keep), # Return the updated SEGS + + +class SEGSNMSFilter: + @classmethod + def INPUT_TYPES(cls): + return { + "required": { + "segs": ("SEGS",), + "iou_threshold": ("FLOAT", {"default": 0.5, "min": 0.0, "max": 1.0, "step": 0.01}), + }, + } + + RETURN_TYPES = ("SEGS",) + RETURN_NAMES = ("filtered_SEGS",) + FUNCTION = "doit" + + CATEGORY = "ImpactPack/Util" + + def compute_iou(self, mask1, mask2): + """Compute IoU between two bounding boxes (x1, y1, x2, y2).""" + inter_mask = utils.bitwise_and_masks(mask1, mask2) + union_mask = utils.add_masks(mask1, mask2) + + inter_area = (inter_mask > 0).sum() + union_area = (union_mask > 0).sum() + + return inter_area / union_area if union_area > 0 else 0 + + def doit(self, segs, iou_threshold): + """Perform NMS to filter overlapping segments.""" + confidences = np.ndarray.flatten(np.array([seg.confidence for seg in segs[1]])) + + # Sort boxes by confidence (high to low) + sorted_indices = np.argsort(confidences)[::-1].tolist() + keep = [] + + while len(sorted_indices) > 0: + idx = sorted_indices[0] + mask1 = core.segs_to_combined_mask((segs[0], [segs[1][idx]])) + keep.append(idx) + sorted_indices = sorted_indices[1:] + + # Filter indices only contain the indices where the bbox does not intersect + filtered_indices = [] + for i in sorted_indices: + mask2 = core.segs_to_combined_mask((segs[0], [segs[1][i]])) + iou = self.compute_iou(mask1, mask2) + if iou < iou_threshold: + filtered_indices.append(i) + + sorted_indices = np.array(filtered_indices) + + filtered_segs = [segs[1][i] for i in keep] + return (segs[0], filtered_segs), + + +class SEGSToImageList: + @classmethod + def INPUT_TYPES(s): + return {"required": { + "segs": ("SEGS", ), + }, + "optional": { + "fallback_image_opt": ("IMAGE", ), + } + } + + RETURN_TYPES = ("IMAGE",) + OUTPUT_IS_LIST = (True,) + FUNCTION = "doit" + + CATEGORY = "ImpactPack/Util" + + def doit(self, segs, fallback_image_opt=None): + results = list() + + if fallback_image_opt is not None: + segs = core.segs_scale_match(segs, fallback_image_opt.shape) + + for seg in segs[1]: + if seg.cropped_image is not None: + cropped_image = utils.to_tensor(seg.cropped_image) + elif fallback_image_opt is not None: + # take from original image + cropped_image = utils.to_tensor(utils.crop_image(fallback_image_opt, seg.crop_region)) + else: + cropped_image = utils.empty_pil_tensor() + + results.append(cropped_image) + + if len(results) == 0: + 
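+            # No SEG yielded an image and no fallback was supplied; append a small
+            # blank tensor so downstream IMAGE consumers still receive a valid list.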
results.append(utils.empty_pil_tensor()) + + return (results,) + + +class SEGSToMaskList: + @classmethod + def INPUT_TYPES(s): + return {"required": { + "segs": ("SEGS", ), + }, + } + + RETURN_TYPES = ("MASK",) + OUTPUT_IS_LIST = (True,) + FUNCTION = "doit" + + CATEGORY = "ImpactPack/Util" + + def doit(self, segs): + masks = core.segs_to_masklist(segs) + if len(masks) == 0: + empty_mask = torch.zeros(segs[0], dtype=torch.float32, device="cpu") + masks = [empty_mask] + masks = [utils.make_3d_mask(mask) for mask in masks] + return (masks,) + + +class SEGSToMaskBatch: + @classmethod + def INPUT_TYPES(s): + return {"required": { + "segs": ("SEGS", ), + }, + } + + RETURN_TYPES = ("MASK",) + FUNCTION = "doit" + + CATEGORY = "ImpactPack/Util" + + def doit(self, segs): + masks = core.segs_to_masklist(segs) + masks = [utils.make_3d_mask(mask) for mask in masks] + mask_batch = torch.concat(masks) + return (mask_batch,) + + +class SEGSMerge: + @classmethod + def INPUT_TYPES(s): + return {"required": { + "segs": ("SEGS", ), + }, + } + + RETURN_TYPES = ("SEGS",) + FUNCTION = "doit" + + CATEGORY = "ImpactPack/Util" + + DESCRIPTION = "SEGS contains multiple SEGs. SEGS Merge integrates several SEGs into a single merged SEG. The label is changed to `merged` and the confidence becomes the minimum confidence. The applied controlnet and cropped_image are removed." + + def doit(self, segs): + crop_left = sys.maxsize + crop_right = 0 + crop_top = sys.maxsize + crop_bottom = 0 + + bbox_left = sys.maxsize + bbox_right = 0 + bbox_top = sys.maxsize + bbox_bottom = 0 + + min_confidence = 1.0 + + for seg in segs[1]: + cx1 = seg.crop_region[0] + cy1 = seg.crop_region[1] + cx2 = seg.crop_region[2] + cy2 = seg.crop_region[3] + + bx1 = seg.bbox[0] + by1 = seg.bbox[1] + bx2 = seg.bbox[2] + by2 = seg.bbox[3] + + crop_left = min(crop_left, cx1) + crop_top = min(crop_top, cy1) + crop_right = max(crop_right, cx2) + crop_bottom = max(crop_bottom, cy2) + + bbox_left = min(bbox_left, bx1) + bbox_top = min(bbox_top, by1) + bbox_right = max(bbox_right, bx2) + bbox_bottom = max(bbox_bottom, by2) + + min_confidence = min(min_confidence, seg.confidence) + + combined_mask = core.segs_to_combined_mask(segs) + cropped_mask = combined_mask[crop_top:crop_bottom, crop_left:crop_right] + cropped_mask = cropped_mask.unsqueeze(0) + + crop_region = [crop_left, crop_top, crop_right, crop_bottom] + bbox = [bbox_left, bbox_top, bbox_right, bbox_bottom] + + seg = SEG(None, cropped_mask, min_confidence, crop_region, bbox, 'merged', None) + return ((segs[0], [seg]),) + + +class SEGSConcat: + @classmethod + def INPUT_TYPES(s): + return {"required": { + "segs1": ("SEGS", ), + }, + } + + RETURN_TYPES = ("SEGS",) + FUNCTION = "doit" + + CATEGORY = "ImpactPack/Util" + + def doit(self, **kwargs): + dim = None + res = None + + for k, v in list(kwargs.items()): + if v[0] == (0, 0) or len(v[1]) == 0: + continue + + if dim is None: + dim = v[0] + res = v[1] + else: + if v[0] == dim: + res = res + v[1] + else: + logging.error(f"[Impact Pack] source shape of 'segs1'{dim} and '{k}'{v[0]} are different. 
'{k}' will be ignored") + + if dim is None: + empty_segs = ((0, 0), []) + return (empty_segs, ) + else: + return ((dim, res), ) + + +class Count_Elts_in_SEGS: + @classmethod + def INPUT_TYPES(s): + return {"required": { + "segs": ("SEGS", ), + }, + } + + RETURN_TYPES = ("INT",) + FUNCTION = "doit" + + CATEGORY = "ImpactPack/Util" + + def doit(self, segs): + return (len(segs[1]), ) + + +class DecomposeSEGS: + @classmethod + def INPUT_TYPES(s): + return {"required": { + "segs": ("SEGS", ), + }, + } + + RETURN_TYPES = ("SEGS_HEADER", "SEG_ELT",) + OUTPUT_IS_LIST = (False, True, ) + + FUNCTION = "doit" + + CATEGORY = "ImpactPack/Util" + + def doit(self, segs): + return segs + + +class AssembleSEGS: + @classmethod + def INPUT_TYPES(s): + return {"required": { + "seg_header": ("SEGS_HEADER", ), + "seg_elt": ("SEG_ELT", ), + }, + } + + INPUT_IS_LIST = True + + RETURN_TYPES = ("SEGS", ) + + FUNCTION = "doit" + + CATEGORY = "ImpactPack/Util" + + def doit(self, seg_header, seg_elt): + return ((seg_header[0], seg_elt), ) + + +class From_SEG_ELT: + @classmethod + def INPUT_TYPES(s): + return {"required": { + "seg_elt": ("SEG_ELT", ), + }, + } + + RETURN_TYPES = ("SEG_ELT", "IMAGE", "MASK", "SEG_ELT_crop_region", "SEG_ELT_bbox", "SEG_ELT_control_net_wrapper", "FLOAT", "STRING") + RETURN_NAMES = ("seg_elt", "cropped_image", "cropped_mask", "crop_region", "bbox", "control_net_wrapper", "confidence", "label") + + FUNCTION = "doit" + + CATEGORY = "ImpactPack/Util" + + def doit(self, seg_elt): + cropped_image = utils.to_tensor(seg_elt.cropped_image) if seg_elt.cropped_image is not None else None + return (seg_elt, cropped_image, utils.to_tensor(seg_elt.cropped_mask), seg_elt.crop_region, seg_elt.bbox, seg_elt.control_net_wrapper, seg_elt.confidence, seg_elt.label,) + + +class From_SEG_ELT_bbox: + @classmethod + def INPUT_TYPES(s): + return {"required": { + "bbox": ("SEG_ELT_bbox", ), + }, + } + + RETURN_TYPES = ("INT", "INT", "INT", "INT") + RETURN_NAMES = ("left", "top", "right", "bottom") + + FUNCTION = "doit" + + CATEGORY = "ImpactPack/Util" + + def doit(self, bbox): + return [int(c) for c in bbox] + + +class From_SEG_ELT_crop_region: + @classmethod + def INPUT_TYPES(s): + return {"required": { + "crop_region": ("SEG_ELT_crop_region", ), + }, + } + + RETURN_TYPES = ("INT", "INT", "INT", "INT") + RETURN_NAMES = ("left", "top", "right", "bottom") + + FUNCTION = "doit" + + CATEGORY = "ImpactPack/Util" + + def doit(self, crop_region): + return crop_region + + +class Edit_SEG_ELT: + @classmethod + def INPUT_TYPES(s): + return {"required": { + "seg_elt": ("SEG_ELT", ), + }, + "optional": { + "cropped_image_opt": ("IMAGE", ), + "cropped_mask_opt": ("MASK", ), + "crop_region_opt": ("SEG_ELT_crop_region", ), + "bbox_opt": ("SEG_ELT_bbox", ), + "control_net_wrapper_opt": ("SEG_ELT_control_net_wrapper", ), + "confidence_opt": ("FLOAT", {"min": 0, "max": 1.0, "step": 0.1, "forceInput": True}), + "label_opt": ("STRING", {"multiline": False, "forceInput": True}), + } + } + + RETURN_TYPES = ("SEG_ELT", ) + + FUNCTION = "doit" + + CATEGORY = "ImpactPack/Util" + + def doit(self, seg_elt, cropped_image_opt=None, cropped_mask_opt=None, confidence_opt=None, crop_region_opt=None, + bbox_opt=None, label_opt=None, control_net_wrapper_opt=None): + + cropped_image = seg_elt.cropped_image if cropped_image_opt is None else cropped_image_opt + cropped_mask = seg_elt.cropped_mask if cropped_mask_opt is None else cropped_mask_opt + confidence = seg_elt.confidence if confidence_opt is None else confidence_opt + crop_region = 
seg_elt.crop_region if crop_region_opt is None else crop_region_opt + bbox = seg_elt.bbox if bbox_opt is None else bbox_opt + label = seg_elt.label if label_opt is None else label_opt + control_net_wrapper = seg_elt.control_net_wrapper if control_net_wrapper_opt is None else control_net_wrapper_opt + + cropped_image = cropped_image.numpy() if cropped_image is not None else None + + if isinstance(cropped_mask, torch.Tensor): + if len(cropped_mask.shape) == 3: + cropped_mask = cropped_mask.squeeze(0) + + cropped_mask = cropped_mask.numpy() + + seg = SEG(cropped_image, cropped_mask, confidence, crop_region, bbox, label, control_net_wrapper) + + return (seg,) + + +class DilateMask: + @classmethod + def INPUT_TYPES(s): + return {"required": { + "mask": ("MASK", ), + "dilation": ("INT", {"default": 10, "min": -512, "max": 512, "step": 1}), + }} + + RETURN_TYPES = ("MASK", ) + + FUNCTION = "doit" + + CATEGORY = "ImpactPack/Util" + + def doit(self, mask, dilation): + mask = utils.dilate_mask(mask.numpy(), dilation) + mask = torch.from_numpy(mask) + mask = utils.make_3d_mask(mask) + return (mask, ) + + +class GaussianBlurMask: + @classmethod + def INPUT_TYPES(s): + return {"required": { + "mask": ("MASK", ), + "kernel_size": ("INT", {"default": 10, "min": 0, "max": 100, "step": 1}), + "sigma": ("FLOAT", {"default": 10.0, "min": 0.1, "max": 100.0, "step": 0.1}), + }} + + RETURN_TYPES = ("MASK", ) + + FUNCTION = "doit" + + CATEGORY = "ImpactPack/Util" + + def doit(self, mask, kernel_size, sigma): + # Some custom nodes use abnormal 4-dimensional masks in the format of b, c, h, w. In the impact pack, internal 4-dimensional masks are required in the format of b, h, w, c. Therefore, normalization is performed using the normal mask format, which is 3-dimensional, before proceeding with the operation. 
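+        # A minimal sketch of the shape round-trip performed below (hypothetical
+        # tensor `m`, shown for illustration only):
+        #   m = torch.rand(1, 512, 512)       # normal 3-dim mask: [b, h, w]
+        #   m = torch.unsqueeze(m, dim=-1)    # [b, h, w, 1], the layout the blur helper expects
+        #   m = torch.squeeze(m, dim=-1)      # back to [b, h, w]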
+        mask = utils.make_3d_mask(mask)
+        mask = torch.unsqueeze(mask, dim=-1)
+        mask = utils.tensor_gaussian_blur_mask(mask, kernel_size, sigma)
+        mask = torch.squeeze(mask, dim=-1)
+        return (mask, )
+
+
+class DilateMaskInSEGS:
+    @classmethod
+    def INPUT_TYPES(s):
+        return {"required": {
+                    "segs": ("SEGS", ),
+                    "dilation": ("INT", {"default": 10, "min": -512, "max": 512, "step": 1}),
+                }}
+
+    RETURN_TYPES = ("SEGS", )
+
+    FUNCTION = "doit"
+
+    CATEGORY = "ImpactPack/Util"
+
+    def doit(self, segs, dilation):
+        new_segs = []
+        for seg in segs[1]:
+            mask = utils.dilate_mask(seg.cropped_mask, dilation)
+            seg = SEG(seg.cropped_image, mask, seg.confidence, seg.crop_region, seg.bbox, seg.label, seg.control_net_wrapper)
+            new_segs.append(seg)
+
+        return ((segs[0], new_segs), )
+
+
+class GaussianBlurMaskInSEGS:
+    @classmethod
+    def INPUT_TYPES(s):
+        return {"required": {
+                    "segs": ("SEGS", ),
+                    "kernel_size": ("INT", {"default": 10, "min": 0, "max": 100, "step": 1}),
+                    "sigma": ("FLOAT", {"default": 10.0, "min": 0.1, "max": 100.0, "step": 0.1}),
+                }}
+
+    RETURN_TYPES = ("SEGS", )
+
+    FUNCTION = "doit"
+
+    CATEGORY = "ImpactPack/Util"
+
+    def doit(self, segs, kernel_size, sigma):
+        new_segs = []
+        for seg in segs[1]:
+            mask = utils.tensor_gaussian_blur_mask(seg.cropped_mask, kernel_size, sigma)
+            mask = torch.squeeze(mask, dim=-1).squeeze(0).numpy()
+            seg = SEG(seg.cropped_image, mask, seg.confidence, seg.crop_region, seg.bbox, seg.label, seg.control_net_wrapper)
+            new_segs.append(seg)
+
+        return ((segs[0], new_segs), )
+
+
+class Dilate_SEG_ELT:
+    @classmethod
+    def INPUT_TYPES(s):
+        return {"required": {
+                    "seg_elt": ("SEG_ELT", ),
+                    "dilation": ("INT", {"default": 10, "min": -512, "max": 512, "step": 1}),
+                }}
+
+    RETURN_TYPES = ("SEG_ELT", )
+
+    FUNCTION = "doit"
+
+    CATEGORY = "ImpactPack/Util"
+
+    def doit(self, seg_elt, dilation):
+        mask = utils.dilate_mask(seg_elt.cropped_mask, dilation)
+        seg = SEG(seg_elt.cropped_image, mask, seg_elt.confidence, seg_elt.crop_region, seg_elt.bbox, seg_elt.label, seg_elt.control_net_wrapper)
+        return (seg,)
+
+
+class SEG_ELT_BBOX_ScaleBy:
+    @classmethod
+    def INPUT_TYPES(s):
+        return {"required": {
+                    "seg": ("SEG_ELT", ),
+                    "scale_by": ("FLOAT", {"default": 1.0, "min": 0.01, "max": 8.0, "step": 0.01}), }
+                }
+
+    RETURN_TYPES = ("SEG_ELT", )
+
+    FUNCTION = "doit"
+
+    CATEGORY = "ImpactPack/Util"
+
+    @staticmethod
+    def fill_zero_outside_bbox(mask, crop_region, bbox):
+        cx1, cy1, _, _ = crop_region
+        x1, y1, x2, y2 = bbox
+        x1, y1, x2, y2 = x1-cx1, y1-cy1, x2-cx1, y2-cy1
+        h, w = mask.shape
+
+        x1 = int(min(w-1, max(0, x1)))
+        x2 = int(min(w-1, max(0, x2)))
+        y1 = int(min(h-1, max(0, y1)))
+        y2 = int(min(h-1, max(0, y2)))
+
+        mask_cropped = mask.copy()
+        mask_cropped[:, :x1] = 0  # zero fill left side
+        mask_cropped[:, x2:] = 0  # zero fill right side
+        mask_cropped[:y1, :] = 0  # zero fill top side
+        mask_cropped[y2:, :] = 0  # zero fill bottom side
+        return mask_cropped
+
+    def doit(self, seg, scale_by):
+        x1, y1, x2, y2 = seg.bbox
+        w = x2-x1
+        h = y2-y1
+
+        dw = int((w * scale_by - w)/2)
+        dh = int((h * scale_by - h)/2)
+
+        bbox = (x1-dw, y1-dh, x2+dw, y2+dh)
+
+        cropped_mask = SEG_ELT_BBOX_ScaleBy.fill_zero_outside_bbox(seg.cropped_mask, seg.crop_region, bbox)
+        seg = SEG(seg.cropped_image, cropped_mask, seg.confidence, seg.crop_region, bbox, seg.label, seg.control_net_wrapper)
+        return (seg,)
+
+
+class EmptySEGS:
+    @classmethod
+    def INPUT_TYPES(s):
+        return {"required": {}, }
+
+    RETURN_TYPES = ("SEGS",)
+    FUNCTION = "doit"
+
+    CATEGORY = "ImpactPack/Util"
+
+    def doit(self):
+        shape = 0,
0 + return ((shape, []),) + + +class SegsToCombinedMask: + @classmethod + def INPUT_TYPES(s): + return {"required": {"segs": ("SEGS",), }} + + RETURN_TYPES = ("MASK",) + FUNCTION = "doit" + + CATEGORY = "ImpactPack/Operation" + + def doit(self, segs): + mask = core.segs_to_combined_mask(segs) + mask = utils.make_3d_mask(mask) + return (mask,) + + +class MediaPipeFaceMeshToSEGS: + @classmethod + def INPUT_TYPES(s): + bool_true_widget = ("BOOLEAN", {"default": True, "label_on": "Enabled", "label_off": "Disabled"}) + bool_false_widget = ("BOOLEAN", {"default": False, "label_on": "Enabled", "label_off": "Disabled"}) + return {"required": { + "image": ("IMAGE",), + "crop_factor": ("FLOAT", {"default": 3.0, "min": 1.0, "max": 100, "step": 0.1}), + "bbox_fill": ("BOOLEAN", {"default": False, "label_on": "enabled", "label_off": "disabled"}), + "crop_min_size": ("INT", {"min": 10, "max": MAX_RESOLUTION, "step": 1, "default": 50}), + "drop_size": ("INT", {"min": 1, "max": MAX_RESOLUTION, "step": 1, "default": 1}), + "dilation": ("INT", {"default": 0, "min": -512, "max": 512, "step": 1}), + "face": bool_true_widget, + "mouth": bool_false_widget, + "left_eyebrow": bool_false_widget, + "left_eye": bool_false_widget, + "left_pupil": bool_false_widget, + "right_eyebrow": bool_false_widget, + "right_eye": bool_false_widget, + "right_pupil": bool_false_widget, + }, + # "optional": {"reference_image_opt": ("IMAGE", ), } + } + + RETURN_TYPES = ("SEGS",) + FUNCTION = "doit" + + CATEGORY = "ImpactPack/Operation" + + def doit(self, image, crop_factor, bbox_fill, crop_min_size, drop_size, dilation, face, mouth, left_eyebrow, left_eye, left_pupil, right_eyebrow, right_eye, right_pupil): + # padding is obsolete now + # https://github.com/Fannovel16/comfyui_controlnet_aux/blob/1ec41fceff1ee99596445a0c73392fd91df407dc/utils.py#L33 + # def calc_pad(h_raw, w_raw): + # resolution = normalize_size_base_64(h_raw, w_raw) + # + # def pad64(x): + # return int(np.ceil(float(x) / 64.0) * 64 - x) + # + # k = float(resolution) / float(min(h_raw, w_raw)) + # h_target = int(np.round(float(h_raw) * k)) + # w_target = int(np.round(float(w_raw) * k)) + # + # return pad64(h_target), pad64(w_target) + + # if reference_image_opt is not None: + # if image.shape[1:] != reference_image_opt.shape[1:]: + # scale_by1 = reference_image_opt.shape[1] / image.shape[1] + # scale_by2 = reference_image_opt.shape[2] / image.shape[2] + # scale_by = min(scale_by1, scale_by2) + # + # # padding is obsolete now + # # h_pad, w_pad = calc_pad(reference_image_opt.shape[1], reference_image_opt.shape[2]) + # # if h_pad != 0: + # # # height padded + # # image = image[:, :-h_pad, :, :] + # # elif w_pad != 0: + # # # width padded + # # image = image[:, :, :-w_pad, :] + # + # image = nodes.ImageScaleBy().upscale(image, "bilinear", scale_by)[0] + + result = core.mediapipe_facemesh_to_segs(image, crop_factor, bbox_fill, crop_min_size, drop_size, dilation, face, mouth, left_eyebrow, left_eye, left_pupil, right_eyebrow, right_eye, right_pupil) + return (result, ) + + +class MaskToSEGS: + @classmethod + def INPUT_TYPES(s): + return {"required": { + "mask": ("MASK",), + "combined": ("BOOLEAN", {"default": False, "label_on": "True", "label_off": "False"}), + "crop_factor": ("FLOAT", {"default": 3.0, "min": 1.0, "max": 100, "step": 0.1}), + "bbox_fill": ("BOOLEAN", {"default": False, "label_on": "enabled", "label_off": "disabled"}), + "drop_size": ("INT", {"min": 1, "max": MAX_RESOLUTION, "step": 1, "default": 10}), + "contour_fill": ("BOOLEAN", {"default": False, 
"label_on": "enabled", "label_off": "disabled"}), + } + } + + RETURN_TYPES = ("SEGS",) + FUNCTION = "doit" + + CATEGORY = "ImpactPack/Operation" + + @staticmethod + def doit(mask, combined, crop_factor, bbox_fill, drop_size, contour_fill=False): + mask = utils.make_2d_mask(mask) + result = core.mask_to_segs(mask, combined, crop_factor, bbox_fill, drop_size, is_contour=contour_fill) + + return (result, ) + + +class MaskToSEGS_for_AnimateDiff: + @classmethod + def INPUT_TYPES(s): + return {"required": { + "mask": ("MASK",), + "combined": ("BOOLEAN", {"default": False, "label_on": "True", "label_off": "False"}), + "crop_factor": ("FLOAT", {"default": 3.0, "min": 1.0, "max": 100, "step": 0.1}), + "bbox_fill": ("BOOLEAN", {"default": False, "label_on": "enabled", "label_off": "disabled"}), + "drop_size": ("INT", {"min": 1, "max": MAX_RESOLUTION, "step": 1, "default": 10}), + "contour_fill": ("BOOLEAN", {"default": False, "label_on": "enabled", "label_off": "disabled"}), + } + } + + RETURN_TYPES = ("SEGS",) + FUNCTION = "doit" + + CATEGORY = "ImpactPack/Operation" + + @staticmethod + def doit(mask, combined, crop_factor, bbox_fill, drop_size, contour_fill=False): + if (len(mask.shape) == 4 and mask.shape[1] > 1) or (len(mask.shape) == 3 and mask.shape[0] > 1): + mask = utils.make_3d_mask(mask) + if contour_fill: + logging.info("[Impact Pack] MaskToSEGS_for_AnimateDiff: 'contour_fill' is ignored because batch mask 'contour_fill' is not supported.") + result = core.batch_mask_to_segs(mask, combined, crop_factor, bbox_fill, drop_size) + return (result, ) + + mask = utils.make_2d_mask(mask) + segs = core.mask_to_segs(mask, combined, crop_factor, bbox_fill, drop_size, is_contour=contour_fill) + all_masks = SEGSToMaskList().doit(segs)[0] + + result_mask = (all_masks[0] * 255).to(torch.uint8) + for mask in all_masks[1:]: + result_mask |= (mask * 255).to(torch.uint8) + + result_mask = (result_mask/255.0).to(torch.float32) + result_mask = utils.to_binary_mask(result_mask, 0.1)[0] + + return MaskToSEGS.doit(result_mask, False, crop_factor, False, drop_size, contour_fill) + + +class IPAdapterApplySEGS: + @classmethod + def INPUT_TYPES(s): + return {"required": { + "segs": ("SEGS",), + "ipadapter_pipe": ("IPADAPTER_PIPE",), + "weight": ("FLOAT", {"default": 0.7, "min": -1, "max": 3, "step": 0.05}), + "noise": ("FLOAT", {"default": 0.4, "min": 0.0, "max": 1.0, "step": 0.01}), + "weight_type": (["original", "linear", "channel penalty"], {"default": 'channel penalty'}), + "start_at": ("FLOAT", {"default": 0.0, "min": 0.0, "max": 1.0, "step": 0.001}), + "end_at": ("FLOAT", {"default": 0.9, "min": 0.0, "max": 1.0, "step": 0.001}), + "unfold_batch": ("BOOLEAN", {"default": False}), + "faceid_v2": ("BOOLEAN", {"default": False}), + "weight_v2": ("FLOAT", {"default": 1.0, "min": -1, "max": 3, "step": 0.05}), + "context_crop_factor": ("FLOAT", {"default": 1.2, "min": 1.0, "max": 100, "step": 0.1}), + "reference_image": ("IMAGE",), + }, + "optional": { + "combine_embeds": (["concat", "add", "subtract", "average", "norm average"],), + "neg_image": ("IMAGE",), + }, + } + + RETURN_TYPES = ("SEGS",) + FUNCTION = "doit" + + CATEGORY = "ImpactPack/Util" + + @staticmethod + def doit(segs, ipadapter_pipe, weight, noise, weight_type, start_at, end_at, unfold_batch, faceid_v2, weight_v2, context_crop_factor, reference_image, combine_embeds="concat", neg_image=None): + + if len(ipadapter_pipe) == 4: + logging.info("[Impact Pack] IPAdapterApplySEGS: Installed Inspire Pack is outdated.") + raise Exception("Inspire Pack is 
outdated.") + + new_segs = [] + + h, w = segs[0] + + if reference_image.shape[2] != w or reference_image.shape[1] != h: + reference_image = utils.tensor_resize(reference_image, w, h) + + for seg in segs[1]: + # The context_crop_region sets how much wider the IPAdapter context will reflect compared to the crop_region, not the bbox + context_crop_region = utils.make_crop_region(w, h, seg.crop_region, context_crop_factor) + cropped_image = utils.crop_image(reference_image, context_crop_region) + + control_net_wrapper = core.IPAdapterWrapper(ipadapter_pipe, weight, noise, weight_type, start_at, end_at, unfold_batch, weight_v2, cropped_image, neg_image=neg_image, prev_control_net=seg.control_net_wrapper, combine_embeds=combine_embeds) + new_seg = SEG(seg.cropped_image, seg.cropped_mask, seg.confidence, seg.crop_region, seg.bbox, seg.label, control_net_wrapper) + new_segs.append(new_seg) + + return ((segs[0], new_segs), ) + + +class ControlNetApplySEGS: + @classmethod + def INPUT_TYPES(s): + return {"required": { + "segs": ("SEGS",), + "control_net": ("CONTROL_NET",), + "strength": ("FLOAT", {"default": 1.0, "min": 0.0, "max": 10.0, "step": 0.01}), + }, + "optional": { + "segs_preprocessor": ("SEGS_PREPROCESSOR",), + "control_image": ("IMAGE",) + } + } + + RETURN_TYPES = ("SEGS",) + FUNCTION = "doit" + + DEPRECATED = True + + CATEGORY = "ImpactPack/Util" + + @staticmethod + def doit(segs, control_net, strength, segs_preprocessor=None, control_image=None): + new_segs = [] + + for seg in segs[1]: + control_net_wrapper = core.ControlNetWrapper(control_net, strength, segs_preprocessor, seg.control_net_wrapper, + original_size=segs[0], crop_region=seg.crop_region, control_image=control_image) + new_seg = SEG(seg.cropped_image, seg.cropped_mask, seg.confidence, seg.crop_region, seg.bbox, seg.label, control_net_wrapper) + new_segs.append(new_seg) + + return ((segs[0], new_segs), ) + + +class ControlNetApplyAdvancedSEGS: + @classmethod + def INPUT_TYPES(s): + return {"required": { + "segs": ("SEGS",), + "control_net": ("CONTROL_NET",), + "strength": ("FLOAT", {"default": 1.0, "min": 0.0, "max": 10.0, "step": 0.01}), + "start_percent": ("FLOAT", {"default": 0.0, "min": 0.0, "max": 1.0, "step": 0.001}), + "end_percent": ("FLOAT", {"default": 1.0, "min": 0.0, "max": 1.0, "step": 0.001}) + }, + "optional": { + "segs_preprocessor": ("SEGS_PREPROCESSOR",), + "control_image": ("IMAGE",), + "vae": ("VAE",) + } + } + + RETURN_TYPES = ("SEGS",) + FUNCTION = "doit" + + CATEGORY = "ImpactPack/Util" + + @staticmethod + def doit(segs, control_net, strength, start_percent, end_percent, segs_preprocessor=None, control_image=None, vae=None): + new_segs = [] + + for seg in segs[1]: + control_net_wrapper = core.ControlNetAdvancedWrapper(control_net, strength, start_percent, end_percent, segs_preprocessor, + seg.control_net_wrapper, original_size=segs[0], crop_region=seg.crop_region, + control_image=control_image, vae=vae) + new_seg = SEG(seg.cropped_image, seg.cropped_mask, seg.confidence, seg.crop_region, seg.bbox, seg.label, control_net_wrapper) + new_segs.append(new_seg) + + return ((segs[0], new_segs), ) + + +class ControlNetClearSEGS: + @classmethod + def INPUT_TYPES(s): + return {"required": {"segs": ("SEGS",), }, } + + RETURN_TYPES = ("SEGS",) + FUNCTION = "doit" + + CATEGORY = "ImpactPack/Util" + + @staticmethod + def doit(segs): + new_segs = [] + + for seg in segs[1]: + new_seg = SEG(seg.cropped_image, seg.cropped_mask, seg.confidence, seg.crop_region, seg.bbox, seg.label, None) + new_segs.append(new_seg) + + 
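+        # SEG elements are treated as immutable records (`_replace` is used on them
+        # elsewhere in this module), so each one is rebuilt with
+        # control_net_wrapper=None instead of being mutated in place.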
return ((segs[0], new_segs), ) + + +class SEGSSwitch: + @classmethod + def INPUT_TYPES(s): + return {"required": { + "select": ("INT", {"default": 1, "min": 1, "max": 99999, "step": 1}), + "segs1": ("SEGS",), + }, + } + + RETURN_TYPES = ("SEGS", ) + + OUTPUT_NODE = True + + FUNCTION = "doit" + + CATEGORY = "ImpactPack/Util" + + def doit(self, *args, **kwargs): + input_name = f"segs{int(kwargs['select'])}" + + if input_name in kwargs: + return (kwargs[input_name],) + else: + logging.info("SEGSSwitch: invalid select index ('segs1' is selected)") + return (kwargs['segs1'],) + + +class SEGSPicker: + @classmethod + def INPUT_TYPES(s): + return {"required": { + "picks": ("STRING", {"multiline": True, "dynamicPrompts": False, "pysssss.autocomplete": False}), + "segs": ("SEGS",), + }, + "optional": { + "fallback_image_opt": ("IMAGE", ), + }, + "hidden": {"unique_id": "UNIQUE_ID"}, + } + + RETURN_TYPES = ("SEGS", ) + + FUNCTION = "doit" + + CATEGORY = "ImpactPack/Util" + + DESCRIPTION = "This node provides a function to select only the chosen SEGS from the input SEGS." + + @staticmethod + def doit(picks, segs, fallback_image_opt=None, unique_id=None): + if fallback_image_opt is not None: + segs = core.segs_scale_match(segs, fallback_image_opt.shape) + + # generate candidates image + cands = [] + for seg in segs[1]: + if seg.cropped_image is not None: + cropped_image = seg.cropped_image + elif fallback_image_opt is not None: + # take from original image + cropped_image = utils.crop_image(fallback_image_opt, seg.crop_region) + else: + cropped_image = utils.empty_pil_tensor() + + mask_array = seg.cropped_mask.copy() + mask_array[mask_array < 0.3] = 0.3 + mask_array = mask_array[None, ..., None] + cropped_image = cropped_image * mask_array + + cands.append(cropped_image) + + impact.impact_server.segs_picker_map[unique_id] = cands + + # pass only selected + pick_ids = set() + + for pick in picks.split(","): + try: + pick_ids.add(int(pick)-1) + except Exception: + pass + + new_segs = [] + for i in pick_ids: + if 0 <= i < len(segs[1]): + new_segs.append(segs[1][i]) + + return ((segs[0], new_segs),) + + +class DefaultImageForSEGS: + @classmethod + def INPUT_TYPES(s): + return {"required": { + "segs": ("SEGS", ), + "image": ("IMAGE", ), + "override": ("BOOLEAN", {"default": True}), + }} + + RETURN_TYPES = ("SEGS", ) + FUNCTION = "doit" + + CATEGORY = "ImpactPack/Util" + + DESCRIPTION = "If the SEGS have not passed through the detailer, they contain only detection area information without an image. This node sets a default image for the SEGS." 
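+    # Illustrative note (assumed example, not executed): for SEGS fresh from a
+    # detector (no cropped_image yet) and a 2-image input batch, each output SEG
+    # receives a [2, crop_h, crop_w, 3] cropped_image, assembled below by
+    # concatenating the per-batch crops.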
+
+    @staticmethod
+    def doit(segs, image, override):
+        results = []
+
+        segs = core.segs_scale_match(segs, image.shape)
+
+        if len(segs[1]) > 0:
+            if segs[1][0].cropped_image is not None:
+                batch_count = len(segs[1][0].cropped_image)
+            else:
+                batch_count = len(image)
+
+            for seg in segs[1]:
+                if seg.cropped_image is not None and not override:
+                    cropped_image = seg.cropped_image
+                else:
+                    cropped_image = None
+                    for i in range(0, batch_count):
+                        # take from original image
+                        ref_image = image[i].unsqueeze(0)
+                        cropped_image2 = utils.crop_image(ref_image, seg.crop_region)
+
+                        if cropped_image is None:
+                            cropped_image = cropped_image2
+                        else:
+                            cropped_image = torch.cat((cropped_image, cropped_image2), dim=0)
+
+                new_seg = SEG(cropped_image, seg.cropped_mask, seg.confidence, seg.crop_region, seg.bbox, seg.label, seg.control_net_wrapper)
+                results.append(new_seg)
+
+            return ((segs[0], results), )
+        else:
+            return (segs, )
+
+
+class RemoveImageFromSEGS:
+    @classmethod
+    def INPUT_TYPES(s):
+        return {"required": {"segs": ("SEGS", ), }}
+
+    RETURN_TYPES = ("SEGS", )
+    FUNCTION = "doit"
+
+    CATEGORY = "ImpactPack/Util"
+
+    @staticmethod
+    def doit(segs):
+        results = []
+
+        if len(segs[1]) > 0:
+            for seg in segs[1]:
+                new_seg = SEG(None, seg.cropped_mask, seg.confidence, seg.crop_region, seg.bbox, seg.label, seg.control_net_wrapper)
+                results.append(new_seg)
+
+            return ((segs[0], results), )
+        else:
+            return (segs, )
+
+
+class MakeTileSEGS:
+    @classmethod
+    def INPUT_TYPES(s):
+        return {"required": {
+                    "images": ("IMAGE", ),
+                    "bbox_size": ("INT", {"default": 512, "min": 64, "max": 4096, "step": 8}),
+                    "crop_factor": ("FLOAT", {"default": 3.0, "min": 1.0, "max": 10, "step": 0.01}),
+                    "min_overlap": ("INT", {"default": 5, "min": 0, "max": 512, "step": 1}),
+                    "filter_segs_dilation": ("INT", {"default": 20, "min": -255, "max": 255, "step": 1}),
+                    "mask_irregularity": ("FLOAT", {"default": 0, "min": 0, "max": 1.0, "step": 0.01}),
+                    "irregular_mask_mode": (["Reuse fast", "Reuse quality", "All random fast", "All random quality"],)
+                    },
+                "optional": {
+                    "filter_in_segs_opt": ("SEGS", ),
+                    "filter_out_segs_opt": ("SEGS", ),
+                    }
+                }
+
+    RETURN_TYPES = ("SEGS",)
+
+    FUNCTION = "doit"
+
+    CATEGORY = "ImpactPack/__for_testing"
+
+    @staticmethod
+    def doit(images, bbox_size, crop_factor, min_overlap, filter_segs_dilation, mask_irregularity=0, irregular_mask_mode="Reuse fast", filter_in_segs_opt=None, filter_out_segs_opt=None):
+        if bbox_size <= 2*min_overlap:
+            new_min_overlap = bbox_size / 2
+            logging.info(f"[MakeTileSEGS] min_overlap should be less than half of bbox_size. (value changed: {min_overlap} => {new_min_overlap})")
+            min_overlap = new_min_overlap
+
+        _, ih, iw, _ = images.size()
+
+        mask_cache = None
+        mask_quality = 512
+        if mask_irregularity > 0:
+            if irregular_mask_mode == "Reuse fast":
+                mask_quality = 128
+                mask_cache = np.zeros((128, 128)).astype(np.float32)
+                core.random_mask(mask_cache, (0, 0, 128, 128), factor=mask_irregularity, size=mask_quality)
+            elif irregular_mask_mode == "Reuse quality":
+                mask_quality = 512
+                mask_cache = np.zeros((512, 512)).astype(np.float32)
+                core.random_mask(mask_cache, (0, 0, 512, 512), factor=mask_irregularity, size=mask_quality)
+            elif irregular_mask_mode == "All random fast":
+                mask_quality = 512
+
+        # compensate overlap/bbox_size for irregular mask
+        if mask_irregularity > 0:
+            compensate = max(6, int(mask_quality * mask_irregularity / 4))
+            min_overlap += compensate
+            bbox_size += compensate*2
+
+        # create exclusion mask
+        if filter_out_segs_opt is not None:
+            exclusion_mask = core.segs_to_combined_mask(filter_out_segs_opt)
+            exclusion_mask = utils.make_3d_mask(exclusion_mask)
+            exclusion_mask = utils.resize_mask(exclusion_mask, (ih, iw))
+            exclusion_mask = utils.dilate_mask(exclusion_mask.cpu().numpy(), filter_segs_dilation)
+        else:
+            exclusion_mask = None
+
+        if filter_in_segs_opt is not None:
+            and_mask = core.segs_to_combined_mask(filter_in_segs_opt)
+            and_mask = utils.make_3d_mask(and_mask)
+            and_mask = utils.resize_mask(and_mask, (ih, iw))
+            and_mask = utils.dilate_mask(and_mask.cpu().numpy(), filter_segs_dilation)
+
+            a, b = core.mask_to_segs(and_mask, True, 1.0, False, 0)
+            if len(b) == 0:
+                return ((a, b),)
+
+            start_x, start_y, c, d = b[0].crop_region
+            w = c - start_x
+            h = d - start_y
+        else:
+            start_x = 0
+            start_y = 0
+            h, w = ih, iw
+            and_mask = None
+
+        # calculate tile factors
+        if bbox_size > h or bbox_size > w:
+            new_bbox_size = min(bbox_size, min(w, h))
+            logging.info(f"[MakeTileSEGS] bbox_size is greater than the resolution (value changed: {bbox_size} => {new_bbox_size})")
+            bbox_size = new_bbox_size
+
+        n_horizontal = math.ceil(w / (bbox_size - min_overlap))
+        n_vertical = math.ceil(h / (bbox_size - min_overlap))
+
+        w_overlap_sum = (bbox_size * n_horizontal) - w
+        if w_overlap_sum < 0:
+            n_horizontal += 1
+            w_overlap_sum = (bbox_size * n_horizontal) - w
+
+        w_overlap_size = 0 if n_horizontal == 1 else int(w_overlap_sum/(n_horizontal-1))
+
+        h_overlap_sum = (bbox_size * n_vertical) - h
+        if h_overlap_sum < 0:
+            n_vertical += 1
+            h_overlap_sum = (bbox_size * n_vertical) - h
+
+        h_overlap_size = 0 if n_vertical == 1 else int(h_overlap_sum/(n_vertical-1))
+
+        new_segs = []
+
+        if w_overlap_size == bbox_size:
+            n_horizontal = 1
+
+        if h_overlap_size == bbox_size:
+            n_vertical = 1
+
+        y = start_y
+        for j in range(0, n_vertical):
+            x = start_x
+            for i in range(0, n_horizontal):
+                x1 = x
+                y1 = y
+
+                if x+bbox_size < iw-1:
+                    x2 = x+bbox_size
+                else:
+                    x2 = iw
+                    x1 = iw-bbox_size
+
+                if y+bbox_size < ih-1:
+                    y2 = y+bbox_size
+                else:
+                    y2 = ih
+                    y1 = ih-bbox_size
+
+                bbox = x1, y1, x2, y2
+                crop_region = utils.make_crop_region(iw, ih, bbox, crop_factor)
+                cx1, cy1, cx2, cy2 = crop_region
+
+                mask = np.zeros((cy2 - cy1, cx2 - cx1)).astype(np.float32)
+
+                rel_left = x1 - cx1
+                rel_top = y1 - cy1
+                rel_right = x2 - cx1
+                rel_bot = y2 - cy1
+
+                if mask_irregularity > 0:
+                    if mask_cache is not None:
+                        core.adaptive_mask_paste(mask, mask_cache, (rel_left, rel_top, rel_right, rel_bot))
+                    else:
+                        core.random_mask(mask, (rel_left, rel_top, rel_right, rel_bot), factor=mask_irregularity, size=mask_quality)
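+                # Worked example for the tile-count math above (illustrative numbers
+                # only): with w=1536, bbox_size=512, min_overlap=64:
+                #   n_horizontal   = ceil(1536 / (512 - 64)) = 4
+                #   w_overlap_sum  = 512 * 4 - 1536 = 512
+                #   w_overlap_size = 512 // 3 = 170, so tiles step by 512 - 170 = 342 px.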
+ + # corner filling + if rel_left == 0: + pad = int((x2 - x1) / 8) + mask[rel_top:rel_bot, :pad] = 1.0 + + if rel_top == 0: + pad = int((y2 - y1) / 8) + mask[:pad, rel_left:rel_right] = 1.0 + + if rel_right == mask.shape[1]: + pad = int((x2 - x1) / 8) + mask[rel_top:rel_bot, -pad:] = 1.0 + + if rel_bot == mask.shape[0]: + pad = int((y2 - y1) / 8) + mask[-pad:, rel_left:rel_right] = 1.0 + else: + mask[rel_top:rel_bot, rel_left:rel_right] = 1.0 + + mask = torch.tensor(mask) + + if exclusion_mask is not None: + exclusion_mask_cropped = exclusion_mask[cy1:cy2, cx1:cx2] + mask[exclusion_mask_cropped != 0] = 0.0 + + if and_mask is not None: + and_mask_cropped = and_mask[cy1:cy2, cx1:cx2] + mask[and_mask_cropped == 0] = 0.0 + + is_mask_zero = torch.all(mask == 0.0).item() + + if not is_mask_zero: + item = SEG(None, mask.numpy(), 1.0, crop_region, bbox, "", None) + new_segs.append(item) + + x += bbox_size - w_overlap_size + y += bbox_size - h_overlap_size + + res = (ih, iw), new_segs # segs + return (res,) + + +class SEGSUpscaler: + @classmethod + def INPUT_TYPES(s): + resampling_methods = ["lanczos", "nearest", "bilinear", "bicubic"] + + return {"required": { + "image": ("IMAGE",), + "segs": ("SEGS",), + "model": ("MODEL",), + "clip": ("CLIP",), + "vae": ("VAE",), + "rescale_factor": ("FLOAT", {"default": 2, "min": 0.01, "max": 100.0, "step": 0.01}), + "resampling_method": (resampling_methods,), + "supersample": (["true", "false"],), + "rounding_modulus": ("INT", {"default": 8, "min": 8, "max": 1024, "step": 8}), + "seed": ("INT", {"default": 0, "min": 0, "max": 0xffffffffffffffff}), + "steps": ("INT", {"default": 20, "min": 1, "max": 10000}), + "cfg": ("FLOAT", {"default": 8.0, "min": 0.0, "max": 100.0}), + "sampler_name": (comfy.samplers.KSampler.SAMPLERS,), + "scheduler": (core.get_schedulers(),), + "positive": ("CONDITIONING",), + "negative": ("CONDITIONING",), + "denoise": ("FLOAT", {"default": 0.5, "min": 0.0001, "max": 1.0, "step": 0.01}), + "feather": ("INT", {"default": 5, "min": 0, "max": 100, "step": 1}), + "inpaint_model": ("BOOLEAN", {"default": False, "label_on": "enabled", "label_off": "disabled"}), + "noise_mask_feather": ("INT", {"default": 20, "min": 0, "max": 100, "step": 1}), + }, + "optional": { + "upscale_model_opt": ("UPSCALE_MODEL",), + "upscaler_hook_opt": ("UPSCALER_HOOK",), + "scheduler_func_opt": ("SCHEDULER_FUNC",), + } + } + + RETURN_TYPES = ("IMAGE",) + FUNCTION = "doit" + + CATEGORY = "ImpactPack/Upscale" + + @staticmethod + def doit(image, segs, model, clip, vae, rescale_factor, resampling_method, supersample, rounding_modulus, + seed, steps, cfg, sampler_name, scheduler, positive, negative, denoise, feather, inpaint_model, noise_mask_feather, + upscale_model_opt=None, upscaler_hook_opt=None, scheduler_func_opt=None): + + new_image = segs_upscaler.upscaler(image, upscale_model_opt, rescale_factor, resampling_method, supersample, rounding_modulus) + + segs = core.segs_scale_match(segs, new_image.shape) + + ordered_segs = segs[1] + + for i, seg in enumerate(ordered_segs): + cropped_image = utils.crop_ndarray4(new_image.numpy(), seg.crop_region) + cropped_image = utils.to_tensor(cropped_image) + mask = utils.to_tensor(seg.cropped_mask) + mask = utils.tensor_gaussian_blur_mask(mask, feather) + + is_mask_all_zeros = (seg.cropped_mask == 0).all().item() + if is_mask_all_zeros: + logging.info("SEGSUpscaler: segment skip [empty mask]") + continue + + cropped_mask = seg.cropped_mask + + seg_seed = seed + i + + enhanced_image = segs_upscaler.img2img_segs(cropped_image, 
model, clip, vae, seg_seed, steps, cfg, sampler_name, scheduler, + positive, negative, denoise, + noise_mask=cropped_mask, control_net_wrapper=seg.control_net_wrapper, + inpaint_model=inpaint_model, noise_mask_feather=noise_mask_feather, scheduler_func_opt=scheduler_func_opt) + if enhanced_image is not None: + new_image = new_image.cpu() + enhanced_image = enhanced_image.cpu() + left = seg.crop_region[0] + top = seg.crop_region[1] + utils.tensor_paste(new_image, enhanced_image, (left, top), mask) + + if upscaler_hook_opt is not None: + new_image = upscaler_hook_opt.post_paste(new_image) + + enhanced_img = utils.tensor_convert_rgb(new_image) + + return (enhanced_img,) + + +class SEGSUpscalerPipe: + @classmethod + def INPUT_TYPES(s): + resampling_methods = ["lanczos", "nearest", "bilinear", "bicubic"] + + return {"required": { + "image": ("IMAGE",), + "segs": ("SEGS",), + "basic_pipe": ("BASIC_PIPE",), + "rescale_factor": ("FLOAT", {"default": 2, "min": 0.01, "max": 100.0, "step": 0.01}), + "resampling_method": (resampling_methods,), + "supersample": (["true", "false"],), + "rounding_modulus": ("INT", {"default": 8, "min": 8, "max": 1024, "step": 8}), + "seed": ("INT", {"default": 0, "min": 0, "max": 0xffffffffffffffff}), + "steps": ("INT", {"default": 20, "min": 1, "max": 10000}), + "cfg": ("FLOAT", {"default": 8.0, "min": 0.0, "max": 100.0}), + "sampler_name": (comfy.samplers.KSampler.SAMPLERS,), + "scheduler": (core.get_schedulers(),), + "denoise": ("FLOAT", {"default": 0.5, "min": 0.0001, "max": 1.0, "step": 0.01}), + "feather": ("INT", {"default": 5, "min": 0, "max": 100, "step": 1}), + "inpaint_model": ("BOOLEAN", {"default": False, "label_on": "enabled", "label_off": "disabled"}), + "noise_mask_feather": ("INT", {"default": 20, "min": 0, "max": 100, "step": 1}), + }, + "optional": { + "upscale_model_opt": ("UPSCALE_MODEL",), + "upscaler_hook_opt": ("UPSCALER_HOOK",), + "scheduler_func_opt": ("SCHEDULER_FUNC",), + } + } + + RETURN_TYPES = ("IMAGE",) + FUNCTION = "doit" + + CATEGORY = "ImpactPack/Upscale" + + @staticmethod + def doit(image, segs, basic_pipe, rescale_factor, resampling_method, supersample, rounding_modulus, + seed, steps, cfg, sampler_name, scheduler, denoise, feather, inpaint_model, noise_mask_feather, + upscale_model_opt=None, upscaler_hook_opt=None, scheduler_func_opt=None): + + model, clip, vae, positive, negative = basic_pipe + + return SEGSUpscaler.doit(image, segs, model, clip, vae, rescale_factor, resampling_method, supersample, rounding_modulus, + seed, steps, cfg, sampler_name, scheduler, positive, negative, denoise, feather, inpaint_model, noise_mask_feather, + upscale_model_opt=upscale_model_opt, upscaler_hook_opt=upscaler_hook_opt, scheduler_func_opt=scheduler_func_opt) diff --git a/zavodik/nodes/ComfyUI-Impact-Pack/modules/impact/segs_upscaler.py b/zavodik/nodes/ComfyUI-Impact-Pack/modules/impact/segs_upscaler.py new file mode 100644 index 0000000000000000000000000000000000000000..e622536d961b796fe82b7717033f4938bd0b6339 --- /dev/null +++ b/zavodik/nodes/ComfyUI-Impact-Pack/modules/impact/segs_upscaler.py @@ -0,0 +1,140 @@ +from impact import impact_sampling +from comfy import model_management +from impact import utils +from PIL import Image +import nodes +import torch +import inspect +import logging +import comfy + +try: + from comfy_extras import nodes_differential_diffusion +except Exception: + logging.info("[Impact Pack] ComfyUI is an outdated version. 
The DifferentialDiffusion feature will be disabled.") + + +# Implementation based on `https://github.com/lingondricka2/Upscaler-Detailer` + +# code from comfyroll ---> +# https://github.com/Suzie1/ComfyUI_Comfyroll_CustomNodes/blob/main/nodes/functions_upscale.py + +def upscale_with_model(upscale_model, image): + device = model_management.get_torch_device() + upscale_model.to(device) + in_img = image.movedim(-1, -3).to(device) + + tile = 512 + overlap = 32 + + oom = True + while oom: + try: + steps = in_img.shape[0] * comfy.utils.get_tiled_scale_steps(in_img.shape[3], in_img.shape[2], tile_x=tile, tile_y=tile, overlap=overlap) + pbar = comfy.utils.ProgressBar(steps) + s = comfy.utils.tiled_scale(in_img, lambda a: upscale_model(a), tile_x=tile, tile_y=tile, overlap=overlap, upscale_amount=upscale_model.scale, pbar=pbar) + oom = False + except model_management.OOM_EXCEPTION as e: + tile //= 2 + if tile < 128: + raise e + + s = torch.clamp(s.movedim(-3, -1), min=0, max=1.0) + return s + + +def apply_resize_image(image: Image.Image, original_width, original_height, rounding_modulus, mode='scale', supersample='true', factor: int = 2, width: int = 1024, height: int = 1024, + resample='bicubic'): + # Calculate the new width and height based on the given mode and parameters + if mode == 'rescale': + new_width, new_height = int(original_width * factor), int(original_height * factor) + else: + m = rounding_modulus + original_ratio = original_height / original_width + height = int(width * original_ratio) + + new_width = width if width % m == 0 else width + (m - width % m) + new_height = height if height % m == 0 else height + (m - height % m) + + # Define a dictionary of resampling filters + resample_filters = {'nearest': 0, 'bilinear': 2, 'bicubic': 3, 'lanczos': 1} + + # Apply supersample + if supersample == 'true': + image = image.resize((new_width * 8, new_height * 8), resample=Image.Resampling(resample_filters[resample])) + + # Resize the image using the given resampling filter + resized_image = image.resize((new_width, new_height), resample=Image.Resampling(resample_filters[resample])) + + return resized_image + + +def upscaler(image, upscale_model, rescale_factor, resampling_method, supersample, rounding_modulus): + if upscale_model is not None: + up_image = upscale_with_model(upscale_model, image) + else: + up_image = image + + pil_img = utils.tensor2pil(image) + original_width, original_height = pil_img.size + scaled_image = utils.pil2tensor(apply_resize_image(utils.tensor2pil(up_image), original_width, original_height, rounding_modulus, 'rescale', + supersample, rescale_factor, 1024, resampling_method)) + return scaled_image + +# <--- + + +def img2img_segs(image, model, clip, vae, seed, steps, cfg, sampler_name, scheduler, + positive, negative, denoise, noise_mask, control_net_wrapper=None, + inpaint_model=False, noise_mask_feather=0, scheduler_func_opt=None): + + original_image_size = image.shape[1:3] + + # Match to original image size + if original_image_size[0] % 8 > 0 or original_image_size[1] % 8 > 0: + scale = 8/min(original_image_size[0], original_image_size[1]) + 1 + w = int(original_image_size[1] * scale) + h = int(original_image_size[0] * scale) + image = utils.tensor_resize(image, w, h) + + if noise_mask is not None: + noise_mask = utils.tensor_gaussian_blur_mask(noise_mask, noise_mask_feather) + noise_mask = noise_mask.squeeze(3) + + if noise_mask_feather > 0 and 'denoise_mask_function' not in model.model_options: + model = 
nodes_differential_diffusion.DifferentialDiffusion().execute(model)[0]
+
+    if control_net_wrapper is not None:
+        positive, negative, _ = control_net_wrapper.apply(positive, negative, image, noise_mask)
+
+    # prepare mask
+    if noise_mask is not None and inpaint_model:
+        imc_encode = nodes.InpaintModelConditioning().encode
+        if 'noise_mask' in inspect.signature(imc_encode).parameters:
+            positive, negative, latent_image = imc_encode(positive, negative, image, vae, mask=noise_mask, noise_mask=True)
+        else:
+            logging.info("[Impact Pack] ComfyUI is an outdated version.")
+            positive, negative, latent_image = imc_encode(positive, negative, image, vae, noise_mask)
+    else:
+        latent_image = utils.to_latent_image(image, vae)
+        if noise_mask is not None:
+            latent_image['noise_mask'] = noise_mask
+
+    refined_latent = latent_image
+
+    # ksampler
+    refined_latent = impact_sampling.ksampler_wrapper(model, seed, steps, cfg, sampler_name, scheduler, positive, negative, refined_latent, denoise, scheduler_func=scheduler_func_opt)
+
+    # non-latent downscale - latent downscale causes bad quality
+    refined_image = vae.decode(refined_latent['samples'])
+
+    # prevent mixing of devices
+    refined_image = refined_image.cpu()
+
+    # Match to original image size
+    if refined_image.shape[1:3] != original_image_size:
+        refined_image = utils.tensor_resize(refined_image, original_image_size[1], original_image_size[0])
+
+    # don't convert to latent - latent conversion breaks the image
+    # preserving PIL pixels is much better
+    return refined_image
diff --git a/zavodik/nodes/ComfyUI-Impact-Pack/modules/impact/special_samplers.py b/zavodik/nodes/ComfyUI-Impact-Pack/modules/impact/special_samplers.py
new file mode 100644
index 0000000000000000000000000000000000000000..b6449ad4583e799af2cbece88849dca4deec6edf
--- /dev/null
+++ b/zavodik/nodes/ComfyUI-Impact-Pack/modules/impact/special_samplers.py
@@ -0,0 +1,686 @@
+import math
+import impact.core as core
+from comfy_extras.nodes_custom_sampler import Noise_RandomNoise
+from nodes import MAX_RESOLUTION
+import nodes
+from impact.impact_sampling import KSamplerWrapper, KSamplerAdvancedWrapper, separated_sample, impact_sample
+import comfy
+import torch
+import numpy as np
+import logging
+
+
+class TiledKSamplerProvider:
+    @classmethod
+    def INPUT_TYPES(s):
+        return {"required": {
+                    "seed": ("INT", {"default": 0, "min": 0, "max": 0xffffffffffffffff, "tooltip": "Random seed to use for generating CPU noise for sampling."}),
+                    "steps": ("INT", {"default": 20, "min": 1, "max": 10000, "tooltip": "total sampling steps"}),
+                    "cfg": ("FLOAT", {"default": 8.0, "min": 0.0, "max": 100.0, "tooltip": "classifier free guidance value"}),
+                    "sampler_name": (comfy.samplers.KSampler.SAMPLERS, {"tooltip": "sampler"}),
+                    "scheduler": (comfy.samplers.KSampler.SCHEDULERS, {"tooltip": "noise schedule"}),
+                    "denoise": ("FLOAT", {"default": 1.0, "min": 0.0, "max": 1.0, "step": 0.01, "tooltip": "The amount of noise to remove. 
This amount is the noise added at the start, and the higher it is, the more the input latent will be modified before being returned."}), + "tile_width": ("INT", {"default": 512, "min": 320, "max": MAX_RESOLUTION, "step": 64, "tooltip": "Sets the width of the tile to be used in TiledKSampler."}), + "tile_height": ("INT", {"default": 512, "min": 320, "max": MAX_RESOLUTION, "step": 64, "tooltip": "Sets the height of the tile to be used in TiledKSampler."}), + "tiling_strategy": (["random", "padded", 'simple'], {"tooltip": "Sets the tiling strategy for TiledKSampler."} ), + "basic_pipe": ("BASIC_PIPE", {"tooltip": "basic_pipe input for sampling"}) + }} + + OUTPUT_TOOLTIPS = ("sampler wrapper. (Can be used when generating a regional_prompt.)", ) + + RETURN_TYPES = ("KSAMPLER",) + FUNCTION = "doit" + + CATEGORY = "ImpactPack/Sampler" + + @staticmethod + def doit(seed, steps, cfg, sampler_name, scheduler, denoise, + tile_width, tile_height, tiling_strategy, basic_pipe): + model, _, _, positive, negative = basic_pipe + sampler = core.TiledKSamplerWrapper(model, seed, steps, cfg, sampler_name, scheduler, positive, negative, denoise, + tile_width, tile_height, tiling_strategy) + return (sampler, ) + + +class KSamplerProvider: + @classmethod + def INPUT_TYPES(s): + return {"required": { + "seed": ("INT", {"default": 0, "min": 0, "max": 0xffffffffffffffff, "tooltip": "Random seed to use for generating CPU noise for sampling."}), + "steps": ("INT", {"default": 20, "min": 1, "max": 10000, "tooltip": "total sampling steps"}), + "cfg": ("FLOAT", {"default": 8.0, "min": 0.0, "max": 100.0, "tooltip": "classifier free guidance value"}), + "sampler_name": (comfy.samplers.KSampler.SAMPLERS, {"tooltip": "sampler"}), + "scheduler": (core.get_schedulers(), {"tooltip": "noise schedule"}), + "denoise": ("FLOAT", {"default": 1.0, "min": 0.0, "max": 1.0, "step": 0.01, "tooltip": "The amount of noise to remove. This amount is the noise added at the start, and the higher it is, the more the input latent will be modified before being returned."}), + "basic_pipe": ("BASIC_PIPE", {"tooltip": "basic_pipe input for sampling"}) + }, + "optional": { + "scheduler_func_opt": ("SCHEDULER_FUNC", {"tooltip": "[OPTIONAL] Noise schedule generation function. If this is set, the scheduler widget will be ignored."}), + } + } + + OUTPUT_TOOLTIPS = ("sampler wrapper. 
(Can be used when generating a regional_prompt.)",) + + RETURN_TYPES = ("KSAMPLER",) + FUNCTION = "doit" + + CATEGORY = "ImpactPack/Sampler" + + @staticmethod + def doit(seed, steps, cfg, sampler_name, scheduler, denoise, basic_pipe, scheduler_func_opt=None): + model, _, _, positive, negative = basic_pipe + sampler = KSamplerWrapper(model, seed, steps, cfg, sampler_name, scheduler, positive, negative, denoise, scheduler_func=scheduler_func_opt) + return (sampler, ) + + +class KSamplerAdvancedProvider: + @classmethod + def INPUT_TYPES(s): + return {"required": { + "cfg": ("FLOAT", {"default": 8.0, "min": 0.0, "max": 100.0, "toolip": "classifier free guidance value"}), + "sampler_name": (comfy.samplers.KSampler.SAMPLERS, {"toolip": "sampler"}), + "scheduler": (core.get_schedulers(), {"toolip": "noise schedule"}), + "sigma_factor": ("FLOAT", {"default": 1.0, "min": 0.0, "max": 10.0, "step": 0.01, "toolip": "Multiplier of noise schedule"}), + "basic_pipe": ("BASIC_PIPE", {"toolip": "basic_pipe input for sampling"}) + }, + "optional": { + "sampler_opt": ("SAMPLER", {"toolip": "[OPTIONAL] Uses the passed sampler instead of internal impact_sampler."}), + "scheduler_func_opt": ("SCHEDULER_FUNC", {"toolip": "[OPTIONAL] Noise schedule generation function. If this is set, the scheduler widget will be ignored."}), + } + } + + OUTPUT_TOOLTIPS = ("sampler wrapper. (Can be used when generating a regional_prompt.)", ) + + RETURN_TYPES = ("KSAMPLER_ADVANCED",) + FUNCTION = "doit" + + CATEGORY = "ImpactPack/Sampler" + + @staticmethod + def doit(cfg, sampler_name, scheduler, basic_pipe, sigma_factor=1.0, sampler_opt=None, scheduler_func_opt=None): + model, _, _, positive, negative = basic_pipe + sampler = KSamplerAdvancedWrapper(model, cfg, sampler_name, scheduler, positive, negative, sampler_opt=sampler_opt, sigma_factor=sigma_factor, scheduler_func=scheduler_func_opt) + return (sampler, ) + + +class TwoSamplersForMask: + @classmethod + def INPUT_TYPES(s): + return {"required": { + "latent_image": ("LATENT", {"tooltip": "input latent image"}), + "base_sampler": ("KSAMPLER", {"tooltip": "Sampler to apply to the region outside the mask."}), + "mask_sampler": ("KSAMPLER", {"tooltip": "Sampler to apply to the masked region."}), + "mask": ("MASK", {"tooltip": "region mask"}) + }, + } + + OUTPUT_TOOLTIPS = ("result latent", ) + + RETURN_TYPES = ("LATENT", ) + FUNCTION = "doit" + + CATEGORY = "ImpactPack/Sampler" + + @staticmethod + def doit(latent_image, base_sampler, mask_sampler, mask): + inv_mask = torch.where(mask != 1.0, torch.tensor(1.0), torch.tensor(0.0)) + + latent_image['noise_mask'] = inv_mask + new_latent_image = base_sampler.sample(latent_image) + + new_latent_image['noise_mask'] = mask + new_latent_image = mask_sampler.sample(new_latent_image) + + del new_latent_image['noise_mask'] + + return (new_latent_image, ) + + +class TwoAdvancedSamplersForMask: + @classmethod + def INPUT_TYPES(s): + return {"required": { + "seed": ("INT", {"default": 0, "min": 0, "max": 0xffffffffffffffff, "tooltip": "Random seed to use for generating CPU noise for sampling."}), + "steps": ("INT", {"default": 20, "min": 1, "max": 10000, "tooltip": "total sampling steps"}), + "denoise": ("FLOAT", {"default": 1.0, "min": 0.0, "max": 1.0, "step": 0.01, "tooltip": "The amount of noise to remove. 
This amount is the noise added at the start, and the higher it is, the more the input latent will be modified before being returned."}), + "samples": ("LATENT", {"tooltip": "input latent image"}), + "base_sampler": ("KSAMPLER_ADVANCED", {"tooltip": "Sampler to apply to the region outside the mask."}), + "mask_sampler": ("KSAMPLER_ADVANCED", {"tooltip": "Sampler to apply to the masked region."}), + "mask": ("MASK", {"tooltip": "region mask"}), + "overlap_factor": ("INT", {"default": 10, "min": 0, "max": 10000, "tooltip": "To smooth the seams of the region boundaries, expand the mask by the overlap_factor amount to overlap with other regions."}) + }, + } + + OUTPUT_TOOLTIPS = ("result latent", ) + + RETURN_TYPES = ("LATENT", ) + FUNCTION = "doit" + + CATEGORY = "ImpactPack/Sampler" + + @staticmethod + def doit(seed, steps, denoise, samples, base_sampler, mask_sampler, mask, overlap_factor): + regional_prompts = RegionalPrompt().doit(mask=mask, advanced_sampler=mask_sampler)[0] + + return RegionalSampler().doit(seed=seed, seed_2nd=0, seed_2nd_mode="ignore", steps=steps, base_only_steps=1, + denoise=denoise, samples=samples, base_sampler=base_sampler, + regional_prompts=regional_prompts, overlap_factor=overlap_factor, + restore_latent=True, additional_mode="ratio between", + additional_sampler="AUTO", additional_sigma_ratio=0.3) + + +class RegionalPrompt: + @classmethod + def INPUT_TYPES(s): + return {"required": { + "mask": ("MASK", {"tooltip": "region mask"}), + "advanced_sampler": ("KSAMPLER_ADVANCED", {"tooltip": "sampler for specified region"}), + }, + "optional": { + "variation_seed": ("INT", {"default": 0, "min": 0, "max": 0xffffffffffffffff, "tooltip": "Sets the extra seed to be used for noise variation."}), + "variation_strength": ("FLOAT", {"default": 0.0, "min": 0.0, "max": 1.0, "step": 0.01, "tooltip": "Sets the strength of the noise variation."}), + "variation_method": (["linear", "slerp"], {"tooltip": "Sets how the original noise and extra noise are blended together."}), + } + } + + OUTPUT_TOOLTIPS = ("regional prompts. (Can be used in the RegionalSampler.)", ) + + RETURN_TYPES = ("REGIONAL_PROMPTS", ) + FUNCTION = "doit" + + CATEGORY = "ImpactPack/Regional" + + @staticmethod + def doit(mask, advanced_sampler, variation_seed=0, variation_strength=0.0, variation_method="linear"): + regional_prompt = core.REGIONAL_PROMPT(mask, advanced_sampler, variation_seed=variation_seed, variation_strength=variation_strength, variation_method=variation_method) + return ([regional_prompt], ) + + +class CombineRegionalPrompts: + @classmethod + def INPUT_TYPES(s): + return {"required": { + "regional_prompts1": ("REGIONAL_PROMPTS", {"tooltip": "input regional_prompts. (Connecting to the input slot increases the number of additional slots.)"}), + }, + } + + OUTPUT_TOOLTIPS = ("Combined REGIONAL_PROMPTS", ) + + RETURN_TYPES = ("REGIONAL_PROMPTS", ) + FUNCTION = "doit" + + CATEGORY = "ImpactPack/Regional" + + @staticmethod + def doit(**kwargs): + res = [] + for k, v in kwargs.items(): + res += v + + return (res, ) + + +class CombineConditionings: + @classmethod + def INPUT_TYPES(s): + return {"required": { + "conditioning1": ("CONDITIONING", { "tooltip": "input conditionings. 
(Connecting to the input slot increases the number of additional slots.)" }),
+                     },
+                }
+
+    OUTPUT_TOOLTIPS = ("Combined conditioning", )
+
+    RETURN_TYPES = ("CONDITIONING", )
+    FUNCTION = "doit"
+
+    CATEGORY = "ImpactPack/Util"
+
+    @staticmethod
+    def doit(**kwargs):
+        res = []
+        for k, v in kwargs.items():
+            res += v
+
+        return (res, )
+
+
+class ConcatConditionings:
+    @classmethod
+    def INPUT_TYPES(s):
+        return {"required": {
+                    "conditioning1": ("CONDITIONING", { "tooltip": "input conditionings. (Connecting to the input slot increases the number of additional slots.)" }),
+                     },
+                }
+
+    OUTPUT_TOOLTIPS = ("Concatenated conditioning", )
+
+    RETURN_TYPES = ("CONDITIONING", )
+    FUNCTION = "doit"
+
+    CATEGORY = "ImpactPack/Util"
+
+    @staticmethod
+    def doit(**kwargs):
+        conditioning_to = list(kwargs.values())[0]
+
+        for k, conditioning_from in list(kwargs.items())[1:]:
+            out = []
+            if len(conditioning_from) > 1:
+                logging.warning(f"Warning: ConcatConditionings {k} contains more than 1 cond, only the first one will actually be applied to conditioning1.")
+
+            cond_from = conditioning_from[0][0]
+
+            for i in range(len(conditioning_to)):
+                t1 = conditioning_to[i][0]
+                tw = torch.cat((t1, cond_from), 1)
+                n = [tw, conditioning_to[i][1].copy()]
+                out.append(n)
+
+            conditioning_to = out
+
+        # return conditioning_to rather than out, so a single input passes through unchanged
+        return (conditioning_to, )
+
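+
+# NOTE (illustration, not part of the node code above): CombineConditionings merges the
+# conditioning *lists*, so both prompts are applied side by side, while ConcatConditionings
+# concatenates the underlying embedding tensors along the token axis, producing one longer
+# prompt. Roughly, for two CONDITIONING values a and b:
+#
+#     combined = a + b                       # list concat: two entries
+#     tw = torch.cat((a[0][0], b[0][0]), 1)  # token concat: one entry with a longer sequence
+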
+class RegionalSampler:
+    @classmethod
+    def INPUT_TYPES(s):
+        return {"required": {
+                    "seed": ("INT", {"default": 0, "min": 0, "max": 0xffffffffffffffff, "tooltip": "Random seed to use for generating CPU noise for sampling."}),
+                    "seed_2nd": ("INT", {"default": 0, "min": 0, "max": 0xffffffffffffffff, "tooltip": "Additional noise seed. The behavior is determined by seed_2nd_mode."}),
+                    "seed_2nd_mode": (["ignore", "fixed", "seed+seed_2nd", "seed-seed_2nd", "increment", "decrement", "randomize"], {"tooltip": "application method of seed_2nd. 1) ignore: Do not use seed_2nd. In the base-only sampling stage, the seed is applied as a noise seed, and in the regional sampling stage, denoising is performed as is without additional noise. 2) Others: In the base-only sampling stage, the seed is applied as a noise seed, and once that stage is finished with no leftover noise, new noise is added with seed_2nd and the regional sampling stage is performed. a) fixed: Use seed_2nd as-is as the additional noise seed. b) seed+seed_2nd: Apply the value of seed+seed_2nd as the additional noise seed. c) seed-seed_2nd: Apply the value of seed-seed_2nd as the additional noise seed. d) increment: Not implemented yet. Same as fixed. e) decrement: Not implemented yet. Same as fixed. f) randomize: Not implemented yet. Same as fixed."}),
+                    "steps": ("INT", {"default": 20, "min": 1, "max": 10000, "tooltip": "total sampling steps"}),
+                    "base_only_steps": ("INT", {"default": 2, "min": 0, "max": 10000, "tooltip": "Number of initial steps sampled with only the base sampler before regional sampling begins."}),
+                    "denoise": ("FLOAT", {"default": 1.0, "min": 0.0, "max": 1.0, "step": 0.01, "tooltip": "The amount of noise to remove. This amount is the noise added at the start, and the higher it is, the more the input latent will be modified before being returned."}),
+                    "samples": ("LATENT", {"tooltip": "input latent image"}),
+                    "base_sampler": ("KSAMPLER_ADVANCED", {"tooltip": "The sampler applied outside the area set by the regional_prompt."}),
+                    "regional_prompts": ("REGIONAL_PROMPTS", {"tooltip": "The prompt applied to each region"}),
+                    "overlap_factor": ("INT", {"default": 10, "min": 0, "max": 10000, "tooltip": "To smooth the seams of the region boundaries, expand the mask set in regional_prompts by the overlap_factor amount to overlap with other regions."}),
+                    "restore_latent": ("BOOLEAN", {"default": True, "label_on": "enabled", "label_off": "disabled", "tooltip": "At each step, restore the noise outside the mask area to its original state, as per the principle of inpainting. This option is provided for backward compatibility, and it is recommended to always set it to true."}),
+                    "additional_mode": (["DISABLE", "ratio additional", "ratio between"], {"default": "ratio between", "tooltip": "When ..._sde, uni_pc, or other special samplers are used, the region is not properly denoised, which destroys the overall harmony. To compensate for this, a recovery operation is performed using another sampler. This increases sampling time, because a second sampling is performed at each step in each region using the recovery sampler. 1) DISABLE: Disable this feature. 2) ratio additional: After the step's denoise amount is performed with the sampler set for the region, the recovery sampler is additionally applied by additional_sigma_ratio. With this option, the total denoise amount increases by additional_sigma_ratio. 3) ratio between: The step's denoise amount is split by additional_sigma_ratio between the sampler set for the region and the recovery sampler. With this option, the total denoise amount does not change."}),
+                    "additional_sampler": (["AUTO", "euler", "heun", "heunpp2", "dpm_2", "dpm_fast", "dpmpp_2m", "ddpm"], {"tooltip": "1) AUTO: Automatically set the recovery sampler. If the sampler is uni_pc, uni_pc_bh2, dpmpp_sde, or dpmpp_sde_gpu, the dpm_fast sampler is selected. If the sampler is dpmpp_2m_sde, dpmpp_2m_sde_gpu, dpmpp_3m_sde, or dpmpp_3m_sde_gpu, the dpmpp_2m sampler is selected. 2) Others: Manually set the recovery sampler."}),
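+                    # Illustration (not from the original tooltips): with additional_sigma_ratio=0.3 and a
+                    # per-step denoise amount d, "ratio additional" runs the region sampler for d and then
+                    # the recovery sampler for 0.3*d (total 1.3*d), while "ratio between" splits the same
+                    # step into 0.7*d for the region sampler and 0.3*d for the recovery sampler (total d).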
2) Others: Manually set the recovery sampler."}), + "additional_sigma_ratio": ("FLOAT", {"default": 0.3, "min": 0.0, "max": 1.0, "step": 0.01, "tooltip": "Multiplier of noise schedule to be applied according to additional_mode."}), + }, + "hidden": {"unique_id": "UNIQUE_ID"}, + } + + OUTPUT_TOOLTIPS = ("result latent", ) + + RETURN_TYPES = ("LATENT", ) + FUNCTION = "doit" + + CATEGORY = "ImpactPack/Regional" + + @staticmethod + def separated_sample(*args, **kwargs): + return separated_sample(*args, **kwargs) + + @staticmethod + def mask_erosion(samples, mask, grow_mask_by): + mask = mask.clone() + + w = samples['samples'].shape[3] + h = samples['samples'].shape[2] + + mask2 = torch.nn.functional.interpolate(mask.reshape((-1, 1, mask.shape[-2], mask.shape[-1])), size=(w, h), mode="bilinear") + if grow_mask_by == 0: + mask_erosion = mask2 + else: + kernel_tensor = torch.ones((1, 1, grow_mask_by, grow_mask_by)) + padding = math.ceil((grow_mask_by - 1) / 2) + + mask_erosion = torch.clamp(torch.nn.functional.conv2d(mask2.round(), kernel_tensor, padding=padding), 0, 1) + + return mask_erosion[:, :, :w, :h].round() + + @staticmethod + def doit(seed, seed_2nd, seed_2nd_mode, steps, base_only_steps, denoise, samples, base_sampler, regional_prompts, overlap_factor, restore_latent, + additional_mode, additional_sampler, additional_sigma_ratio, unique_id=None): + + samples = samples.copy() + samples['samples'] = comfy.sample.fix_empty_latent_channels(base_sampler.params[0], samples['samples']) + + if restore_latent: + latent_compositor = nodes.NODE_CLASS_MAPPINGS['LatentCompositeMasked']() + else: + latent_compositor = None + + masks = [regional_prompt.mask.numpy() for regional_prompt in regional_prompts] + masks = [np.ceil(mask).astype(np.int32) for mask in masks] + combined_mask = torch.from_numpy(np.bitwise_or.reduce(masks)) + + inv_mask = torch.where(combined_mask == 0, torch.tensor(1.0), torch.tensor(0.0)) + + adv_steps = int(steps / denoise) + start_at_step = adv_steps - steps + + region_len = len(regional_prompts) + total = steps*region_len + + leftover_noise = False + if base_only_steps > 0: + if seed_2nd_mode == 'ignore': + leftover_noise = True + + noise = Noise_RandomNoise(seed).generate_noise(samples) + + for rp in regional_prompts: + noise = rp.touch_noise(noise) + + samples = base_sampler.sample_advanced(True, seed, adv_steps, samples, start_at_step, start_at_step + base_only_steps, leftover_noise, recovery_mode="DISABLE", noise=noise) + + if seed_2nd_mode == "seed+seed_2nd": + seed += seed_2nd + if seed > 1125899906842624: + seed = seed - 1125899906842624 + elif seed_2nd_mode == "seed-seed_2nd": + seed -= seed_2nd + if seed < 0: + seed += 1125899906842624 + elif seed_2nd_mode != 'ignore': + seed = seed_2nd + + new_latent_image = samples.copy() + base_latent_image = None + + if not leftover_noise: + add_noise = True + noise = Noise_RandomNoise(seed).generate_noise(samples) + + for rp in regional_prompts: + noise = rp.touch_noise(noise) + else: + add_noise = False + noise = None + + for i in range(start_at_step+base_only_steps, adv_steps): + core.update_node_status(unique_id, f"{i}/{steps} steps | ", ((i-start_at_step)*region_len)/total) + + new_latent_image['noise_mask'] = inv_mask + new_latent_image = base_sampler.sample_advanced(add_noise, seed, adv_steps, new_latent_image, + start_at_step=i, end_at_step=i + 1, return_with_leftover_noise=True, + recovery_mode=additional_mode, recovery_sampler=additional_sampler, recovery_sigma_ratio=additional_sigma_ratio, noise=noise) + + if 
restore_latent: + if 'noise_mask' in new_latent_image: + del new_latent_image['noise_mask'] + base_latent_image = new_latent_image.copy() + + j = 1 + for regional_prompt in regional_prompts: + if restore_latent: + new_latent_image = base_latent_image.copy() + + core.update_node_status(unique_id, f"{i}/{steps} steps | {j}/{region_len}", ((i-start_at_step)*region_len + j)/total) + + region_mask = regional_prompt.get_mask_erosion(overlap_factor).squeeze(0).squeeze(0) + + new_latent_image['noise_mask'] = region_mask + new_latent_image = regional_prompt.sampler.sample_advanced(False, seed, adv_steps, new_latent_image, i, i + 1, True, + recovery_mode=additional_mode, recovery_sampler=additional_sampler, recovery_sigma_ratio=additional_sigma_ratio) + + if restore_latent: + del new_latent_image['noise_mask'] + base_latent_image = latent_compositor.composite(base_latent_image, new_latent_image, 0, 0, False, region_mask)[0] + new_latent_image = base_latent_image + + j += 1 + + add_noise = False + + # finalize + core.update_node_status(unique_id, "finalize") + if base_latent_image is not None: + new_latent_image = base_latent_image + else: + base_latent_image = new_latent_image + + new_latent_image['noise_mask'] = inv_mask + new_latent_image = base_sampler.sample_advanced(False, seed, adv_steps, new_latent_image, adv_steps, adv_steps+1, False, + recovery_mode=additional_mode, recovery_sampler=additional_sampler, recovery_sigma_ratio=additional_sigma_ratio) + + core.update_node_status(unique_id, f"{steps}/{steps} steps", total) + core.update_node_status(unique_id, "", None) + + if restore_latent: + new_latent_image = base_latent_image + + if 'noise_mask' in new_latent_image: + del new_latent_image['noise_mask'] + + return (new_latent_image, ) + + +class RegionalSamplerAdvanced: + @classmethod + def INPUT_TYPES(s): + return {"required": { + "add_noise": ("BOOLEAN", {"default": True, "label_on": "enabled", "label_off": "disabled", "tooltip": "Whether to add noise"}), + "noise_seed": ("INT", {"default": 0, "min": 0, "max": 0xffffffffffffffff, "tooltip": "Random seed to use for generating CPU noise for sampling."}), + "steps": ("INT", {"default": 20, "min": 1, "max": 10000, "tooltip": "total sampling steps"}), + "start_at_step": ("INT", {"default": 0, "min": 0, "max": 10000, "tooltip": "The starting step of the sampling to be applied at this node within the range of 'steps'."}), + "end_at_step": ("INT", {"default": 10000, "min": 0, "max": 10000, "tooltip": "The step at which sampling applied at this node will stop within the range of steps (if greater than steps, sampling will continue only up to steps)."}), + "overlap_factor": ("INT", {"default": 10, "min": 0, "max": 10000, "tooltip": "To smooth the seams of the region boundaries, expand the mask set in regional_prompts by the overlap_factor amount to overlap with other regions."}), + "restore_latent": ("BOOLEAN", {"default": True, "label_on": "enabled", "label_off": "disabled", "tooltip": "At each step, restore the noise outside the mask area to its original state, as per the principle of inpainting. 
This option is provided for backward compatibility, and it is recommended to always set it to true."}),
+                    "return_with_leftover_noise": ("BOOLEAN", {"default": False, "label_on": "enabled", "label_off": "disabled", "tooltip": "Whether to return the latent with noise remaining if the noise has not been completely removed according to the noise schedule, or to completely remove the noise before returning it."}),
+                    "latent_image": ("LATENT", {"tooltip": "input latent image"}),
+                    "base_sampler": ("KSAMPLER_ADVANCED", {"tooltip": "The sampler applied outside the area set by the regional_prompt."}),
+                    "regional_prompts": ("REGIONAL_PROMPTS", {"tooltip": "The prompt applied to each region"}),
+                    "additional_mode": (["DISABLE", "ratio additional", "ratio between"], {"default": "ratio between", "tooltip": "When ..._sde, uni_pc, or other special samplers are used, the region is not properly denoised, which destroys the overall harmony. To compensate for this, a recovery operation is performed using another sampler. This increases sampling time, because a second sampling is performed at each step in each region using the recovery sampler. 1) DISABLE: Disable this feature. 2) ratio additional: After the step's denoise amount is performed with the sampler set for the region, the recovery sampler is additionally applied by additional_sigma_ratio. With this option, the total denoise amount increases by additional_sigma_ratio. 3) ratio between: The step's denoise amount is split by additional_sigma_ratio between the sampler set for the region and the recovery sampler. With this option, the total denoise amount does not change."}),
+                    "additional_sampler": (["AUTO", "euler", "heun", "heunpp2", "dpm_2", "dpm_fast", "dpmpp_2m", "ddpm"], {"tooltip": "1) AUTO: Automatically set the recovery sampler. If the sampler is uni_pc, uni_pc_bh2, dpmpp_sde, or dpmpp_sde_gpu, the dpm_fast sampler is selected. If the sampler is dpmpp_2m_sde, dpmpp_2m_sde_gpu, dpmpp_3m_sde, or dpmpp_3m_sde_gpu, the dpmpp_2m sampler is selected. 
2) Others: Manually set the recovery sampler."}), + "additional_sigma_ratio": ("FLOAT", {"default": 0.3, "min": 0.0, "max": 1.0, "step": 0.01, "tooltip": "Multiplier of noise schedule to be applied according to additional_mode."}), + }, + "hidden": {"unique_id": "UNIQUE_ID"}, + } + + OUTPUT_TOOLTIPS = ("result latent", ) + + RETURN_TYPES = ("LATENT", ) + FUNCTION = "doit" + + CATEGORY = "ImpactPack/Regional" + + @staticmethod + def doit(add_noise, noise_seed, steps, start_at_step, end_at_step, overlap_factor, restore_latent, return_with_leftover_noise, latent_image, base_sampler, regional_prompts, + additional_mode, additional_sampler, additional_sigma_ratio, unique_id): + + new_latent_image = latent_image.copy() + new_latent_image['samples'] = comfy.sample.fix_empty_latent_channels(base_sampler.params[0], new_latent_image['samples']) + + if restore_latent: + latent_compositor = nodes.NODE_CLASS_MAPPINGS['LatentCompositeMasked']() + else: + latent_compositor = None + + masks = [regional_prompt.mask.numpy() for regional_prompt in regional_prompts] + masks = [np.ceil(mask).astype(np.int32) for mask in masks] + combined_mask = torch.from_numpy(np.bitwise_or.reduce(masks)) + + inv_mask = torch.where(combined_mask == 0, torch.tensor(1.0), torch.tensor(0.0)) + + region_len = len(regional_prompts) + end_at_step = min(steps, end_at_step) + total = (end_at_step - start_at_step) * region_len + + base_latent_image = None + region_masks = {} + + for i in range(start_at_step, end_at_step-1): + core.update_node_status(unique_id, f"{start_at_step+i}/{end_at_step} steps | ", ((i-start_at_step)*region_len)/total) + + cur_add_noise = True if i == start_at_step and add_noise else False + + if cur_add_noise: + noise = Noise_RandomNoise(noise_seed).generate_noise(new_latent_image) + for rp in regional_prompts: + noise = rp.touch_noise(noise) + else: + noise = None + + new_latent_image['noise_mask'] = inv_mask + new_latent_image = base_sampler.sample_advanced(cur_add_noise, noise_seed, steps, new_latent_image, i, i + 1, True, + recovery_mode=additional_mode, recovery_sampler=additional_sampler, recovery_sigma_ratio=additional_sigma_ratio, noise=noise) + + if restore_latent: + del new_latent_image['noise_mask'] + base_latent_image = new_latent_image.copy() + + j = 1 + for regional_prompt in regional_prompts: + if restore_latent: + new_latent_image = base_latent_image.copy() + + core.update_node_status(unique_id, f"{start_at_step+i}/{end_at_step} steps | {j}/{region_len}", ((i-start_at_step)*region_len + j)/total) + + if j not in region_masks: + region_mask = regional_prompt.get_mask_erosion(overlap_factor).squeeze(0).squeeze(0) + region_masks[j] = region_mask + else: + region_mask = region_masks[j] + + new_latent_image['noise_mask'] = region_mask + new_latent_image = regional_prompt.sampler.sample_advanced(False, noise_seed, steps, new_latent_image, i, i + 1, True, + recovery_mode=additional_mode, recovery_sampler=additional_sampler, recovery_sigma_ratio=additional_sigma_ratio) + + if restore_latent: + del new_latent_image['noise_mask'] + base_latent_image = latent_compositor.composite(base_latent_image, new_latent_image, 0, 0, False, region_mask)[0] + new_latent_image = base_latent_image + + j += 1 + + # finalize + core.update_node_status(unique_id, "finalize") + if base_latent_image is not None: + new_latent_image = base_latent_image + else: + base_latent_image = new_latent_image + + new_latent_image['noise_mask'] = inv_mask + new_latent_image = base_sampler.sample_advanced(False, noise_seed, steps, 
new_latent_image, end_at_step-1, end_at_step, return_with_leftover_noise, + recovery_mode=additional_mode, recovery_sampler=additional_sampler, recovery_sigma_ratio=additional_sigma_ratio) + + core.update_node_status(unique_id, f"{end_at_step}/{end_at_step} steps", total) + core.update_node_status(unique_id, "", None) + + if restore_latent: + new_latent_image = base_latent_image + + if 'noise_mask' in new_latent_image: + del new_latent_image['noise_mask'] + + return (new_latent_image, ) + + +class KSamplerBasicPipe: + @classmethod + def INPUT_TYPES(s): + return {"required": + {"basic_pipe": ("BASIC_PIPE", {"tooltip": "basic_pipe input for sampling"}), + "seed": ("INT", {"default": 0, "min": 0, "max": 0xffffffffffffffff, "tooltip": "Random seed to use for generating CPU noise for sampling."}), + "steps": ("INT", {"default": 20, "min": 1, "max": 10000, "tooltip": "total sampling steps"}), + "cfg": ("FLOAT", {"default": 8.0, "min": 0.0, "max": 100.0, "tooltip": "classifier free guidance value"}), + "sampler_name": (comfy.samplers.KSampler.SAMPLERS, {"tooltip": "sampler"}), + "scheduler": (core.get_schedulers(), {"tooltip": "noise schedule"}), + "latent_image": ("LATENT", {"tooltip": "input latent image"}), + "denoise": ("FLOAT", {"default": 1.0, "min": 0.0, "max": 1.0, "step": 0.01, "tooltip": "The amount of noise to remove. This amount is the noise added at the start, and the higher it is, the more the input latent will be modified before being returned."}), + }, + "optional": + { + "scheduler_func_opt": ("SCHEDULER_FUNC", {"tooltip": "[OPTIONAL] Noise schedule generation function. If this is set, the scheduler widget will be ignored."}), + } + } + + OUTPUT_TOOLTIPS = ("passthrough input basic_pipe", "result latent", "VAE in basic_pipe") + + RETURN_TYPES = ("BASIC_PIPE", "LATENT", "VAE") + FUNCTION = "sample" + + CATEGORY = "ImpactPack/sampling" + + @staticmethod + def sample(basic_pipe, seed, steps, cfg, sampler_name, scheduler, latent_image, denoise=1.0, scheduler_func_opt=None): + model, clip, vae, positive, negative = basic_pipe + latent = impact_sample(model, seed, steps, cfg, sampler_name, scheduler, positive, negative, latent_image, denoise, scheduler_func=scheduler_func_opt) + return basic_pipe, latent, vae + + +class KSamplerAdvancedBasicPipe: + @classmethod + def INPUT_TYPES(s): + return {"required": + {"basic_pipe": ("BASIC_PIPE", {"tooltip": "basic_pipe input for sampling"}), + "add_noise": ("BOOLEAN", {"default": True, "label_on": "enable", "label_off": "disable", "tooltip": "Whether to add noise"}), + "noise_seed": ("INT", {"default": 0, "min": 0, "max": 0xffffffffffffffff, "tooltip": "Random seed to use for generating CPU noise for sampling."}), + "steps": ("INT", {"default": 20, "min": 1, "max": 10000, "tooltip": "total sampling steps"}), + "cfg": ("FLOAT", {"default": 8.0, "min": 0.0, "max": 100.0, "tooltip": "classifier free guidance value"}), + "sampler_name": (comfy.samplers.KSampler.SAMPLERS, {"tooltip": "sampler"}), + "scheduler": (core.get_schedulers(), {"tooltip": "noise schedule"}), + "latent_image": ("LATENT", {"tooltip": "input latent image"}), + "start_at_step": ("INT", {"default": 0, "min": 0, "max": 10000, "tooltip": "The starting step of the sampling to be applied at this node within the range of 'steps'."}), + "end_at_step": ("INT", {"default": 10000, "min": 0, "max": 10000, "tooltip": "The step at which sampling applied at this node will stop within the range of steps (if greater than steps, sampling will continue only up to steps)."}), + 
"return_with_leftover_noise": ("BOOLEAN", {"default": False, "label_on": "enable", "label_off": "disable", "tooltip": "Whether to return the latent with noise remaining if the noise has not been completely removed according to the noise schedule, or to completely remove the noise before returning it."}), + }, + "optional": + { + "scheduler_func_opt": ("SCHEDULER_FUNC", {"tooltip": "[OPTIONAL] Noise schedule generation function. If this is set, the scheduler widget will be ignored."}), + } + } + + OUTPUT_TOOLTIPS = ("passthrough input basic_pipe", "result latent", "VAE in basic_pipe") + + RETURN_TYPES = ("BASIC_PIPE", "LATENT", "VAE") + FUNCTION = "sample" + + CATEGORY = "ImpactPack/sampling" + + @staticmethod + def sample(basic_pipe, add_noise, noise_seed, steps, cfg, sampler_name, scheduler, latent_image, start_at_step, end_at_step, return_with_leftover_noise, denoise=1.0, scheduler_func_opt=None): + model, clip, vae, positive, negative = basic_pipe + + latent = separated_sample(model, add_noise, noise_seed, steps, cfg, sampler_name, scheduler, positive, negative, latent_image, start_at_step, end_at_step, return_with_leftover_noise, scheduler_func=scheduler_func_opt) + return basic_pipe, latent, vae + + +class GITSSchedulerFuncProvider: + @classmethod + def INPUT_TYPES(s): + return {"required": { + "coeff": ("FLOAT", {"default": 1.20, "min": 0.80, "max": 1.50, "step": 0.05, "tooltip": "coeff factor of GITS Scheduler"}), + "denoise": ("FLOAT", {"default": 1.0, "min": 0.0, "max": 1.0, "step": 0.01, "tooltip": "denoise amount for noise schedule"}), + } + } + + OUTPUT_TOOLTIPS = ("Returns a function that generates a noise schedule using GITSScheduler. This can be used in place of a predetermined noise schedule to dynamically generate a noise schedule based on the steps.",) + + RETURN_TYPES = ("SCHEDULER_FUNC",) + CATEGORY = "ImpactPack/sampling" + + FUNCTION = "doit" + + @staticmethod + def doit(coeff, denoise): + def f(model, sampler, steps): + if 'GITSScheduler' not in nodes.NODE_CLASS_MAPPINGS: + raise Exception("[Impact Pack] ComfyUI is an outdated version. Cannot use GITSScheduler.") + + scheduler = nodes.NODE_CLASS_MAPPINGS['GITSScheduler']() + return scheduler.get_sigmas(coeff, steps, denoise)[0] + + return (f, ) + + +class NegativeConditioningPlaceholder: + @classmethod + def INPUT_TYPES(s): + return {"required": {}} + + OUTPUT_TOOLTIPS = ("This is a Placeholder for the FLUX model that does not use Negative Conditioning.",) + + RETURN_TYPES = ("CONDITIONING",) + CATEGORY = "ImpactPack/sampling" + + FUNCTION = "doit" + + @staticmethod + def doit(): + return ("NegativePlaceholder", ) diff --git a/zavodik/nodes/ComfyUI-Impact-Pack/modules/impact/util_nodes.py b/zavodik/nodes/ComfyUI-Impact-Pack/modules/impact/util_nodes.py new file mode 100644 index 0000000000000000000000000000000000000000..48d5e062c8cde0c4cb75ac98091857f4fed53d69 --- /dev/null +++ b/zavodik/nodes/ComfyUI-Impact-Pack/modules/impact/util_nodes.py @@ -0,0 +1,775 @@ +from impact.utils import any_typ, ByPassTypeTuple, make_3d_mask +import comfy_extras.nodes_mask +from nodes import MAX_RESOLUTION +import torch +import comfy +import sys +import nodes +import re +import impact.core as core +from server import PromptServer +import inspect +import logging + + +class GeneralSwitch: + @classmethod + def INPUT_TYPES(s): + dyn_inputs = {"input1": (any_typ, {"lazy": True, "tooltip": "Any input. 
When connected, one more input slot is added."}), } + if core.is_execution_model_version_supported(): + stack = inspect.stack() + if stack[2].function == 'get_input_info': + # bypass validation + class AllContainer: + def __contains__(self, item): + return True + + def __getitem__(self, key): + return any_typ, {"lazy": True} + + dyn_inputs = AllContainer() + + inputs = {"required": { + "select": ("INT", {"default": 1, "min": 1, "max": 999999, "step": 1, "tooltip": "The input number you want to output among the inputs"}), + "sel_mode": ("BOOLEAN", {"default": False, "label_on": "select_on_prompt", "label_off": "select_on_execution", "forceInput": False, + "tooltip": "In the case of 'select_on_execution', the selection is dynamically determined at the time of workflow execution. 'select_on_prompt' is an option that exists for older versions of ComfyUI, and it makes the decision before the workflow execution."}), + }, + "optional": dyn_inputs, + "hidden": {"unique_id": "UNIQUE_ID", "extra_pnginfo": "EXTRA_PNGINFO"} + } + + return inputs + + RETURN_TYPES = (any_typ, "STRING", "INT") + RETURN_NAMES = ("selected_value", "selected_label", "selected_index") + OUTPUT_TOOLTIPS = ("Output is generated only from the input chosen by the 'select' value.", "Slot label of the selected input slot", "Outputs the select value as is") + FUNCTION = "doit" + + CATEGORY = "ImpactPack/Util" + + def check_lazy_status(self, *args, **kwargs): + selected_index = int(kwargs['select']) + input_name = f"input{selected_index}" + + logging.info(f"SELECTED: {input_name}") + + if input_name in kwargs: + return [input_name] + else: + return [] + + @staticmethod + def doit(*args, **kwargs): + selected_index = int(kwargs['select']) + input_name = f"input{selected_index}" + + selected_label = input_name + node_id = kwargs['unique_id'] + + if 'extra_pnginfo' in kwargs and kwargs['extra_pnginfo'] is not None: + nodelist = kwargs['extra_pnginfo']['workflow']['nodes'] + for node in nodelist: + if str(node['id']) == node_id: + inputs = node['inputs'] + + for slot in inputs: + if slot['name'] == input_name and 'label' in slot: + selected_label = slot['label'] + + break + else: + logging.info("[Impact-Pack] The switch node does not guarantee proper functioning in API mode.") + + if input_name in kwargs: + return kwargs[input_name], selected_label, selected_index + else: + logging.info("ImpactSwitch: invalid select index (ignored)") + return None, "", selected_index + +class LatentSwitch: + @classmethod + def INPUT_TYPES(s): + return {"required": { + "select": ("INT", {"default": 1, "min": 1, "max": 99999, "step": 1}), + "latent1": ("LATENT",), + }, + } + + RETURN_TYPES = ("LATENT", ) + + OUTPUT_NODE = True + + FUNCTION = "doit" + + CATEGORY = "ImpactPack/Util" + + def doit(self, *args, **kwargs): + input_name = f"latent{int(kwargs['select'])}" + + if input_name in kwargs: + return (kwargs[input_name],) + else: + logging.info("LatentSwitch: invalid select index ('latent1' is selected)") + return (kwargs['latent1'],) + + +class ImageMaskSwitch: + @classmethod + def INPUT_TYPES(s): + return {"required": { + "select": ("INT", {"default": 1, "min": 1, "max": 4, "step": 1}), + "images1": ("IMAGE",), + }, + + "optional": { + "mask1_opt": ("MASK",), + "images2_opt": ("IMAGE",), + "mask2_opt": ("MASK",), + "images3_opt": ("IMAGE",), + "mask3_opt": ("MASK",), + "images4_opt": ("IMAGE",), + "mask4_opt": ("MASK",), + }, + } + + RETURN_TYPES = ("IMAGE", "MASK",) + + OUTPUT_NODE = True + + FUNCTION = "doit" + + CATEGORY = "ImpactPack/Util" + + def 
doit(self, select, images1, mask1_opt=None, images2_opt=None, mask2_opt=None, images3_opt=None, mask3_opt=None, + images4_opt=None, mask4_opt=None): + if select == 1: + return images1, mask1_opt, + elif select == 2: + return images2_opt, mask2_opt, + elif select == 3: + return images3_opt, mask3_opt, + else: + return images4_opt, mask4_opt, + + +class GeneralInversedSwitch: + @classmethod + def INPUT_TYPES(s): + return {"required": { + "select": ("INT", {"default": 1, "min": 1, "max": 999999, "step": 1, "tooltip": "The output number you want to send from the input"}), + "input": (any_typ, {"tooltip": "Any input. When connected, one more input slot is added."}), + + }, + "optional": { + "sel_mode": ("BOOLEAN", {"default": False, "label_on": "select_on_prompt", "label_off": "select_on_execution", "forceInput": False, + "tooltip": "In the case of 'select_on_execution', the selection is dynamically determined at the time of workflow execution. 'select_on_prompt' is an option that exists for older versions of ComfyUI, and it makes the decision before the workflow execution."}), + }, + "hidden": {"prompt": "PROMPT", "unique_id": "UNIQUE_ID"}, + } + + RETURN_TYPES = ByPassTypeTuple((any_typ, )) + OUTPUT_TOOLTIPS = ("Output occurs only from the output selected by the 'select' value.\nWhen slots are connected, additional slots are created.", ) + FUNCTION = "doit" + + CATEGORY = "ImpactPack/Util" + + def doit(self, select, prompt, unique_id, input, **kwargs): + if core.is_execution_model_version_supported(): + from comfy_execution.graph import ExecutionBlocker + else: + logging.warning("[Impact Pack] InversedSwitch: ComfyUI is outdated. The 'select_on_execution' mode cannot function properly.") + + res = [] + + # search max output count in prompt + cnt = 0 + for x in prompt.values(): + for y in x.get('inputs', {}).values(): + if isinstance(y, list) and len(y) == 2: + if y[0] == unique_id: + cnt = max(cnt, y[1]) + + for i in range(0, cnt + 1): + if select == i+1: + res.append(input) + elif core.is_execution_model_version_supported(): + res.append(ExecutionBlocker(None)) + else: + res.append(None) + + return res + + +class RemoveNoiseMask: + @classmethod + def INPUT_TYPES(s): + return {"required": {"samples": ("LATENT",)}} + + RETURN_TYPES = ("LATENT",) + FUNCTION = "doit" + + CATEGORY = "ImpactPack/Util" + + def doit(self, samples): + res = {key: value for key, value in samples.items() if key != 'noise_mask'} + return (res, ) + + +class ImagePasteMasked: + @classmethod + def INPUT_TYPES(s): + return { + "required": { + "destination": ("IMAGE",), + "source": ("IMAGE",), + "x": ("INT", {"default": 0, "min": 0, "max": MAX_RESOLUTION, "step": 1}), + "y": ("INT", {"default": 0, "min": 0, "max": MAX_RESOLUTION, "step": 1}), + "resize_source": ("BOOLEAN", {"default": False}), + }, + "optional": { + "mask": ("MASK",), + } + } + RETURN_TYPES = ("IMAGE",) + FUNCTION = "composite" + + CATEGORY = "image" + + def composite(self, destination, source, x, y, resize_source, mask = None): + destination = destination.clone().movedim(-1, 1) + output = comfy_extras.nodes_mask.composite(destination, source.movedim(-1, 1), x, y, mask, 1, resize_source).movedim(1, -1) + return (output,) + + +from impact.utils import any_typ + +class ImpactLogger: + @classmethod + def INPUT_TYPES(s): + return {"required": { + "data": (any_typ,), + "text": ("STRING", {"multiline": True}), + }, + "hidden": {"prompt": "PROMPT", "extra_pnginfo": "EXTRA_PNGINFO", "unique_id": "UNIQUE_ID"}, + } + + CATEGORY = "ImpactPack/Debug" + + OUTPUT_NODE = 
True
+
+    RETURN_TYPES = ()
+    FUNCTION = "doit"
+
+    def doit(self, data, text, prompt, extra_pnginfo, unique_id):
+        shape = ""
+        if hasattr(data, "shape"):
+            shape = f"{data.shape} / "
+
+        logging.info(f"[IMPACT LOGGER]: {shape}{data}")
+
+        logging.info(f" PROMPT: {prompt}")
+
+        # for x in prompt:
+        #     if 'inputs' in x and 'populated_text' in x['inputs']:
+        #         print(f"PROMPT: {x['10']['inputs']['populated_text']}")
+        #
+        # for x in extra_pnginfo['workflow']['nodes']:
+        #     if x['type'] == 'ImpactWildcardProcessor':
+        #         print(f" WV : {x['widgets_values'][1]}\n")
+
+        PromptServer.instance.send_sync("impact-node-feedback", {"node_id": unique_id, "widget_name": "text", "type": "TEXT", "value": f"{data}"})
+        return {}
+
+
+class ImpactDummyInput:
+    @classmethod
+    def INPUT_TYPES(s):
+        return {"required": {}}
+
+    CATEGORY = "ImpactPack/Debug"
+
+    RETURN_TYPES = (any_typ,)
+    FUNCTION = "doit"
+
+    def doit(self):
+        return ("DUMMY",)
+
+
+class MasksToMaskList:
+    @classmethod
+    def INPUT_TYPES(s):
+        return {"optional": {
+                        "masks": ("MASK", ),
+                      }
+                }
+
+    RETURN_TYPES = ("MASK", )
+    OUTPUT_IS_LIST = (True, )
+    FUNCTION = "doit"
+
+    CATEGORY = "ImpactPack/Operation"
+
+    def doit(self, masks=None):
+        # default to None so the node still runs when the optional input is left unconnected
+        if masks is None:
+            empty_mask = torch.zeros((64, 64), dtype=torch.float32, device="cpu")
+            return ([empty_mask], )
+
+        res = []
+
+        for mask in masks:
+            res.append(mask)
+
+        res = [make_3d_mask(x) for x in res]
+
+        return (res, )
+
+
+class MaskListToMaskBatch:
+    @classmethod
+    def INPUT_TYPES(s):
+        return {"required": {
+                        "mask": ("MASK", ),
+                      }
+                }
+
+    INPUT_IS_LIST = True
+
+    RETURN_TYPES = ("MASK", )
+    FUNCTION = "doit"
+
+    CATEGORY = "ImpactPack/Operation"
+
+    def doit(self, mask):
+        if len(mask) == 0:
+            empty_mask = torch.zeros((1, 64, 64), dtype=torch.float32, device="cpu").unsqueeze(0)
+            return (empty_mask,)
+
+        masks_3d = [make_3d_mask(m) for m in mask]
+        target_shape = masks_3d[0].shape[1:]
+        upscaled_masks = []
+        for m in masks_3d:
+            if m.shape[1:] != target_shape:
+                m = m.unsqueeze(1).repeat(1, 3, 1, 1)
+                m = comfy.utils.common_upscale(m, target_shape[1], target_shape[0], "lanczos", "center")
+                m = m[:, 0, :, :]
+
+            upscaled_masks.append(m)
+        # Concatenate all at once
+        result = torch.cat(upscaled_masks, dim=0)
+        return (result,)
+
+
+class ImageListToImageBatch:
+    @classmethod
+    def INPUT_TYPES(s):
+        return {"required": {
+                        "images": ("IMAGE", ),
+                      }
+                }
+
+    INPUT_IS_LIST = True
+
+    RETURN_TYPES = ("IMAGE", )
+    FUNCTION = "doit"
+
+    CATEGORY = "ImpactPack/Operation"
+
+    def doit(self, images):
+        if len(images) == 0:
+            return ()
+        if len(images) == 1:
+            img = images[0]
+            if img.ndim == 3:  # add batch dim if missing
+                img = img.unsqueeze(0)
+            return (img,)
+
+        # Start with the first image
+        image1 = images[0]
+        if image1.ndim == 3:
+            image1 = image1.unsqueeze(0)
+
+        for image2 in images[1:]:
+            # Ensure batch dim
+            if image2.ndim == 3:
+                image2 = image2.unsqueeze(0)
+
+            # Ensure same device
+            if image2.device != image1.device:
+                image2 = image2.to(image1.device)
+
+            # Ensure HxW match exactly
+            H, W = image1.shape[1], image1.shape[2]
+            if image2.shape[1] != H or image2.shape[2] != W:
+                image2 = comfy.utils.common_upscale(
+                    image2.movedim(-1, 1),  # move channels first
+                    W,  # width
+                    H,  # height
+                    "lanczos",
+                    "center"
+                ).movedim(1, -1)  # move channels back last
+
+            # Ensure channels match
+            if image2.shape[3] != image1.shape[3]:
+                # simple fix: truncate or pad channels
+                min_C = min(image1.shape[3], image2.shape[3])
+                image1 = image1[:, :, :, :min_C]
+                image2 = image2[:, :, :, :min_C]
+
+            # Concatenate 
along batch dimension + image1 = torch.cat((image1, image2), dim=0) + + return (image1,) + + +class ImageBatchToImageList: + @classmethod + def INPUT_TYPES(s): + return {"required": {"image": ("IMAGE",), }} + + RETURN_TYPES = ("IMAGE",) + OUTPUT_IS_LIST = (True,) + FUNCTION = "doit" + + CATEGORY = "ImpactPack/Util" + + def doit(self, image): + images = [image[i:i + 1, ...] for i in range(image.shape[0])] + return (images, ) + + +class MakeAnyList: + @classmethod + def INPUT_TYPES(s): + return { + "required": {}, + "optional": {"value1": (any_typ,), } + } + + RETURN_TYPES = (any_typ,) + OUTPUT_IS_LIST = (True,) + FUNCTION = "doit" + + CATEGORY = "ImpactPack/Util" + + def doit(self, **kwargs): + values = [] + + for k, v in kwargs.items(): + if v is not None: + values.append(v) + + return (values, ) + + +class MakeMaskList: + @classmethod + def INPUT_TYPES(s): + return {"required": {"mask1": ("MASK",), }} + + RETURN_TYPES = ("MASK",) + OUTPUT_IS_LIST = (True,) + FUNCTION = "doit" + + CATEGORY = "ImpactPack/Util" + + def doit(self, **kwargs): + masks = [] + + for k, v in kwargs.items(): + masks.append(v) + + return (masks, ) + + +class NthItemOfAnyList: + @classmethod + def INPUT_TYPES(s): + return {"required": { + "any_list": (any_typ,), + "index": ("INT", {"default": 0, "min": -sys.maxsize, "max": sys.maxsize, "step": 1, "tooltip": "The index of the item you want to select from the list. Use negative values to select from the end (e.g., -1 for last item, -2 for second to last)."}), + } + } + + RETURN_TYPES = (any_typ,) + INPUT_IS_LIST = True + FUNCTION = "doit" + + CATEGORY = "ImpactPack/Util" + + DESCRIPTION = "Selects the Nth item from a list. If the index is out of range, it returns the last item in the list." + + def doit(self, any_list, index): + i = index[0] + list_len = len(any_list) + if i >= list_len or i < -list_len: + return (any_list[-1],) + else: + return (any_list[i],) + + +class MakeImageList: + @classmethod + def INPUT_TYPES(s): + return {"optional": {"image1": ("IMAGE",), }} + + RETURN_TYPES = ("IMAGE",) + OUTPUT_IS_LIST = (True,) + FUNCTION = "doit" + + CATEGORY = "ImpactPack/Util" + + def doit(self, **kwargs): + images = [] + + for k, v in kwargs.items(): + images.append(v) + + return (images, ) + + +class MakeImageBatch: + @classmethod + def INPUT_TYPES(s): + return {"optional": {"image1": ("IMAGE",), }} + + RETURN_TYPES = ("IMAGE",) + FUNCTION = "doit" + + CATEGORY = "ImpactPack/Util" + + def doit(self, **kwargs): + images = [value for value in kwargs.values()] + + if len(images) == 1: + return (images[0],) + else: + image1 = images[0] + for image2 in images[1:]: + if image1.shape[1:] != image2.shape[1:]: + image2 = comfy.utils.common_upscale(image2.movedim(-1, 1), image1.shape[2], image1.shape[1], "lanczos", "center").movedim(1, -1) + image1 = torch.cat((image1, image2), dim=0) + return (image1,) + + +class MakeMaskBatch: + @classmethod + def INPUT_TYPES(s): + return {"optional": {"mask1": ("MASK",), }} + + RETURN_TYPES = ("MASK",) + FUNCTION = "doit" + + CATEGORY = "ImpactPack/Util" + + def doit(self, **kwargs): + masks = [make_3d_mask(value) for value in kwargs.values()] + + if len(masks) == 1: + return (masks[0],) + else: + mask1 = masks[0] + for mask2 in masks[1:]: + if mask1.shape[1:] != mask2.shape[1:]: + mask2 = comfy.utils.common_upscale(mask2.movedim(-1, 1), mask1.shape[2], mask1.shape[1], "lanczos", "center").movedim(1, -1) + mask1 = torch.cat((mask1, mask2), dim=0) + return (mask1,) + + +class ReencodeLatent: + @classmethod + def INPUT_TYPES(s): + return 
{"required": { + "samples": ("LATENT", ), + "tile_mode": (["None", "Both", "Decode(input) only", "Encode(output) only"],), + "input_vae": ("VAE", ), + "output_vae": ("VAE", ), + "tile_size": ("INT", {"default": 512, "min": 320, "max": 4096, "step": 64}), + }, + "optional": { + "overlap": ("INT", {"default": 64, "min": 0, "max": 4096, "step": 32, "tooltip": "This setting applies when 'tile_mode' is enabled."}), + } + } + + CATEGORY = "ImpactPack/Util" + + RETURN_TYPES = ("LATENT", ) + FUNCTION = "doit" + + def doit(self, samples, tile_mode, input_vae, output_vae, tile_size=512, overlap=64): + if tile_mode in ["Both", "Decode(input) only"]: + decoder = nodes.VAEDecodeTiled() + if 'overlap' in inspect.signature(decoder.decode).parameters: + pixels = decoder.decode(input_vae, samples, tile_size, overlap=overlap)[0] + else: + pixels = decoder.decode(input_vae, samples, tile_size, overlap=overlap)[0] + else: + pixels = nodes.VAEDecode().decode(input_vae, samples)[0] + + if tile_mode in ["Both", "Encode(output) only"]: + encoder = nodes.VAEEncodeTiled() + if 'overlap' in inspect.signature(encoder.encode).parameters: + return encoder.encode(output_vae, pixels, tile_size, overlap=overlap) + else: + return encoder.encode(output_vae, pixels, tile_size) + else: + return nodes.VAEEncode().encode(output_vae, pixels) + + +class ReencodeLatentPipe: + @classmethod + def INPUT_TYPES(s): + return {"required": { + "samples": ("LATENT", ), + "tile_mode": (["None", "Both", "Decode(input) only", "Encode(output) only"],), + "input_basic_pipe": ("BASIC_PIPE", ), + "output_basic_pipe": ("BASIC_PIPE", ), + }, + } + + CATEGORY = "ImpactPack/Util" + + RETURN_TYPES = ("LATENT", ) + FUNCTION = "doit" + + def doit(self, samples, tile_mode, input_basic_pipe, output_basic_pipe): + _, _, input_vae, _, _ = input_basic_pipe + _, _, output_vae, _, _ = output_basic_pipe + return ReencodeLatent().doit(samples, tile_mode, input_vae, output_vae) + + +class StringSelector: + @classmethod + def INPUT_TYPES(s): + return {"required": { + "strings": ("STRING", {"multiline": True}), + "multiline": ("BOOLEAN", {"default": False, "label_on": "enabled", "label_off": "disabled"}), + "select": ("INT", {"min": 0, "max": sys.maxsize, "step": 1, "default": 0}), + }} + + RETURN_TYPES = ("STRING",) + FUNCTION = "doit" + + CATEGORY = "ImpactPack/Util" + + def doit(self, strings, multiline, select): + lines = strings.split('\n') + + if multiline: + result = [] + current_string = "" + + for line in lines: + if line.startswith("#"): + if current_string: + result.append(current_string.strip()) + current_string = "" + current_string += line + "\n" + + if current_string: + result.append(current_string.strip()) + + if len(result) == 0: + selected = strings + else: + selected = result[select % len(result)] + + if selected.startswith('#'): + selected = selected[1:] + else: + if len(lines) == 0: + selected = strings + else: + selected = lines[select % len(lines)] + + return (selected, ) + + +class StringListToString: + @classmethod + def INPUT_TYPES(s): + return { + "required": { + "join_with": ("STRING", {"default": "\\n"}), + "string_list": ("STRING", {"forceInput": True}), + } + } + + INPUT_IS_LIST = True + RETURN_TYPES = ("STRING",) + FUNCTION = "doit" + + CATEGORY = "ImpactPack/Util" + + def doit(self, join_with, string_list): + # convert \\n to newline character + if join_with[0] == "\\n": + join_with[0] = "\n" + + joined_text = join_with[0].join(string_list) + + return (joined_text,) + + +class WildcardPromptFromString: + @classmethod + def 
+class WildcardPromptFromString:
+    @classmethod
+    def INPUT_TYPES(s):
+        return {
+            "required": {
+                "string": ("STRING", {"forceInput": True}),
+                "delimiter": ("STRING", {"multiline": False, "default": "\\n" }),
+                "prefix_all": ("STRING", {"multiline": False}),
+                "postfix_all": ("STRING", {"multiline": False}),
+                "restrict_to_tags": ("STRING", {"multiline": False}),
+                "exclude_tags": ("STRING", {"multiline": False})
+            },
+        }
+
+    RETURN_TYPES = ("STRING", "STRING",)
+    RETURN_NAMES = ("wildcard", "segs_labels",)
+    FUNCTION = "doit"
+
+    CATEGORY = "ImpactPack/Util"
+
+    def doit(self, string, delimiter, prefix_all, postfix_all, restrict_to_tags, exclude_tags):
+        # convert \\n to newline character
+        if delimiter == "\\n":
+            delimiter = "\n"
+
+        # some sanity checks and normalization for later processing
+        if prefix_all is None:
+            prefix_all = ""
+        if postfix_all is None:
+            postfix_all = ""
+        if restrict_to_tags is None:
+            restrict_to_tags = ""
+        if exclude_tags is None:
+            exclude_tags = ""
+
+        restrict_to_tags = restrict_to_tags.split(", ")
+        exclude_tags = exclude_tags.split(", ")
+
+        # build the wildcard prompt per list entry
+        output = ["[LAB]"]
+        labels = []
+        for x in string.split(delimiter):
+            label = str(len(labels) + 1)
+            labels.append(label)
+            x = x.split(", ")
+            # restrict to tags (note: set operations do not preserve tag order)
+            if restrict_to_tags != [""]:
+                x = list(set(x) & set(restrict_to_tags))
+            # remove tags
+            if exclude_tags != [""]:
+                x = list(set(x) - set(exclude_tags))
+            # next row of the wildcard prompt:
+            prompt_for_seg = f'[{label}] {prefix_all} {", ".join(x)} {postfix_all}'.strip()
+            output.append(prompt_for_seg)
+        output = "\n".join(output)
+
+        # clean string: fixup double spaces, commas etc.
+        output = re.sub(r' ,', ',', output)
+        output = re.sub(r' +', ' ', output)
+        output = re.sub(r',,+', ',', output)
+        output = re.sub(r'\n, ', '\n', output)
+
+        return output, ", ".join(labels)
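+
+# Example (illustrative): with string "1girl, red hair|1boy, black hair", delimiter "|",
+# and empty prefix/postfix/tag filters, doit() returns
+#     wildcard    = "[LAB]\n[1] 1girl, red hair\n[2] 1boy, black hair"
+#     segs_labels = "1, 2"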
diff --git a/zavodik/nodes/ComfyUI-Impact-Pack/modules/impact/utils.py b/zavodik/nodes/ComfyUI-Impact-Pack/modules/impact/utils.py
new file mode 100644
index 0000000000000000000000000000000000000000..7dfb28f47587fecb10da728e45f8f8f4cafa8aeb
--- /dev/null
+++ b/zavodik/nodes/ComfyUI-Impact-Pack/modules/impact/utils.py
@@ -0,0 +1,743 @@
+import torch
+import torch.nn.functional as F  # used by resize_with_padding below
+import torchvision
+import cv2
+import numpy as np
+import folder_paths
+import nodes
+from . import config
+from PIL import Image
+import comfy
+import time
+import logging
+
+
+class TensorBatchBuilder:
+    def __init__(self):
+        self.tensor = None
+
+    def concat(self, new_tensor):
+        if self.tensor is None:
+            self.tensor = new_tensor
+        else:
+            self.tensor = torch.concat((self.tensor, new_tensor), dim=0)
+
+
+def tensor_convert_rgba(image, prefer_copy=True):
+    """Assumes NHWC format tensor with 1, 3 or 4 channels."""
+    _tensor_check_image(image)
+    n_channel = image.shape[-1]
+    if n_channel == 4:
+        return image
+
+    if n_channel == 3:
+        alpha = torch.ones((*image.shape[:-1], 1))
+        return torch.cat((image, alpha), axis=-1)
+
+    if n_channel == 1:
+        if prefer_copy:
+            image = image.repeat(1, 1, 1, 4)  # repeat counts must be positive
+        else:
+            image = image.expand(-1, -1, -1, 4)
+        return image
+
+    # NOTE: Similar error message as in PIL, for easier googling :P
+    raise ValueError(f"illegal conversion (channels: {n_channel} -> 4)")
+
+
+def tensor_convert_rgb(image, prefer_copy=True):
+    """Assumes NHWC format tensor with 1, 3 or 4 channels."""
+    _tensor_check_image(image)
+    n_channel = image.shape[-1]
+    if n_channel == 3:
+        return image
+
+    if n_channel == 4:
+        image = image[..., :3]
+        if prefer_copy:
+            image = image.clone()  # tensors use clone(), not numpy-style copy()
+        return image
+
+    if n_channel == 1:
+        if prefer_copy:
+            image = image.repeat(1, 1, 1, 3)
+        else:
+            image = image.expand(-1, -1, -1, 3)
+        return image
+
+    # NOTE: Same error message as in PIL, for easier googling :P
+    raise ValueError(f"illegal conversion (channels: {n_channel} -> 3)")
+
+
+def resize_with_padding(image, target_w: int, target_h: int):
+    _tensor_check_image(image)
+    b, h, w, c = image.shape
+    image = image.permute(0, 3, 1, 2)  # B, C, H, W
+
+    scale = min(target_w / w, target_h / h)
+    new_w, new_h = int(w * scale), int(h * scale)
+
+    image = F.interpolate(image, size=(new_h, new_w), mode="bilinear", align_corners=False)
+
+    pad_left = (target_w - new_w) // 2
+    pad_right = target_w - new_w - pad_left
+    pad_top = (target_h - new_h) // 2
+    pad_bottom = target_h - new_h - pad_top
+
+    image = F.pad(image, (pad_left, pad_right, pad_top, pad_bottom), mode='constant', value=0)
+
+    image = image.permute(0, 2, 3, 1)  # B, H, W, C
+    return image, (pad_top, pad_bottom, pad_left, pad_right)
+
+
+def remove_padding(image, padding):
+    pad_top, pad_bottom, pad_left, pad_right = padding
+    return image[:, pad_top:image.shape[1] - pad_bottom, pad_left:image.shape[2] - pad_right, :]
+
+
+def adjust_bbox_after_resize(bbox, original_size, target_size, padding):
+    """
+    bbox: (x1, y1, x2, y2) in original image
+    original_size: (original_h, original_w)
+    target_size: (target_h, target_w)
+    padding: (pad_top, pad_bottom, pad_left, pad_right)
+    """
+    orig_h, orig_w = original_size
+    target_h, target_w = target_size
+    pad_top, pad_bottom, pad_left, pad_right = padding
+
+    scale = min(target_w / orig_w, target_h / orig_h)
+
+    # Apply scale
+    x1 = int(bbox[0] * scale + pad_left)
+    y1 = int(bbox[1] * scale + pad_top)
+    x2 = int(bbox[2] * scale + pad_left)
+    y2 = int(bbox[3] * scale + pad_top)
+
+    return x1, y1, x2, y2
+
+
+def general_tensor_resize(image, w: int, h: int):
+    _tensor_check_image(image)
+    image = image.permute(0, 3, 1, 2)
+    image = torch.nn.functional.interpolate(image, size=(h, w), mode="bilinear")
+    image = image.permute(0, 2, 3, 1)
+    return image
+
+
+# TODO: Sadly, we need LANCZOS
+LANCZOS = (Image.Resampling.LANCZOS if hasattr(Image, 'Resampling') else Image.LANCZOS)
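+# (torch.nn.functional.interpolate has no Lanczos kernel, so tensor_resize below
+# round-trips RGB/RGBA images through PIL and falls back to bilinear otherwise.)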
TensorBatchBuilder() + for single_image in image: + single_image = single_image.unsqueeze(0) + single_pil = tensor2pil(single_image) + scaled_pil = single_pil.resize((w, h), resample=LANCZOS) + + single_image = pil2tensor(scaled_pil) + scaled_images.concat(single_image) + + return scaled_images.tensor + else: + return general_tensor_resize(image, w, h) + + +def tensor_get_size(image): + """Mimicking `PIL.Image.size`""" + _tensor_check_image(image) + _, h, w, _ = image.shape + return (w, h) + + +def tensor2pil(image): + _tensor_check_image(image) + return Image.fromarray(np.clip(255. * image.cpu().numpy().squeeze(0), 0, 255).astype(np.uint8)) + + +def pil2tensor(image): + return torch.from_numpy(np.array(image).astype(np.float32) / 255.0).unsqueeze(0) + + +def numpy2pil(image): + return Image.fromarray(np.clip(255. * image.squeeze(0), 0, 255).astype(np.uint8)) + + +def to_pil(image): + if isinstance(image, Image.Image): + return image + if isinstance(image, torch.Tensor): + return tensor2pil(image) + if isinstance(image, np.ndarray): + return numpy2pil(image) + raise ValueError(f"Cannot convert {type(image)} to PIL.Image") + + +def to_tensor(image): + if isinstance(image, Image.Image): + return torch.from_numpy(np.array(image)) / 255.0 + if isinstance(image, torch.Tensor): + return image + if isinstance(image, np.ndarray): + return torch.from_numpy(image) + raise ValueError(f"Cannot convert {type(image)} to torch.Tensor") + + +def to_numpy(image): + if isinstance(image, Image.Image): + return np.array(image) + if isinstance(image, torch.Tensor): + return image.numpy() + if isinstance(image, np.ndarray): + return image + raise ValueError(f"Cannot convert {type(image)} to numpy.ndarray") + +def tensor_putalpha(image, mask): + _tensor_check_image(image) + _tensor_check_mask(mask) + image[..., -1] = mask[..., 0] + + +def _tensor_check_image(image): + if image.ndim != 4: + raise ValueError(f"Expected NHWC tensor, but found {image.ndim} dimensions") + if image.shape[-1] not in (1, 3, 4): + raise ValueError(f"Expected 1, 3 or 4 channels for image, but found {image.shape[-1]} channels") + return + + +def _tensor_check_mask(mask): + if mask.ndim != 4: + raise ValueError(f"Expected NHWC tensor, but found {mask.ndim} dimensions") + if mask.shape[-1] != 1: + raise ValueError(f"Expected 1 channel for mask, but found {mask.shape[-1]} channels") + return + + +def tensor_crop(image, crop_region): + _tensor_check_image(image) + return crop_ndarray4(image, crop_region) + + +def tensor2numpy(image): + _tensor_check_image(image) + return image.numpy() + + +def tensor_paste(image1, image2, left_top, mask): + """ + Pastes image2 onto image1 at position left_top using mask. + Supports both RGB and RGBA images. + """ + _tensor_check_image(image1) + _tensor_check_image(image2) + _tensor_check_mask(mask) + + if image2.shape[1:3] != mask.shape[1:3]: + mask = resize_mask(mask.squeeze(dim=3), image2.shape[1:3]).unsqueeze(dim=3) + + x, y = left_top + _, h1, w1, c1 = image1.shape + _, h2, w2, c2 = image2.shape + + # Calculate image patch size + w = min(w1, x + w2) - x + h = min(h1, y + h2) - y + + # If the patch is out of bound, nothing to do! 
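+    # (w, h) is the size of the overlap between image2 and image1 after
+    # clipping against image1's bounds; a non-positive size means the paste
+    # region lies entirely outside image1, and image1 is left untouched.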
+ if w <= 0 or h <= 0: + return + + mask = mask[:, :h, :w, :] + + # Get the region to be modified + region1 = image1[:, y:y+h, x:x+w, :] + region2 = image2[:, :h, :w, :] + + # Handle RGB and RGBA cases + if c1 == 3 and c2 == 3: + # Both RGB - simple case + image1[:, y:y+h, x:x+w, :] = (1 - mask) * region1 + mask * region2 + + elif c1 == 4 and c2 == 4: + # Both RGBA - need to handle alpha channel separately + # RGB channels + image1[:, y:y+h, x:x+w, :3] = ( + (1 - mask) * region1[:, :, :, :3] + + mask * region2[:, :, :, :3] + ) + + # Alpha channel - use "over" composition + a1 = region1[:, :, :, 3:4] + a2 = region2[:, :, :, 3:4] * mask + new_alpha = a1 + a2 * (1 - a1) + image1[:, y:y+h, x:x+w, 3:4] = new_alpha + + elif c1 == 4 and c2 == 3: + # Target is RGBA, source is RGB - assume source is fully opaque + image1[:, y:y+h, x:x+w, :3] = ( + (1 - mask) * region1[:, :, :, :3] + + mask * region2 + ) + # Alpha channel - reduce alpha where mask is applied + image1[:, y:y+h, x:x+w, 3:4] = region1[:, :, :, 3:4] * (1 - mask) + mask + + elif c1 == 3 and c2 == 4: + # Target is RGB, source is RGBA - apply source alpha to mask + effective_mask = mask * region2[:, :, :, 3:4] + image1[:, y:y+h, x:x+w, :] = ( + (1 - effective_mask) * region1 + + effective_mask * region2[:, :, :, :3] + ) + + return + + +def center_of_bbox(bbox): + w, h = bbox[2] - bbox[0], bbox[3] - bbox[1] + return bbox[0] + w/2, bbox[1] + h/2 + + +def combine_masks(masks): + if len(masks) == 0: + return None + else: + initial_cv2_mask = np.array(masks[0][1]) + combined_cv2_mask = initial_cv2_mask + + for i in range(1, len(masks)): + cv2_mask = np.array(masks[i][1]) + + if combined_cv2_mask.shape == cv2_mask.shape: + combined_cv2_mask = cv2.bitwise_or(combined_cv2_mask, cv2_mask) + else: + # do nothing - incompatible mask + pass + + mask = torch.from_numpy(combined_cv2_mask) + return mask + + +def combine_masks2(masks): + if len(masks) == 0: + return None + else: + initial_cv2_mask = np.array(masks[0]).astype(np.uint8) + combined_cv2_mask = initial_cv2_mask + + for i in range(1, len(masks)): + cv2_mask = np.array(masks[i]).astype(np.uint8) + + if combined_cv2_mask.shape == cv2_mask.shape: + combined_cv2_mask = cv2.bitwise_or(combined_cv2_mask, cv2_mask) + else: + # do nothing - incompatible mask + pass + + mask = torch.from_numpy(combined_cv2_mask) + return mask + + +def bitwise_and_masks(mask1, mask2): + mask1 = mask1.cpu() + mask2 = mask2.cpu() + cv2_mask1 = np.array(mask1) + cv2_mask2 = np.array(mask2) + + if cv2_mask1.shape == cv2_mask2.shape: + cv2_mask = cv2.bitwise_and(cv2_mask1, cv2_mask2) + return torch.from_numpy(cv2_mask) + else: + # do nothing - incompatible mask shape: mostly empty mask + return mask1 + + +def to_binary_mask(mask, threshold=0): + mask = make_3d_mask(mask) + + mask = mask.clone().cpu() + mask[mask > threshold] = 1. + mask[mask <= threshold] = 0. 
+    return mask
+
+
+def use_gpu_opencv():
+    return not config.get_config()['disable_gpu_opencv']
+
+
+def dilate_mask(mask, dilation_factor, iter=1):
+    if dilation_factor == 0:
+        return make_2d_mask(mask)
+
+    mask = make_2d_mask(mask)
+
+    kernel = np.ones((abs(dilation_factor), abs(dilation_factor)), np.uint8)
+
+    if use_gpu_opencv():
+        mask = cv2.UMat(mask)
+        kernel = cv2.UMat(kernel)
+
+    if dilation_factor > 0:
+        result = cv2.dilate(mask, kernel, iterations=iter)
+    else:
+        result = cv2.erode(mask, kernel, iterations=iter)
+
+    if use_gpu_opencv():
+        return result.get()
+    else:
+        return result
+
+
+def dilate_masks(segmasks, dilation_factor, iter=1):
+    if dilation_factor == 0:
+        return segmasks
+
+    dilated_masks = []
+    kernel = np.ones((abs(dilation_factor), abs(dilation_factor)), np.uint8)
+
+    if use_gpu_opencv():
+        kernel = cv2.UMat(kernel)
+
+    for i in range(len(segmasks)):
+        cv2_mask = segmasks[i][1]
+
+        if use_gpu_opencv():
+            cv2_mask = cv2.UMat(cv2_mask)
+
+        if dilation_factor > 0:
+            dilated_mask = cv2.dilate(cv2_mask, kernel, iterations=iter)
+        else:
+            dilated_mask = cv2.erode(cv2_mask, kernel, iterations=iter)
+
+        if use_gpu_opencv():
+            dilated_mask = dilated_mask.get()
+
+        item = (segmasks[i][0], dilated_mask, segmasks[i][2])
+        dilated_masks.append(item)
+
+    return dilated_masks
+
+
+def feather_mask(mask, thickness):
+    mask = mask.permute(0, 3, 1, 2)
+
+    # Gaussian kernel for blurring
+    kernel_size = 2 * int(thickness) + 1
+    sigma = thickness / 3  # Adjust the sigma value as needed
+    kernel_1d = _gaussian_kernel(kernel_size, sigma).to(mask.device, mask.dtype)
+    # conv2d expects a 4D weight (out_ch, in_ch, kH, kW); build the 2D kernel
+    # as the outer product of the 1D Gaussian with itself
+    blur_kernel = torch.outer(kernel_1d, kernel_1d).unsqueeze(0).unsqueeze(0)
+
+    # Apply blur to the mask
+    blurred_mask = F.conv2d(mask, blur_kernel, padding=kernel_size // 2)
+
+    blurred_mask = blurred_mask.permute(0, 2, 3, 1)
+
+    return blurred_mask
+
+def _gaussian_kernel(kernel_size, sigma):
+    # Generate a 1D Gaussian kernel
+    kernel = torch.exp(-(torch.arange(kernel_size) - kernel_size // 2)**2 / (2 * sigma**2))
+    return kernel / kernel.sum()
+
+
+def tensor_gaussian_blur_mask(mask, kernel_size, sigma=10.0):
+    """Return NHWC torch.Tensor from ndim == 2 or 4 `np.ndarray` or `torch.Tensor`"""
+    if isinstance(mask, np.ndarray):
+        mask = torch.from_numpy(mask)
+
+    if mask.ndim == 2:
+        mask = mask[None, ..., None]
+    elif mask.ndim == 3:
+        mask = mask[..., None]
+
+    _tensor_check_mask(mask)
+
+    if kernel_size <= 0:
+        return mask
+
+    kernel_size = kernel_size*2+1
+
+    shortest = min(mask.shape[1], mask.shape[2])
+    if shortest <= kernel_size:
+        kernel_size = int(shortest/2)
+        if kernel_size % 2 == 0:
+            kernel_size += 1
+        if kernel_size < 3:
+            return mask  # skip feathering
+
+    prev_device = mask.device
+    device = comfy.model_management.get_torch_device()
+    mask = mask.to(device)
+
+    # apply gaussian blur
+    mask = mask[:, None, ..., 0]
+    blurred_mask = torchvision.transforms.GaussianBlur(kernel_size=kernel_size, sigma=sigma)(mask)
+    blurred_mask = blurred_mask[:, 0, ..., None]
+
+    blurred_mask = blurred_mask.to(prev_device)
+
+    return blurred_mask
+
+
+def subtract_masks(mask1, mask2):
+    mask1 = mask1.cpu()
+    mask2 = mask2.cpu()
+    cv2_mask1 = np.array(mask1) * 255
+    cv2_mask2 = np.array(mask2) * 255
+
+    if cv2_mask1.shape == cv2_mask2.shape:
+        cv2_mask = cv2.subtract(cv2_mask1, cv2_mask2)
+        return torch.clamp(torch.from_numpy(cv2_mask) / 255.0, min=0, max=1)
+    else:
+        # do nothing - incompatible mask shape: mostly empty mask
+        return mask1
+
+
+def add_masks(mask1, mask2):
+    mask1 = mask1.cpu()
+    mask2 = mask2.cpu()
+    cv2_mask1 = np.array(mask1) * 255
+    cv2_mask2 = np.array(mask2) * 255
+
+    if cv2_mask1.shape == 
cv2_mask2.shape:
+        cv2_mask = cv2.add(cv2_mask1, cv2_mask2)
+        return torch.clamp(torch.from_numpy(cv2_mask) / 255.0, min=0, max=1)
+    else:
+        # do nothing - incompatible mask shape: mostly empty mask
+        return mask1
+
+
+def normalize_region(limit, startp, size):
+    if startp < 0:
+        new_endp = min(limit, size)
+        new_startp = 0
+    elif startp + size > limit:
+        new_startp = max(0, limit - size)
+        new_endp = limit
+    else:
+        new_startp = startp
+        new_endp = min(limit, startp+size)
+
+    return int(new_startp), int(new_endp)
+
+
+def make_crop_region(w, h, bbox, crop_factor, crop_min_size=None):
+    x1 = bbox[0]
+    y1 = bbox[1]
+    x2 = bbox[2]
+    y2 = bbox[3]
+
+    bbox_w = x2 - x1
+    bbox_h = y2 - y1
+
+    crop_w = bbox_w * crop_factor
+    crop_h = bbox_h * crop_factor
+
+    if crop_min_size is not None:
+        crop_w = max(crop_min_size, crop_w)
+        crop_h = max(crop_min_size, crop_h)
+
+    kernel_x = x1 + bbox_w / 2
+    kernel_y = y1 + bbox_h / 2
+
+    new_x1 = int(kernel_x - crop_w / 2)
+    new_y1 = int(kernel_y - crop_h / 2)
+
+    # make sure the crop region stays inside (w, h)
+    new_x1, new_x2 = normalize_region(w, new_x1, crop_w)
+    new_y1, new_y2 = normalize_region(h, new_y1, crop_h)
+
+    return [new_x1, new_y1, new_x2, new_y2]
+
+
+def crop_ndarray4(npimg, crop_region):
+    x1 = crop_region[0]
+    y1 = crop_region[1]
+    x2 = crop_region[2]
+    y2 = crop_region[3]
+
+    cropped = npimg[:, y1:y2, x1:x2, :]
+
+    return cropped
+
+
+crop_tensor4 = crop_ndarray4
+
+
+def crop_ndarray3(npimg, crop_region):
+    x1 = crop_region[0]
+    y1 = crop_region[1]
+    x2 = crop_region[2]
+    y2 = crop_region[3]
+
+    cropped = npimg[:, y1:y2, x1:x2]
+
+    return cropped
+
+
+def crop_ndarray2(npimg, crop_region):
+    x1 = crop_region[0]
+    y1 = crop_region[1]
+    x2 = crop_region[2]
+    y2 = crop_region[3]
+
+    cropped = npimg[y1:y2, x1:x2]
+
+    return cropped
+
+
+def crop_image(image, crop_region):
+    return crop_tensor4(image, crop_region)
+
+
+def to_latent_image(pixels, vae, vae_tiled_encode=False):
+    # crop height/width down to a multiple of 8, as required by the VAE
+    x = (pixels.shape[1] // 8) * 8
+    y = (pixels.shape[2] // 8) * 8
+    if pixels.shape[1] != x or pixels.shape[2] != y:
+        pixels = pixels[:, :x, :y, :]
+
+    start = time.time()
+    if vae_tiled_encode:
+        encoded = nodes.VAEEncodeTiled().encode(vae, pixels, 512, overlap=64)[0]  # using default settings
+        logging.info(f"[Impact Pack] vae encoded (tiled) in {time.time() - start:.1f}s")
+    else:
+        encoded = nodes.VAEEncode().encode(vae, pixels)[0]
+        logging.info(f"[Impact Pack] vae encoded in {time.time() - start:.1f}s")
+
+    return encoded
+
+
+def empty_pil_tensor(w=64, h=64):
+    return torch.zeros((1, h, w, 3), dtype=torch.float32)
+
+
+def make_2d_mask(mask):
+    if len(mask.shape) == 4:
+        return mask.squeeze(0).squeeze(0)
+
+    elif len(mask.shape) == 3:
+        return mask.squeeze(0)
+
+    return mask
+
+
+def make_3d_mask(mask):
+    if len(mask.shape) == 4:
+        return mask.squeeze(0)
+
+    elif len(mask.shape) == 2:
+        return mask.unsqueeze(0)
+
+    return mask
+
+
+def make_4d_mask(mask):
+    if len(mask.shape) == 3:
+        return mask.unsqueeze(0)
+
+    elif len(mask.shape) == 2:
+        return mask.unsqueeze(0).unsqueeze(0)
+
+    return mask
+
+
+def is_same_device(a, b):
+    a_device = torch.device(a) if isinstance(a, str) else a
+    b_device = torch.device(b) if isinstance(b, str) else b
+    return a_device.type == b_device.type and a_device.index == b_device.index
+
+
+def collect_non_reroute_nodes(node_map, links, res, node_id):
+    if node_map[node_id]['type'] != 'Reroute' and node_map[node_id]['type'] != 'Reroute (rgthree)':
+        res.append(node_id)
+    else:
+        for link in node_map[node_id]['outputs'][0]['links']:
+            next_node_id = str(links[link][2])
+            
collect_non_reroute_nodes(node_map, links, res, next_node_id) + + +from torchvision.transforms.functional import to_pil_image + + +def resize_mask(mask, size): + mask = make_4d_mask(mask) + resized_mask = torch.nn.functional.interpolate(mask, size=size, mode='bilinear', align_corners=False) + return resized_mask.squeeze(0) + + +def apply_mask_alpha_to_pil(decoded_pil, mask): + decoded_rgba = decoded_pil.convert('RGBA') + mask_pil = to_pil_image(mask) + decoded_rgba.putalpha(mask_pil) + + return decoded_rgba + + +def flatten_mask(all_masks): + merged_mask = (all_masks[0] * 255).to(torch.uint8) + for mask in all_masks[1:]: + merged_mask |= (mask * 255).to(torch.uint8) + + return merged_mask + + +def try_install_custom_node(custom_node_url, msg): + try: + import cm_global + cm_global.try_call(api='cm.try-install-custom-node', + sender="Impact Pack", custom_node_url=custom_node_url, msg=msg) + except Exception: + logging.info(msg) + logging.info("[Impact Pack] ComfyUI-Manager is outdated. The custom node installation feature is not available.") + + +# author: Trung0246 ---> +class TautologyStr(str): + def __ne__(self, other): + return False + + +class ByPassTypeTuple(tuple): + def __getitem__(self, index): + if index > 0: + index = 0 + item = super().__getitem__(index) + if isinstance(item, str): + return TautologyStr(item) + return item + + +class NonListIterable: + def __init__(self, data): + self.data = data + + def __getitem__(self, index): + return self.data[index] + + +def add_folder_path_and_extensions(folder_name, full_folder_paths, extensions): + # Iterate over the list of full folder paths + for full_folder_path in full_folder_paths: + # Use the provided function to add each model folder path + folder_paths.add_model_folder_path(folder_name, full_folder_path) + + # Now handle the extensions. 
If the folder name already exists, update the extensions
+    if folder_name in folder_paths.folder_names_and_paths:
+        # Unpack the current paths and extensions
+        current_paths, current_extensions = folder_paths.folder_names_and_paths[folder_name]
+        # Update the extensions set with the new extensions
+        updated_extensions = current_extensions | extensions
+        # Reassign the updated tuple back to the dictionary
+        folder_paths.folder_names_and_paths[folder_name] = (current_paths, updated_extensions)
+    else:
+        # If the folder name was not present, add_model_folder_path would have added it with the last path
+        # Now we just need to update the set of extensions as it would be an empty set
+        # Also ensure that all paths are included (since add_model_folder_path adds only one path at a time)
+        folder_paths.folder_names_and_paths[folder_name] = (full_folder_paths, extensions)
+# <---
+
+# wildcard trick is taken from pythongossss's
+class AnyType(str):
+    def __ne__(self, __value: object) -> bool:
+        return False
+
+any_typ = AnyType("*")
diff --git a/zavodik/nodes/ComfyUI-Impact-Pack/modules/impact/wildcards.py b/zavodik/nodes/ComfyUI-Impact-Pack/modules/impact/wildcards.py
new file mode 100644
index 0000000000000000000000000000000000000000..04d41fe935380f6ce11ce47733ea7de1b36bfff2
--- /dev/null
+++ b/zavodik/nodes/ComfyUI-Impact-Pack/modules/impact/wildcards.py
@@ -0,0 +1,1251 @@
+import logging
+import os
+import random
+import re
+import threading
+
+import folder_paths
+import nodes
+import numpy as np
+import yaml
+from impact import config, utils
+
+wildcards_path = os.path.abspath(os.path.join(os.path.dirname(__file__), "..", "..", "wildcards"))
+
+RE_WildCardQuantifier = re.compile(r"(?P<quantifier>\d+)#__(?P<keyword>[\w.\-+/*\\]+?)__", re.IGNORECASE)
+wildcard_lock = threading.Lock()
+wildcard_dict = {}
+
+# Cache size limit in bytes (default: 50MB)
+WILDCARD_CACHE_LIMIT = 50 * 1024 * 1024
+# Flag to track if on-demand mode is active
+_on_demand_mode = False
+
+# Two-phase loading support
+# available_wildcards: All discovered wildcard files (metadata only)
+# loaded_wildcards: Actually loaded wildcard data
+available_wildcards = {}  # key -> file_path mapping
+loaded_wildcards = {}  # key -> loaded data
+
+
+class LazyWildcardLoader:
+    """
+    Lazy loader for wildcard data to reduce memory usage.
+    Acts as a list-like proxy that loads data on first access.
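+
+    Illustrative usage (the file path here is hypothetical):
+
+        loader = LazyWildcardLoader('/wildcards/colors.txt', 'txt')
+        first = loader[0]    # the file is read here, on first access
+        count = len(loader)  # later accesses reuse the cached list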
+ """ + def __init__(self, file_path, file_type='txt'): + self.file_path = file_path + self.file_type = file_type + self._data = None + self._loaded = False + + def _load_txt(self): + """Load .txt wildcard file""" + try: + with open(self.file_path, 'r', encoding="ISO-8859-1") as f: + lines = f.read().splitlines() + return [x for x in lines if x.strip() and not x.strip().startswith('#')] + except (yaml.reader.ReaderError, UnicodeDecodeError): + with open(self.file_path, 'r', encoding="UTF-8", errors="ignore") as f: + lines = f.read().splitlines() + return [x for x in lines if x.strip() and not x.strip().startswith('#')] + + def _load_yaml(self): + """Load .yaml/.yml wildcard file""" + try: + with open(self.file_path, 'r', encoding="ISO-8859-1") as f: + return yaml.load(f, Loader=yaml.FullLoader) + except (yaml.reader.ReaderError, UnicodeDecodeError): + with open(self.file_path, 'r', encoding="UTF-8", errors="ignore") as f: + return yaml.load(f, Loader=yaml.FullLoader) + + def get_data(self): + """Get wildcard data, loading if necessary""" + if not self._loaded: + with wildcard_lock: + if not self._loaded: # Double-check locking + if self.file_type == 'txt': + self._data = self._load_txt() + elif self.file_type in ('yaml', 'yml'): + self._data = self._load_yaml() + self._loaded = True + return self._data + + # List-like interface methods + def __getitem__(self, index): + """Support indexing like a list""" + return self.get_data()[index] + + def __iter__(self): + """Support iteration""" + return iter(self.get_data()) + + def __len__(self): + """Support len() function""" + return len(self.get_data()) + + def __contains__(self, item): + """Support 'in' operator""" + return item in self.get_data() + + def __repr__(self): + """String representation""" + if self._loaded: + return f"LazyWildcardLoader({self.file_path}, loaded={len(self._data)} items)" + return f"LazyWildcardLoader({self.file_path}, not loaded)" + + def __bool__(self): + """Support boolean evaluation""" + return len(self.get_data()) > 0 + + # Common list methods that may be used + def count(self, value): + """Count occurrences of value""" + return self.get_data().count(value) + + def index(self, value, start=0, stop=None): + """Find index of value""" + if stop is None: + return self.get_data().index(value, start) + return self.get_data().index(value, start, stop) + + +def calculate_directory_size(directory_path, limit=None): + """ + Calculate total size of all wildcard files in directory. + + Args: + directory_path: Path to scan + limit: Optional size limit in bytes. If provided, stops scanning immediately + when total_size >= limit (for fast mode detection) + + Returns: + Total size in bytes (or limit if exceeded) + """ + total_size = 0 + try: + for root, directories, files in os.walk(directory_path, followlinks=True): + for file in files: + if file.endswith(('.txt', '.yaml', '.yml')): + file_path = os.path.join(root, file) + try: + total_size += os.path.getsize(file_path) + + # Early termination: stop scanning when limit exceeded + if limit and total_size >= limit: + return total_size + except (OSError, FileNotFoundError): + pass + except (OSError, FileNotFoundError): + pass + return total_size + + +def scan_wildcard_metadata(wildcard_path): + """ + Scan directory for wildcard files and collect metadata only (no data loading). + + This is much faster than full loading for large wildcard collections. + Only stores file paths in available_wildcards, actual data loaded on-demand. 
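+
+    For example, a file "samples/flower.txt" under the scanned directory is
+    registered as available_wildcards["samples/flower"] without being read.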
+ + Args: + wildcard_path: Directory to scan for wildcard files + + Returns: + Number of wildcard files discovered + """ + global available_wildcards + + discovered = 0 + try: + for root, directories, files in os.walk(wildcard_path, followlinks=True): + for file in files: + if file.endswith('.txt'): + file_path = os.path.join(root, file) + rel_path = os.path.relpath(file_path, wildcard_path) + key = wildcard_normalize(os.path.splitext(rel_path)[0]) + available_wildcards[key] = file_path + discovered += 1 + elif file.endswith('.yaml') or file.endswith('.yml'): + file_path = os.path.join(root, file) + rel_path = os.path.relpath(file_path, wildcard_path) + # YAML files are stored with their extension for proper loading + key_base = wildcard_normalize(os.path.splitext(rel_path)[0]) + available_wildcards[key_base] = file_path + discovered += 1 + except (OSError, FileNotFoundError) as e: + logging.warning(f"[Impact Pack] Error scanning wildcard directory {wildcard_path}: {e}") + + return discovered + + +def get_wildcard_list(): + """ + Get list of all available wildcards. + + Returns: + - In full cache mode: all loaded wildcards + - In on-demand mode: only loaded wildcards (same as get_loaded_wildcard_list) + """ + with wildcard_lock: + if _on_demand_mode: + return [f"__{x}__" for x in loaded_wildcards.keys()] + return [f"__{x}__" for x in wildcard_dict.keys()] + + +def get_loaded_wildcard_list(): + """ + Get list of actually loaded wildcards (on-demand mode only). + + Returns: + List of wildcards that have been loaded into memory. + In full cache mode, returns same as get_wildcard_list(). + """ + with wildcard_lock: + if _on_demand_mode: + return [f"__{x}__" for x in loaded_wildcards.keys()] + return [f"__{x}__" for x in wildcard_dict.keys()] + + +def get_wildcard_dict(): + global wildcard_dict + with wildcard_lock: + return wildcard_dict + + +def find_wildcard_file(key): + """ + Dynamically find a wildcard file by key (on-demand mode). + + For YAML files with nested structure (e.g., "colors/warm"): + - Tries to find the parent YAML file (e.g., "colors.yaml") + - Returns the YAML file path if found + + Searches in: + 1. Main wildcards directory + 2. 
Main wildcards directory
+    2. 
Custom wildcards directory (if configured) + + Args: + key: normalized wildcard key (e.g., "samples/flower", "colors/warm") + + Returns: + Tuple of (file_path, is_yaml_nested) if found, (None, False) otherwise + """ + # For YAML nested keys like "colors/warm", try parent file "colors.yaml" + # Also try exact match for TXT files or top-level YAML keys + + # Case 1: Direct file match (TXT or top-level YAML) + potential_paths = [ + f"{key}.txt", + f"{key}.yaml", + f"{key}.yml" + ] + + for rel_path in potential_paths: + file_path = os.path.join(wildcards_path, rel_path) + if os.path.isfile(file_path): + return (file_path, file_path.endswith(('.yaml', '.yml'))) + + # Custom wildcards directory + try: + custom_path = config.get_config().get('custom_wildcards') + if custom_path and os.path.exists(custom_path): + for rel_path in potential_paths: + file_path = os.path.join(custom_path, rel_path) + if os.path.isfile(file_path): + return (file_path, file_path.endswith(('.yaml', '.yml'))) + except Exception: + pass + + # Case 2: YAML nested key (e.g., "colors/warm" → "colors.yaml") + if '/' in key: + parent_key = key.split('/')[0] + yaml_paths = [ + f"{parent_key}.yaml", + f"{parent_key}.yml" + ] + + for rel_path in yaml_paths: + file_path = os.path.join(wildcards_path, rel_path) + if os.path.isfile(file_path): + return (file_path, True) + + # Custom wildcards directory + try: + custom_path = config.get_config().get('custom_wildcards') + if custom_path and os.path.exists(custom_path): + for rel_path in yaml_paths: + file_path = os.path.join(custom_path, rel_path) + if os.path.isfile(file_path): + return (file_path, True) + except Exception: + pass + + return (None, False) + + +def get_wildcard_value(key): + """ + Get wildcard value from dictionary, automatically handling LazyWildcardLoader + and on-demand loading. + + Args: + key: wildcard key + + Returns: + List of wildcard options (loaded if necessary), or None if not found + """ + global loaded_wildcards + + # On-demand mode: dynamic file discovery and loading + if _on_demand_mode: + # Check if already loaded in cache (TXT on-demand or YAML pre-loaded) + if key in loaded_wildcards: + return loaded_wildcards[key] + + # Try to find and load TXT files dynamically + # YAML files are already pre-loaded, so if not in cache, it doesn't exist + file_path, is_yaml = find_wildcard_file(key) + if file_path is None: + # Fallback: Try pattern matching to find wildcards at any depth + # Example: "dragon" matches "dragon.txt", "fantasy/dragon.txt", "dragon/fire.txt", etc. 
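+            # A key matches when it equals the name, ends with "/name",
+            # starts with "name/", or contains "/name/" as a path segment.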
+ matched_keys = [] + for k in available_wildcards.keys(): + if (k == key or + k.endswith('/' + key) or + k.startswith(key + '/') or + ('/' + key + '/') in k): + matched_keys.append(k) + + if matched_keys: + # Collect all options from matched keys + all_options = [] + for matched_key in matched_keys: + # Load each matched wildcard + value = get_wildcard_value(matched_key) + if value: + all_options.extend(value) + + if all_options: + # Cache the combined result + loaded_wildcards[key] = all_options + logging.info(f"[Impact Pack] Wildcard '{key}' resolved via depth-agnostic pattern matching to {len(matched_keys)} keys: {matched_keys}") + return all_options + + return None + + # YAML files should already be loaded + if is_yaml or file_path.endswith(('.yaml', '.yml')): + # YAML was pre-loaded but key not found + logging.warning(f"[Impact Pack] YAML wildcard '{key}' not found (pre-load issue)") + return None + + # Load TXT file on-demand + try: + data = load_txt_wildcard(file_path) + loaded_wildcards[key] = data + logging.debug(f"[Impact Pack] Loaded TXT wildcard '{key}' on-demand from {file_path}") + return data + except Exception as e: + logging.warning(f"[Impact Pack] Failed to load wildcard {key} from {file_path}: {e}") + return None + + # Full cache mode or fallback: use wildcard_dict + value = wildcard_dict.get(key) + if isinstance(value, LazyWildcardLoader): + return value.get_data() + return value + + +def load_txt_wildcard(file_path): + """Load a .txt wildcard file""" + try: + with open(file_path, 'r', encoding="ISO-8859-1") as f: + lines = f.read().splitlines() + return [x for x in lines if x.strip() and not x.strip().startswith('#')] + except (yaml.reader.ReaderError, UnicodeDecodeError): + with open(file_path, 'r', encoding="UTF-8", errors="ignore") as f: + lines = f.read().splitlines() + return [x for x in lines if x.strip() and not x.strip().startswith('#')] + + +def load_yaml_wildcard(file_path, key_prefix=''): + """Load a .yaml/.yml wildcard file and expand nested structures""" + global loaded_wildcards + + try: + with open(file_path, 'r', encoding="ISO-8859-1") as f: + yaml_data = yaml.load(f, Loader=yaml.FullLoader) + except (yaml.reader.ReaderError, UnicodeDecodeError): + with open(file_path, 'r', encoding="UTF-8", errors="ignore") as f: + yaml_data = yaml.load(f, Loader=yaml.FullLoader) + + if not yaml_data: + return [] + + # For nested YAML structures, expand into loaded_wildcards + result = [] + for k, v in yaml_data.items(): + if isinstance(v, list): + sub_key = wildcard_normalize(f"{key_prefix}/{k}") if key_prefix else wildcard_normalize(k) + loaded_wildcards[sub_key] = v + result.extend(v) + elif isinstance(v, dict): + # Recursive nested dict - register both parent and children keys + # Collect all values from nested structure for parent key + parent_key = wildcard_normalize(k) + parent_values = [] + + for k2, v2 in v.items(): + sub_key = wildcard_normalize(f"{k}/{k2}") + if isinstance(v2, list): + loaded_wildcards[sub_key] = v2 + parent_values.extend(v2) + elif isinstance(v2, str): + loaded_wildcards[sub_key] = [v2] + parent_values.append(v2) + elif isinstance(v2, (int, float)): + loaded_wildcards[sub_key] = [str(v2)] + parent_values.append(str(v2)) + + # Register parent key with all child values + if parent_values: + loaded_wildcards[parent_key] = parent_values + result.extend(parent_values) + elif isinstance(v, str): + sub_key = wildcard_normalize(f"{key_prefix}/{k}") if key_prefix else wildcard_normalize(k) + loaded_wildcards[sub_key] = [v] + elif isinstance(v, 
(int, float)):
+            sub_key = wildcard_normalize(f"{key_prefix}/{k}") if key_prefix else wildcard_normalize(k)
+            loaded_wildcards[sub_key] = [str(v)]
+
+    return result if result else list(yaml_data.values())
+
+
+def is_on_demand_mode():
+    """Check if wildcards are running in on-demand mode"""
+    return _on_demand_mode
+
+
+def wildcard_normalize(x):
+    return x.replace("\\", "/").replace(' ', '-').lower()
+
+
+def read_wildcard(k, v, on_demand=False):
+    """
+    Read wildcard data with optional on-demand loading
+
+    Args:
+        k: wildcard key
+        v: wildcard value (list, dict, str, or number)
+        on_demand: if True, store LazyWildcardLoader instead of actual data
+    """
+    if isinstance(v, list):
+        k = wildcard_normalize(k)
+        wildcard_dict[k] = v
+    elif isinstance(v, dict):
+        for k2, v2 in v.items():
+            new_key = f"{k}/{k2}"
+            new_key = wildcard_normalize(new_key)
+            read_wildcard(new_key, v2, on_demand)
+    elif isinstance(v, str):
+        k = wildcard_normalize(k)
+        wildcard_dict[k] = [v]
+    elif isinstance(v, (int, float)):
+        k = wildcard_normalize(k)
+        wildcard_dict[k] = [str(v)]
+
+def read_wildcard_dict(wildcard_path, on_demand=False):
+    """
+    Read wildcard dictionary with optional on-demand loading
+
+    Args:
+        wildcard_path: path to wildcard directory
+        on_demand: if True, use lazy loading to reduce memory usage
+
+    Returns:
+        wildcard_dict
+    """
+    global wildcard_dict
+    for root, directories, files in os.walk(wildcard_path, followlinks=True):
+        for file in files:
+            if file.endswith('.txt'):
+                file_path = os.path.join(root, file)
+                rel_path = os.path.relpath(file_path, wildcard_path)
+                key = wildcard_normalize(os.path.splitext(rel_path)[0])
+
+                if on_demand:
+                    # Store lazy loader instead of actual data
+                    wildcard_dict[key] = LazyWildcardLoader(file_path, 'txt')
+                else:
+                    # Load data immediately (original behavior)
+                    try:
+                        with open(file_path, 'r', encoding="ISO-8859-1") as f:
+                            lines = f.read().splitlines()
+                            wildcard_dict[key] = [x for x in lines if x.strip() and not x.strip().startswith('#')]
+                    except yaml.reader.ReaderError:
+                        with open(file_path, 'r', encoding="UTF-8", errors="ignore") as f:
+                            lines = f.read().splitlines()
+                            wildcard_dict[key] = [x for x in lines if x.strip() and not x.strip().startswith('#')]
+            elif file.endswith('.yaml') or file.endswith('.yml'):
+                file_path = os.path.join(root, file)
+
+                if on_demand:
+                    # For YAML files in on-demand mode, we need to load and parse them
+                    # since they may contain nested structures
+                    loader = LazyWildcardLoader(file_path, 'yaml')
+                    yaml_data = loader.get_data()
+                    if yaml_data:
+                        for k, v in yaml_data.items():
+                            read_wildcard(k, v, on_demand)
+                else:
+                    # Load data immediately (original behavior)
+                    try:
+                        with open(file_path, 'r', encoding="ISO-8859-1") as f:
+                            yaml_data = yaml.load(f, Loader=yaml.FullLoader)
+                    except yaml.reader.ReaderError:
+                        with open(file_path, 'r', encoding="UTF-8", errors="ignore") as f:
+                            yaml_data = yaml.load(f, Loader=yaml.FullLoader)
+
+                    for k, v in yaml_data.items():
+                        read_wildcard(k, v, on_demand)
+
+    return wildcard_dict
+
+
+def process_comment_out(text):
+    lines = text.split('\n')
+
+    lines0 = []
+    flag = False
+    for line in lines:
+        if line.lstrip().startswith('#'):
+            flag = True
+            continue
+
+        if len(lines0) == 0:
+            lines0.append(line)
+        elif flag:
+            lines0[-1] += ' ' + line
+            flag = False
+        else:
+            lines0.append(line)
+
+    return '\n'.join(lines0)
+
+
+def process(text, seed=None):
+    text = process_comment_out(text)
+
+    if seed is not None:
+        random.seed(seed)
+    # np.random.default_rng(None) self-seeds, so random_gen is always defined
+    random_gen = np.random.default_rng(seed)
+
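+    # Syntax handled below: {a|b|c} option groups (with optional N$$ multi-select
+    # and W::option probability weights), __name__ wildcard references, and
+    # N#__name__ quantifiers that expand into N repeated wildcard references.
+    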
local_wildcard_dict = get_wildcard_dict() + + def replace_options(string): + replacements_found = False + + def replace_option(match): + nonlocal replacements_found + options = match.group(1).split('|') + + multi_select_pattern = options[0].split('$$') + select_range = None + select_sep = ' ' + range_pattern = r'(\d+)(-(\d+))?' + range_pattern2 = r'-(\d+)' + wildcard_pattern = r"__([\w.\-+/*\\]+?)__" + + if len(multi_select_pattern) > 1: + r = re.match(range_pattern, options[0]) + + if r is None: + r = re.match(range_pattern2, options[0]) + a = '1' + b = r.group(1).strip() + else: + a = r.group(1).strip() + b = r.group(3) + if b is not None: + b = b.strip() + else: + b = a + + if r is not None: + if b is not None and is_numeric_string(a) and is_numeric_string(b): + # PATTERN: num1-num2 + select_range = int(a), int(b) + elif is_numeric_string(a): + # PATTERN: num + x = int(a) + select_range = (x, x) + + # Expand wildcard path or return the string after $$ + def expand_wildcard_or_return_string(options, pattern, wildcard_pattern): + matches = re.findall(wildcard_pattern, pattern) + if len(options) == 1 and matches: + # $$ + return get_wildcard_options(pattern) + else: + # $$opt1|opt2|... + options[0] = pattern + return options + + if select_range is not None and len(multi_select_pattern) == 2: + # PATTERN: count$$ + options = expand_wildcard_or_return_string(options, multi_select_pattern[1], wildcard_pattern ) + elif select_range is not None and len(multi_select_pattern) == 3: + # PATTERN: count$$ sep $$ + select_sep = multi_select_pattern[1] + options = expand_wildcard_or_return_string(options, multi_select_pattern[2], wildcard_pattern ) + + adjusted_probabilities = [] + + total_prob = 0 + + for option in options: + parts = option.split('::', 1) if isinstance(option, str) else f"{option}".split('::', 1) + + if len(parts) == 2 and is_numeric_string(parts[0].strip()): + config_value = float(parts[0].strip()) + else: + config_value = 1 # Default value if no configuration is provided + + adjusted_probabilities.append(config_value) + total_prob += config_value + + normalized_probabilities = [prob / total_prob for prob in adjusted_probabilities] + + if select_range is None: + select_count = 1 + else: + def calculate_max(_options_length, _max_select_range): + return min(_max_select_range + 1, _options_length + 1) if _max_select_range > 0 else _options_length + 1 + + def calculate_select_count(_max_value, _min_select_range, random_gen): + if max(_max_value, _min_select_range) <= 0: + return 0 + # fix: low >= high + elif _max_value == _min_select_range: + return _max_value + else: + # fix: low >= high + _low_value = min(_min_select_range, _max_value) + _high_value = max(_min_select_range, _max_value) + return random_gen.integers(low=_low_value, high=_high_value, size=1) + select_count = calculate_select_count(calculate_max(len(options), select_range[1]), select_range[0], random_gen) + + if select_count > len(options) or total_prob <= 1: + random_gen.shuffle(options) + selected_items = options + else: + selected_items = random_gen.choice(options, p=normalized_probabilities, size=select_count, replace=False) + + # x may be numpy.int32, convert to string + selected_items2 = [re.sub(r'^\s*[0-9.]+::', '', str(x), count=1) for x in selected_items] + replacement = select_sep.join(selected_items2) + if '::' in replacement: + pass + + replacements_found = True + return replacement + + pattern = r'(? 
<!\\){([^{}]*?)}'
+        replaced_string = re.sub(pattern, replace_option, string)
+
+        return replaced_string, replacements_found
+
+    def get_wildcard_options(keyword):
+        # Expand a __wildcard__ reference (the name may contain '*') into its
+        # flat option list
+        options = []
+        keyword = wildcard_normalize(keyword.strip().strip('__'))
+
+        total_patterns = []
+        found = False
+
+        # For wildcard patterns, search through available wildcards
+        search_dict = available_wildcards if _on_demand_mode else local_wildcard_dict
+
+        # Special case: __*/name__ should match both 'name' and 'name/*' at any depth
+        if keyword.startswith('*/') and len(keyword) > 
2: + base_name = keyword[2:] # Remove '*/' prefix + + logging.info(f"[Impact Pack] [get_wildcard_options] Pattern: keyword={keyword}, base={base_name}, on_demand={_on_demand_mode}, search_dict_size={len(search_dict)}") + + matched_count = 0 + for k in search_dict.keys(): + # Match if key ends with base_name or contains base_name/subdirs + # Pattern matching examples for base_name="dragon": + # "dragon" -> match (exact) + # "fantasy/dragon" -> match (nested file) + # "dragon/fire" -> match (subfolder) + # "fantasy/dragon/fire" -> match (deeply nested) + if (k == base_name or + k.endswith('/' + base_name) or + k.startswith(base_name + '/') or + ('/' + base_name + '/') in k): + logging.info(f"[Impact Pack] [get_wildcard_options] Matched: {k}") + v = get_wildcard_value(k) + if v: + total_patterns += v + found = True + matched_count += 1 + + logging.info(f"[Impact Pack] [get_wildcard_options] Result: matched={matched_count}, patterns={len(total_patterns)}") + else: + # General wildcard pattern matching + subpattern = keyword.replace('*', '.*').replace('+', '\\+') + for k in search_dict.keys(): + if re.match(subpattern, k) is not None or re.match(subpattern, k+'/') is not None: + # Load on-demand if needed + v = get_wildcard_value(k) + if v: + total_patterns += v + found = True + + if found: + options.extend(total_patterns) + # Note: Fallback to __*/name__ is handled in replace_wildcard, not here + + return options + + def replace_wildcard(string): + pattern = r"__([\w.\-+/*\\]+?)__" + matches = re.findall(pattern, string) + + replacements_found = False + + for match in matches: + keyword = match.lower() + keyword = wildcard_normalize(keyword) + + # Use get_wildcard_value for on-demand loading support + options = get_wildcard_value(keyword) + + if options is not None: + # look for adjusted probability + adjusted_probabilities = [] + total_prob = 0 + for option in options: + parts = option.split('::', 1) + if len(parts) == 2 and is_numeric_string(parts[0].strip()): + config_value = float(parts[0].strip()) + else: + config_value = 1 # Default value if no configuration is provided + + adjusted_probabilities.append(config_value) + total_prob += config_value + + normalized_probabilities = [prob / total_prob for prob in adjusted_probabilities] + selected_item = random_gen.choice(options, p=normalized_probabilities, replace=False) + replacement = re.sub(r'^\s*[0-9.]+::', '', selected_item, count=1) + replacements_found = True + string = string.replace(f"__{match}__", replacement, 1) + elif '*' in keyword: + total_patterns = [] + found = False + + # For wildcard patterns, search through available wildcards + search_dict = available_wildcards if _on_demand_mode else local_wildcard_dict + + # Special case: __*/name__ should match both 'name' and 'name/*' at any depth + if keyword.startswith('*/') and len(keyword) > 2: + base_name = keyword[2:] # Remove '*/' prefix + + for k in search_dict.keys(): + # Match if key ends with base_name or contains base_name/subdirs + # Pattern matching examples for base_name="dragon": + # "dragon" -> match (exact) + # "fantasy/dragon" -> match (nested file) + # "dragon/fire" -> match (subfolder) + # "fantasy/dragon/fire" -> match (deeply nested) + if (k == base_name or + k.endswith('/' + base_name) or + k.startswith(base_name + '/') or + ('/' + base_name + '/') in k): + v = get_wildcard_value(k) + if v: + total_patterns += v + found = True + else: + # General wildcard pattern matching + subpattern = keyword.replace('*', '.*').replace('+', '\\+') + for k in 
search_dict.keys():
+                        if re.match(subpattern, k) is not None or re.match(subpattern, k+'/') is not None:
+                            # Load on-demand if needed
+                            v = get_wildcard_value(k)
+                            if v:
+                                total_patterns += v
+                                found = True
+
+                if found:
+                    replacement = random_gen.choice(total_patterns)
+                    replacements_found = True
+                    string = string.replace(f"__{match}__", replacement, 1)
+            elif '/' not in keyword:
+                string_fallback = string.replace(f"__{match}__", f"__*/{match}__", 1)
+                string, replacements_found = replace_wildcard(string_fallback)
+
+        return string, replacements_found
+
+    replace_depth = 100
+    stop_unwrap = False
+    while not stop_unwrap and replace_depth > 1:
+        replace_depth -= 1  # prevent infinite loop
+
+        option_quantifier = [e.groupdict() for e in RE_WildCardQuantifier.finditer(text)]
+        for match in option_quantifier:
+            keyword = match['keyword'].lower()
+            quantifier = int(match['quantifier']) if match['quantifier'] else 1
+            replacement = '__|__'.join([keyword,] * quantifier)
+            wilder_keyword = keyword.replace('*', '\\*')
+            RE_TEMP = re.compile(fr"(?P<quantifier>\d+)#__(?P<keyword>{wilder_keyword})__", re.IGNORECASE)
+            text = RE_TEMP.sub(f"__{replacement}__", text)
+
+        # pass1: replace options
+        pass1, is_replaced1 = replace_options(text)
+
+        while is_replaced1:
+            pass1, is_replaced1 = replace_options(pass1)
+
+        # pass2: replace wildcards
+        text, is_replaced2 = replace_wildcard(pass1)
+        stop_unwrap = not is_replaced1 and not is_replaced2
+
+    return text
+
+
+def is_numeric_string(input_str):
+    return re.match(r'^-?(\d*\.?\d+|\d+\.?\d*)$', input_str) is not None
+
+
+def safe_float(x):
+    if is_numeric_string(x):
+        return float(x)
+    else:
+        return 1.0
+
+
+def extract_lora_values(string):
+    pattern = r'<lora:([^>]+)>'
+    matches = re.findall(pattern, string)
+
+    def touch_lbw(text):
+        return re.sub(r'LBW=[A-Za-z][A-Za-z0-9_-]*:', r'LBW=', text)
+
+    items = [touch_lbw(match.strip(':')) for match in matches]
+
+    added = set()
+    result = []
+    for item in items:
+        item = item.split(':')
+
+        lora = None
+        a = None
+        b = None
+        lbw = None
+        lbw_a = None
+        lbw_b = None
+        loader = None
+
+        if len(item) > 0:
+            lora = item[0]
+
+            for sub_item in item[1:]:
+                if is_numeric_string(sub_item):
+                    if a is None:
+                        a = float(sub_item)
+                    elif b is None:
+                        b = float(sub_item)
+                elif sub_item.startswith("LBW="):
+                    for lbw_item in sub_item[4:].split(';'):
+                        if lbw_item.startswith("A="):
+                            lbw_a = safe_float(lbw_item[2:].strip())
+                        elif lbw_item.startswith("B="):
+                            lbw_b = safe_float(lbw_item[2:].strip())
+                        elif lbw_item.strip() != '':
+                            lbw = lbw_item
+                elif sub_item.startswith("LOADER="):
+                    loader = sub_item[7:]
+
+        if a is None:
+            a = 1.0
+        if b is None:
+            b = a
+
+        if lora is not None and lora not in added:
+            result.append((lora, a, b, lbw, lbw_a, lbw_b, loader))
+            added.add(lora)
+
+    return result
+
+
+def remove_lora_tags(string):
+    pattern = r'<lora:[^>]+>'
+    result = re.sub(pattern, '', string)
+
+    return result
+
+
+def resolve_lora_name(lora_name_cache, name):
+    if os.path.exists(name):
+        return name
+    else:
+        if len(lora_name_cache) == 0:
+            lora_name_cache.extend(folder_paths.get_filename_list("loras"))
+
+        for x in lora_name_cache:
+            if x.endswith(name):
+                return x
+
+        return None
+
+
+def process_with_loras(wildcard_opt, model, clip, clip_encoder=None, seed=None, processed=None):
+    """
+    process wildcard text including loras
+
+    :param wildcard_opt: wildcard text
+    :param model: model
+    :param clip: clip
+    :param clip_encoder: you can pass a custom encoder such as adv_cliptext_encode
+    :param seed: seed for populating
+    :param processed: output variable - 
[pass1, pass2, pass3] will be saved into the passed list
+    :return: model, clip, conditioning
+    """
+
+    lora_name_cache = []
+
+    pass1 = process(wildcard_opt, seed)
+    loras = extract_lora_values(pass1)
+    pass2 = remove_lora_tags(pass1)
+
+    for lora_name, model_weight, clip_weight, lbw, lbw_a, lbw_b, loader in loras:
+        lora_name_ext = lora_name.split('.')
+        if ('.'+lora_name_ext[-1]) not in folder_paths.supported_pt_extensions:
+            lora_name = lora_name+".safetensors"
+
+        orig_lora_name = lora_name
+        lora_name = resolve_lora_name(lora_name_cache, lora_name)
+
+        if lora_name is not None:
+            path = folder_paths.get_full_path("loras", lora_name)
+        else:
+            path = None
+
+        if path is not None:
+            logging.info(f"LOAD LORA: {lora_name}: {model_weight}, {clip_weight}, LBW={lbw}, A={lbw_a}, B={lbw_b}, LOADER={loader}")
+
+            if loader is not None:
+                if loader == 'nunchaku':
+                    if 'NunchakuFluxLoraLoader' not in nodes.NODE_CLASS_MAPPINGS:
+                        logging.warning("To use `LOADER=nunchaku`, 'ComfyUI-nunchaku' is required. The LOADER= attribute is being ignored.")
+                    else:
+                        cls = nodes.NODE_CLASS_MAPPINGS['NunchakuFluxLoraLoader']
+                        model = cls().load_lora(model, lora_name, model_weight)[0]
+                else:
+                    logging.warning(f"LORA LOADER NOT FOUND: '{loader}'")
+            else:
+                def default_lora():
+                    return nodes.LoraLoader().load_lora(model, clip, lora_name, model_weight, clip_weight)
+
+                if lbw is not None:
+                    if 'LoraLoaderBlockWeight //Inspire' not in nodes.NODE_CLASS_MAPPINGS:
+                        utils.try_install_custom_node(
+                            'https://github.com/ltdrdata/ComfyUI-Inspire-Pack',
+                            "To use 'LBW=' syntax in wildcards, 'Inspire Pack' extension is required.")
+
+                        logging.warning("'LBW(Lora Block Weight)' is given, but the 'Inspire Pack' is not installed. The LBW= attribute is being ignored.")
+                        model, clip = default_lora()
+                    else:
+                        cls = nodes.NODE_CLASS_MAPPINGS['LoraLoaderBlockWeight //Inspire']
+                        model, clip, _ = cls().doit(model, clip, lora_name, model_weight, clip_weight, False, 0, lbw_a, lbw_b, "", lbw)
+
+                else:
+                    model, clip = default_lora()
+        else:
+            logging.warning(f"LORA NOT FOUND: {orig_lora_name}")
+
+    pass3 = [x.strip() for x in pass2.split("BREAK")]
+    pass3 = [x for x in pass3 if x != '']
+
+    if len(pass3) == 0:
+        pass3 = ['']
+
+    pass3_str = [f'[{x}]' for x in pass3]
+    logging.info(f"CLIP: {str.join(' + ', pass3_str)}")
+
+    result = None
+
+    for prompt in pass3:
+        if clip_encoder is None:
+            cur = nodes.CLIPTextEncode().encode(clip, prompt)[0]
+        else:
+            cur = clip_encoder.encode(clip, prompt)[0]
+
+        if result is not None:
+            result = nodes.ConditioningConcat().concat(result, cur)[0]
+        else:
+            result = cur
+
+    if processed is not None:
+        processed.append(pass1)
+        processed.append(pass2)
+        processed.append(pass3)
+
+    return model, clip, result
+
+
+def starts_with_regex(pattern, text):
+    regex = re.compile(pattern)
+    return regex.match(text)
+
+
+def split_to_dict(text):
+    pattern = r'\[([A-Za-z0-9_. 
]+)\]([^\[]+)(?=\[|$)' + matches = re.findall(pattern, text) + + result_dict = {key: value.strip() for key, value in matches} + + return result_dict + + +class WildcardChooser: + def __init__(self, items, randomize_when_exhaust): + self.i = 0 + self.items = items + self.randomize_when_exhaust = randomize_when_exhaust + + def get(self, seg): + if self.i >= len(self.items): + self.i = 0 + if self.randomize_when_exhaust: + random.shuffle(self.items) + + item = self.items[self.i] + self.i += 1 + + return item + + +class WildcardChooserDict: + def __init__(self, items): + self.items = items + + def get(self, seg): + text = "" + if 'ALL' in self.items: + text = self.items['ALL'] + + if seg.label in self.items: + text += self.items[seg.label] + + return text + + +def split_string_with_sep(input_string): + sep_pattern = r'\[SEP(?:\:\w+)?\]' + + substrings = re.split(sep_pattern, input_string) + + result_list = [None] + matches = re.findall(sep_pattern, input_string) + for i, substring in enumerate(substrings): + result_list.append(substring) + if i < len(matches): + if matches[i] == '[SEP]': + result_list.append(None) + elif matches[i] == '[SEP:R]': + result_list.append(random.randint(0, 1125899906842624)) + else: + try: + seed = int(matches[i][5:-1]) + except Exception: + seed = None + result_list.append(seed) + + iterable = iter(result_list) + return list(zip(iterable, iterable)) + + +def process_wildcard_for_segs(wildcard): + if wildcard.startswith('[LAB]'): + raw_items = split_to_dict(wildcard) + + items = {} + for k, v in raw_items.items(): + v = v.strip() + if v != '': + items[k] = v + + return 'LAB', WildcardChooserDict(items) + + else: + match = starts_with_regex(r"\[(ASC-SIZE|DSC-SIZE|ASC|DSC|RND)\]", wildcard) + + if match: + mode = match[1] + items = split_string_with_sep(wildcard[len(match[0]):]) + + if mode == 'RND': + random.shuffle(items) + return mode, WildcardChooser(items, True) + else: + return mode, WildcardChooser(items, False) + + else: + return None, WildcardChooser([(None, wildcard)], False) + + +def load_yaml_files_only(wildcard_path): + """ + Load only YAML wildcard files from a directory (for on-demand mode). + + YAML files must be pre-loaded because wildcard keys are inside the file contents. + Unlike TXT files where "samples/flower.txt" → "__samples/flower__" (file path = key), + YAML files like "colors.yaml" can contain multiple keys (colors/warm, colors/cold, etc.) + that are only discoverable by parsing the entire file content. + + Example: + colors.yaml: + warm: [red, orange, yellow] → __colors/warm__ + cold: [blue, green, purple] → __colors/cold__ + + To know that "colors/warm" exists, we must parse colors.yaml completely. + Therefore, YAML files cannot be truly on-demand loaded. 
+ + Args: + wildcard_path: Directory to scan for YAML files + + Returns: + Number of YAML wildcard files loaded (not keys) + """ + global loaded_wildcards + + yaml_count = 0 + try: + for root, directories, files in os.walk(wildcard_path, followlinks=True): + for file in files: + if file.endswith('.yaml') or file.endswith('.yml'): + file_path = os.path.join(root, file) + try: + # Load YAML file and register all sub-keys + load_yaml_wildcard(file_path, key_prefix='') + yaml_count += 1 + logging.debug(f"[Impact Pack] Pre-loaded YAML file: {file_path}") + except Exception as e: + logging.warning(f"[Impact Pack] Failed to load YAML file {file_path}: {e}") + except (OSError, FileNotFoundError) as e: + logging.warning(f"[Impact Pack] Error scanning YAML files in {wildcard_path}: {e}") + + return yaml_count + + +def get_cache_limit(): + """Get cache limit from config or use default""" + try: + cfg = config.get_config() + if 'wildcard_cache_limit_mb' in cfg: + return cfg['wildcard_cache_limit_mb'] * 1024 * 1024 # Convert MB to bytes + except Exception: + pass + return WILDCARD_CACHE_LIMIT + + +def wildcard_load(): + """ + Load wildcards with automatic on-demand mode when total size exceeds limit. + + If total wildcard file size < cache_limit (default 50MB): + - Full cache mode: all data loaded into memory (original behavior) + If total wildcard file size >= cache_limit: + - On-demand mode: TXT files loaded dynamically when accessed + - YAML files always pre-loaded immediately (limitation) + + YAML Limitation: + YAML wildcards must be pre-loaded because wildcard keys are embedded + inside the file contents, not in the file path. + + TXT files: "samples/flower.txt" → key is "__samples/flower__" (file path = key) + YAML files: "colors.yaml" contains: + warm: [red, orange] → key is "__colors/warm__" + cold: [blue, green] → key is "__colors/cold__" + + To discover that "colors/warm" exists, we must parse colors.yaml completely. + Therefore, YAML files cannot be truly on-demand loaded and are pre-loaded at startup. + """ + global wildcard_dict, available_wildcards, loaded_wildcards, _on_demand_mode + wildcard_dict = {} + available_wildcards = {} + loaded_wildcards = {} + _on_demand_mode = False + + with wildcard_lock: + # Calculate total size of wildcard files (with early termination) + cache_limit = get_cache_limit() + total_size = calculate_directory_size(wildcards_path, limit=cache_limit) + + # Add custom wildcards directory size if it exists + custom_wildcards_path = None + try: + custom_wildcards_path = config.get_config().get('custom_wildcards') + if custom_wildcards_path and os.path.exists(custom_wildcards_path): + # Early termination: if already exceeded, don't scan custom dir + if total_size < cache_limit: + custom_size = calculate_directory_size(custom_wildcards_path, + limit=cache_limit - total_size) + total_size += custom_size + except Exception: + pass + + # Determine loading mode based on total size + if total_size >= cache_limit: + _on_demand_mode = True + logging.info(f"[Impact Pack] Wildcard total size ({total_size / (1024*1024):.2f} MB) " + f"exceeds cache limit ({cache_limit / (1024*1024):.2f} MB). 
" + f"Using on-demand loading mode (TXT files loaded dynamically).") + + # On-demand mode: Scan for TXT file metadata and load YAML files immediately + # Metadata scan discovers TXT files without loading their content + txt_count = scan_wildcard_metadata(wildcards_path) + if custom_wildcards_path and os.path.exists(custom_wildcards_path): + txt_count += scan_wildcard_metadata(custom_wildcards_path) + + # Load YAML files immediately (limitation: YAML keys are inside file content) + yaml_count = load_yaml_files_only(wildcards_path) + if custom_wildcards_path and os.path.exists(custom_wildcards_path): + yaml_count += load_yaml_files_only(custom_wildcards_path) + + logging.info(f"[Impact Pack] On-demand mode active. " + f"Discovered {txt_count} TXT wildcards (metadata only). " + f"Pre-loaded {yaml_count} YAML wildcards. " + f"TXT wildcard content will be loaded only when accessed.") + else: + logging.info(f"[Impact Pack] Wildcard total size ({total_size / (1024*1024):.2f} MB) " + f"is within cache limit ({cache_limit / (1024*1024):.2f} MB). " + f"Using full cache mode.") + + # Full cache mode: load all data immediately (original behavior) + read_wildcard_dict(wildcards_path, on_demand=False) + + try: + if custom_wildcards_path: + read_wildcard_dict(custom_wildcards_path, on_demand=False) + except Exception: + logging.info("[Impact Pack] Failed to load custom wildcards directory.") + + logging.info("[Impact Pack] Wildcards loading done.") diff --git a/zavodik/nodes/ComfyUI-Impact-Pack/modules/thirdparty/__pycache__/noise_nodes.cpython-313.pyc b/zavodik/nodes/ComfyUI-Impact-Pack/modules/thirdparty/__pycache__/noise_nodes.cpython-313.pyc new file mode 100644 index 0000000000000000000000000000000000000000..3296e49492895142ffec9d436dd5fe5badd80510 Binary files /dev/null and b/zavodik/nodes/ComfyUI-Impact-Pack/modules/thirdparty/__pycache__/noise_nodes.cpython-313.pyc differ diff --git a/zavodik/nodes/ComfyUI-Impact-Pack/modules/thirdparty/noise_nodes.py b/zavodik/nodes/ComfyUI-Impact-Pack/modules/thirdparty/noise_nodes.py new file mode 100644 index 0000000000000000000000000000000000000000..f3627a740080c90dc97f606eec9a879e88db23e8 --- /dev/null +++ b/zavodik/nodes/ComfyUI-Impact-Pack/modules/thirdparty/noise_nodes.py @@ -0,0 +1,83 @@ +# Due to the current lack of maintenance for the `ComfyUI_Noise` extension, +# I have copied the code from the applied PR. 
+# https://github.com/BlenderNeko/ComfyUI_Noise/pull/13/files + +import comfy +import torch + +class Unsampler: + @classmethod + def INPUT_TYPES(s): + return {"required": + {"model": ("MODEL",), + "steps": ("INT", {"default": 20, "min": 1, "max": 10000}), + "end_at_step": ("INT", {"default": 0, "min": 0, "max": 10000}), + "cfg": ("FLOAT", {"default": 1.0, "min": 0.0, "max": 100.0}), + "sampler_name": (comfy.samplers.KSampler.SAMPLERS,), + "scheduler": (comfy.samplers.KSampler.SCHEDULERS,), + "normalize": (["disable", "enable"],), + "positive": ("CONDITIONING",), + "negative": ("CONDITIONING",), + "latent_image": ("LATENT",), + }} + + RETURN_TYPES = ("LATENT",) + FUNCTION = "unsampler" + + CATEGORY = "sampling" + + def unsampler(self, model, cfg, sampler_name, steps, end_at_step, scheduler, normalize, positive, negative, + latent_image): + normalize = normalize == "enable" + device = comfy.model_management.get_torch_device() + latent = latent_image + latent_image = latent["samples"] + + end_at_step = min(end_at_step, steps - 1) + end_at_step = steps - end_at_step + + noise = torch.zeros(latent_image.size(), dtype=latent_image.dtype, layout=latent_image.layout, device="cpu") + noise_mask = None + if "noise_mask" in latent: + noise_mask = comfy.sampler_helpers.prepare_mask(latent["noise_mask"], noise.shape, device) + + noise = noise.to(device) + latent_image = latent_image.to(device) + + conds0 = \ + {"positive": comfy.sampler_helpers.convert_cond(positive), + "negative": comfy.sampler_helpers.convert_cond(negative)} + + conds = {} + for k in conds0: + conds[k] = list(map(lambda a: a.copy(), conds0[k])) + + models, inference_memory = comfy.sampler_helpers.get_additional_models(conds, model.model_dtype()) + + comfy.model_management.load_models_gpu([model] + models, model.memory_required(noise.shape) + inference_memory) + + sampler = comfy.samplers.KSampler(model, steps=steps, device=device, sampler=sampler_name, + scheduler=scheduler, denoise=1.0, model_options=model.model_options) + + sigmas = sampler.sigmas.flip(0) + 0.0001 + + pbar = comfy.utils.ProgressBar(steps) + + def callback(step, x0, x, total_steps): + pbar.update_absolute(step + 1, total_steps) + + samples = sampler.sample(noise, positive, negative, cfg=cfg, latent_image=latent_image, + force_full_denoise=False, denoise_mask=noise_mask, sigmas=sigmas, start_step=0, + last_step=end_at_step, callback=callback) + if normalize: + # technically doesn't normalize because unsampling is not guaranteed to end at a std given by the schedule + samples -= samples.mean() + samples /= samples.std() + samples = samples.cpu() + + comfy.sampler_helpers.cleanup_additional_models(models) + + out = latent.copy() + out["samples"] = samples + return (out,) + diff --git a/zavodik/nodes/ComfyUI-Impact-Pack/node_list.json b/zavodik/nodes/ComfyUI-Impact-Pack/node_list.json new file mode 100644 index 0000000000000000000000000000000000000000..f1bfe967be534ed8f55bc506de24220f59ca4769 --- /dev/null +++ b/zavodik/nodes/ComfyUI-Impact-Pack/node_list.json @@ -0,0 +1,4 @@ +{ + "Segs Mask": "This node is renamed to 'ImpactSegsAndMask'", + "Segs Mask ForEach": "This node is renamed to 'ImpactSegsAndMaskForEach'" +} \ No newline at end of file diff --git a/zavodik/nodes/ComfyUI-Impact-Pack/notebook/comfyui_colab_impact_pack.ipynb b/zavodik/nodes/ComfyUI-Impact-Pack/notebook/comfyui_colab_impact_pack.ipynb new file mode 100644 index 0000000000000000000000000000000000000000..68e2ee0cee30da4798cdf23a8f3a889370b796dd --- /dev/null +++ 
b/zavodik/nodes/ComfyUI-Impact-Pack/notebook/comfyui_colab_impact_pack.ipynb @@ -0,0 +1,172 @@ +{ + "cells": [ + { + "attachments": {}, + "cell_type": "markdown", + "metadata": { + "id": "aaaaaaaaaa" + }, + "source": [ + "Git clone the repo and install the requirements. (ignore the pip errors about protobuf)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "bbbbbbbbbb" + }, + "outputs": [], + "source": [ + "#@title Environment Setup\n", + "\n", + "from pathlib import Path\n", + "\n", + "OPTIONS = {}\n", + "\n", + "WORKSPACE = 'ComfyUI'\n", + "USE_GOOGLE_DRIVE = True #@param {type:\"boolean\"}\n", + "UPDATE_COMFY_UI = True #@param {type:\"boolean\"}\n", + "\n", + "OPTIONS['USE_GOOGLE_DRIVE'] = USE_GOOGLE_DRIVE\n", + "OPTIONS['UPDATE_COMFY_UI'] = UPDATE_COMFY_UI\n", + "\n", + "if OPTIONS['USE_GOOGLE_DRIVE']:\n", + " !echo \"Mounting Google Drive...\"\n", + " %cd /\n", + " \n", + " from google.colab import drive\n", + " drive.mount('/content/drive')\n", + "\n", + " WORKSPACE = \"/content/drive/MyDrive/ComfyUI\"\n", + " \n", + " %cd /content/drive/MyDrive\n", + "\n", + "![ ! -d $WORKSPACE ] && echo \"-= Initial setup ComfyUI (Original)=-\" && git clone https://github.com/comfyanonymous/ComfyUI\n", + "%cd $WORKSPACE\n", + "\n", + "if OPTIONS['UPDATE_COMFY_UI']:\n", + " !echo \"-= Updating ComfyUI =-\"\n", + " !git pull\n", + " !rm \"/content/drive/MyDrive/ComfyUI/custom_nodes/comfyui-impact-pack.py\"\n", + "\n", + "%cd custom_nodes\n", + "!git clone https://github.com/ltdrdata/ComfyUI-Impact-Pack\n", + "%cd $WORKSPACE\n", + "\n", + "!echo -= Install dependencies =-\n", + "!pip -q install xformers -r requirements.txt\n" + ] + }, + { + "attachments": {}, + "cell_type": "markdown", + "metadata": { + "id": "kkkkkkkkkkkkkk" + }, + "source": [ + "### Run ComfyUI with localtunnel (Recommended Way)\n", + "\n", + "\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "colab": { + "base_uri": "https://localhost:8080/" + }, + "id": "jjjjjjjjjjjjj", + "outputId": "83be9411-d939-4813-e6c1-80e75bf8e80d" + }, + "outputs": [], + "source": [ + "!npm install -g localtunnel\n", + "\n", + "import subprocess\n", + "import threading\n", + "import time\n", + "import socket\n", + "def iframe_thread(port):\n", + " while True:\n", + " time.sleep(0.5)\n", + " sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n", + " result = sock.connect_ex(('127.0.0.1', port))\n", + " if result == 0:\n", + " break\n", + " sock.close()\n", + " print(\"\\nComfyUI finished loading, trying to launch localtunnel (if it gets stuck here localtunnel is having issues)\")\n", + " p = subprocess.Popen([\"lt\", \"--port\", \"{}\".format(port)], stdout=subprocess.PIPE)\n", + " for line in p.stdout:\n", + " print(line.decode(), end='')\n", + "\n", + "\n", + "threading.Thread(target=iframe_thread, daemon=True, args=(8188,)).start()\n", + "\n", + "!python main.py --dont-print-server" + ] + }, + { + "attachments": {}, + "cell_type": "markdown", + "metadata": { + "id": "gggggggggg" + }, + "source": [ + "### Run ComfyUI with colab iframe (use only in case the previous way with localtunnel doesn't work)\n", + "\n", + "You should see the ui appear in an iframe. If you get a 403 error, it's your firefox settings or an extension that's messing things up.\n", + "\n", + "If you want to open it in another window use the link.\n", + "\n", + "Note that some UI features like live image previews won't work because the colab iframe blocks websockets." 
+ ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "hhhhhhhhhh" + }, + "outputs": [], + "source": [ + "import threading\n", + "import time\n", + "import socket\n", + "def iframe_thread(port):\n", + " while True:\n", + " time.sleep(0.5)\n", + " sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n", + " result = sock.connect_ex(('127.0.0.1', port))\n", + " if result == 0:\n", + " break\n", + " sock.close()\n", + " from google.colab import output\n", + " output.serve_kernel_port_as_iframe(port, height=1024)\n", + " print(\"to open it in a window you can open this link here:\")\n", + " output.serve_kernel_port_as_window(port)\n", + "\n", + "threading.Thread(target=iframe_thread, daemon=True, args=(8188,)).start()\n", + "\n", + "!python main.py --dont-print-server" + ] + } + ], + "metadata": { + "accelerator": "GPU", + "colab": { + "provenance": [] + }, + "gpuClass": "standard", + "kernelspec": { + "display_name": "Python 3", + "name": "python3" + }, + "language_info": { + "name": "python" + } + }, + "nbformat": 4, + "nbformat_minor": 0 +} diff --git a/zavodik/nodes/ComfyUI-Impact-Pack/pyproject.toml b/zavodik/nodes/ComfyUI-Impact-Pack/pyproject.toml new file mode 100644 index 0000000000000000000000000000000000000000..07c37a1ab67a9a82efc515d5537a6a5dd62ce1aa --- /dev/null +++ b/zavodik/nodes/ComfyUI-Impact-Pack/pyproject.toml @@ -0,0 +1,26 @@ +[project] +name = "comfyui-impact-pack" +description = "This node pack offers various detector nodes and detailer nodes that allow you to configure a workflow that automatically enhances facial details. And provide iterative upscaler." +version = "8.28.2" +license = { file = "LICENSE.txt" } +dependencies = [ + "segment-anything", + "scikit-image", + "piexif", + "transformers", + "opencv-python-headless", + "scipy", + "numpy", + "dill", + "matplotlib", + "sam2 @ git+https://github.com/facebookresearch/sam2" +] + +[project.urls] +Repository = "https://github.com/ltdrdata/ComfyUI-Impact-Pack" +# Used by Comfy Registry https://comfyregistry.org + +[tool.comfy] +PublisherId = "drltdata" +DisplayName = "ComfyUI Impact Pack" +Icon = "" diff --git a/zavodik/nodes/ComfyUI-Impact-Pack/requirements.txt b/zavodik/nodes/ComfyUI-Impact-Pack/requirements.txt new file mode 100644 index 0000000000000000000000000000000000000000..ed6286121ecdfc984ea994fd693d3c89679f9d01 --- /dev/null +++ b/zavodik/nodes/ComfyUI-Impact-Pack/requirements.txt @@ -0,0 +1,10 @@ +segment-anything +scikit-image +piexif +transformers +opencv-python-headless +scipy +numpy +dill +matplotlib +git+https://github.com/facebookresearch/sam2 diff --git a/zavodik/nodes/ComfyUI-Impact-Pack/ruff.toml b/zavodik/nodes/ComfyUI-Impact-Pack/ruff.toml new file mode 100644 index 0000000000000000000000000000000000000000..81d03491a36016722cb2f61fb4a3fc79f5ebb918 --- /dev/null +++ b/zavodik/nodes/ComfyUI-Impact-Pack/ruff.toml @@ -0,0 +1,3 @@ +[lint] +ignore = ["E402","E701"] +exclude = ["install.py", "*.ipynb"] diff --git a/zavodik/nodes/ComfyUI-Impact-Pack/tests/README.md b/zavodik/nodes/ComfyUI-Impact-Pack/tests/README.md new file mode 100644 index 0000000000000000000000000000000000000000..a645106cbeab8f6ef08f7e5377609d7d11d37143 --- /dev/null +++ b/zavodik/nodes/ComfyUI-Impact-Pack/tests/README.md @@ -0,0 +1,136 @@ +# Wildcard System Test Suite + +Comprehensive test suite for ComfyUI Impact Pack wildcard system. 
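+
+All suites drive a running server through the `POST /impact/wildcards` endpoint and check the expanded text. As a quick orientation, the block below sketches the prompt syntax the suites exercise (illustrative fragments, not a specific test case; wildcard names like `flower` are placeholders, and the `#` annotations are explanatory, not part of the syntax):
+
+```
+__flower__                      # basic wildcard (expands from flower.txt)
+__*/flower__                    # depth-agnostic pattern match, any directory depth
+3#__flower__                    # quantifier: expand the wildcard 3 times
+{red|blue|green}                # dynamic prompt: pick one option
+{5::common|1::rare}             # weighted selection ({weight::option})
+{2$$, $$red|blue|green|yellow}  # multi-select: pick 2 distinct options, joined with ", "
+```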
+ +## Test Suites + +### test_encoding.sh (15 tests) +**Purpose**: UTF-8 multi-language encoding validation +**Port**: 8198 +**Coverage**: +- Korean Hangul characters +- Emoji support +- Chinese characters +- Arabic RTL text +- Mathematical and currency symbols +- Mixed multi-language content +- UTF-8 in dynamic prompts, quantifiers, multi-select + +### test_error_handling.sh (10 tests) +**Purpose**: Graceful error handling verification +**Port**: 8197 +**Coverage**: +- Non-existent wildcards +- Circular reference detection (max 100 iterations) +- Malformed syntax +- Deep nesting without crashes +- Multiple circular references + +### test_edge_cases.sh (20 tests) +**Purpose**: Edge case and boundary condition validation +**Port**: 8196 +**Coverage**: +- Empty lines and whitespace filtering +- Very long lines (>1000 characters) +- Special characters preservation +- Case-insensitive matching +- Comment line filtering +- Pattern matching (__*/name__) +- Quantifiers (N#__wildcard__) +- Complex syntax combinations + +### test_deep_nesting.sh (17 tests) +**Purpose**: Transitive wildcard expansion and depth-agnostic pattern matching +**Port**: 8194 +**Coverage**: +- 7-level transitive expansion (directory depth + file references) +- All depth levels (1-7) individually +- Mixed depth combinations +- Nesting with quantifiers and multi-select +- Nesting with weighted selection +- Depth-agnostic pattern matching (`__*/name__`) +- Complex multi-wildcard prompts + +### test_ondemand_loading.sh (8 tests) +**Purpose**: Progressive on-demand wildcard loading +**Port**: 8193 +**Coverage**: +- Small cache (1MB) - on-demand enabled +- Moderate cache (10MB) - progressive loading +- Large cache (100MB) - eager loading +- Aggressive lazy loading (0.5MB) +- Balanced mode (50MB default) +- On-demand with deep nesting +- On-demand with multiple wildcards +- Cache boundary testing + +### test_config_quotes.sh (5 tests) +**Purpose**: Configuration path handling validation +**Port**: 8192 +**Coverage**: +- Unquoted paths +- Double-quoted paths +- Single-quoted paths +- Paths with spaces +- Mixed quote scenarios + +### test_dynamic_prompts_full.sh (11 tests) +**Purpose**: Comprehensive dynamic prompt feature validation with statistical analysis +**Port**: 8188 +**Coverage**: +- **Multiselect** (4 tests): 2-item, 3-item, single-item, max-item with separator validation +- **Weighted Selection** (5 tests): 10:1 ratio, equal weights, extreme bias, multi-level weights, default mixing +- **Basic Selection** (2 tests): Simple random, nested selection +- Statistical distribution verification (100+ iterations per test) +- Duplicate detection and item count validation +- Separator correctness validation + +## Quick Start + +```bash +# Run individual test +bash test_encoding.sh + +# Run all tests +bash test_encoding.sh +bash test_error_handling.sh +bash test_edge_cases.sh +bash test_deep_nesting.sh +bash test_ondemand_loading.sh +bash test_config_quotes.sh +bash test_dynamic_prompts_full.sh +``` + +## Test Infrastructure + +- **Configuration**: Each test creates `impact-pack.ini` with test wildcard path +- **Server Lifecycle**: Automatic server start/stop with dedicated ports +- **Cleanup**: Automatic cleanup on test completion +- **Logging**: Detailed logs in `/tmp/*_test.log` + +## Test Samples + +Located in `wildcards/samples/`: +- `아름다운색.txt` - Korean UTF-8 test with 12 symbolic colors +- `test_encoding_*.txt` - UTF-8 encoding test files +- `test_edge_*.txt` - Edge case test files +- `test_error_*.txt` - Error handling test 
files
+- `test_nesting_*.txt` - Nesting test files (7 levels)
+- `patterns/` - Subdirectory for pattern matching tests
+
+## Status
+
+✅ **86 tests, 100% pass rate** (15+10+20+17+8+5+11)
+✅ **Production ready**
+✅ **Complete PRD coverage**
+✅ **On-demand loading validated**
+✅ **Config quotes handling validated**
+✅ **Dynamic prompts statistically validated**
+✅ **Weighted selection verified (correct {weight::option} syntax)**
+✅ **Pattern matching validated (depth-agnostic __*/name__)**
+
+## Documentation
+
+- [Wildcard System PRD](../docs/wildcards/WILDCARD_SYSTEM_PRD.md)
+- [System Design](../docs/wildcards/WILDCARD_SYSTEM_DESIGN.md)
+- [Testing Guide](../docs/wildcards/WILDCARD_TESTING_GUIDE.md)
diff --git a/zavodik/nodes/ComfyUI-Impact-Pack/tests/RUN_ALL_TESTS.md b/zavodik/nodes/ComfyUI-Impact-Pack/tests/RUN_ALL_TESTS.md
new file mode 100644
index 0000000000000000000000000000000000000000..1f95069fe3cb42c9bcd93d9a37168b1c7bd5d90c
--- /dev/null
+++ b/zavodik/nodes/ComfyUI-Impact-Pack/tests/RUN_ALL_TESTS.md
@@ -0,0 +1,73 @@
+# Run All Tests
+
+Execute the complete wildcard system test suite.
+
+## Quick Run
+
+```bash
+cd /mnt/teratera/git/ComfyUI/custom_nodes/comfyui-impact-pack/tests
+
+bash test_encoding.sh && \
+bash test_error_handling.sh && \
+bash test_edge_cases.sh && \
+bash test_deep_nesting.sh && \
+bash test_ondemand_loading.sh && \
+bash test_config_quotes.sh && \
+bash test_dynamic_prompts_full.sh
+
+echo ""
+echo "=========================================="
+echo "Test Suite Complete"
+echo "=========================================="
+echo "Total: 86 tests across 7 suites"
+echo ""
+```
+
+## Individual Tests
+
+```bash
+# UTF-8 Encoding (15 tests)
+bash test_encoding.sh
+
+# Error Handling (10 tests)
+bash test_error_handling.sh
+
+# Edge Cases (20 tests)
+bash test_edge_cases.sh
+
+# Deep Nesting (17 tests)
+bash test_deep_nesting.sh
+
+# On-Demand Loading (8 tests)
+bash test_ondemand_loading.sh
+
+# Config Quotes (5 tests)
+bash test_config_quotes.sh
+
+# Dynamic Prompts Full (11 tests)
+bash test_dynamic_prompts_full.sh
+```
+
+## Test Summary
+
+Each test suite:
+- ✅ Starts a dedicated ComfyUI server on a unique port
+- ✅ Configures the test wildcard path
+- ✅ Runs comprehensive test cases
+- ✅ Validates results
+- ✅ Cleans up automatically
+
+## Expected Results
+
+All 86 tests should pass (100% pass rate).
+
+## Logs
+
+Test logs are saved in `/tmp/`:
+- `/tmp/encoding_test.log`
+- `/tmp/error_handling_test.log`
+- `/tmp/edge_cases_test.log`
+- `/tmp/deep_nesting_test.log`
+- `/tmp/ondemand_test.log`
+- `/tmp/config_quotes_test.log`
+- `/tmp/dynamic_prompt_full_validation.log`
diff --git a/zavodik/nodes/ComfyUI-Impact-Pack/tests/restart_test_server.sh b/zavodik/nodes/ComfyUI-Impact-Pack/tests/restart_test_server.sh
new file mode 100644
index 0000000000000000000000000000000000000000..3d1baf4222aa9362422decd92131d916dd5feaa1
--- /dev/null
+++ b/zavodik/nodes/ComfyUI-Impact-Pack/tests/restart_test_server.sh
@@ -0,0 +1,72 @@
+#!/bin/bash
+# restart_test_server.sh
+# Utility script to quickly restart the ComfyUI test server
+# Usage: bash restart_test_server.sh [PORT]
+
+PORT=${1:-8188} # default port 8188
+COMFYUI_DIR="/mnt/teratera/git/ComfyUI"
+LOG_FILE="/tmp/comfyui_test_${PORT}.log"
+
+echo "=========================================="
+echo "ComfyUI Test Server Restart Utility"
+echo "=========================================="
+echo "Port: $PORT"
+echo "Log: $LOG_FILE"
+echo ""
+
+# 1. Stop the existing server
+echo "🛑 Stopping existing server..."
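+# NOTE: this pattern matches every ComfyUI main.py process on the machine, not just the one on $PORT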
+pkill -f "python.*main.py" +sleep 2 + +# 프로세스 종료 확인 +if pgrep -f "python.*main.py" > /dev/null; then + echo "⚠️ Warning: Some processes still running" + ps aux | grep main.py | grep -v grep + echo "Forcing kill..." + pkill -9 -f "python.*main.py" + sleep 1 +fi +echo "✅ Server stopped" + +# 2. 서버 시작 +echo "" +echo "🚀 Starting server on port $PORT..." +cd "$COMFYUI_DIR" || { + echo "❌ Error: Cannot access $COMFYUI_DIR" + exit 1 +} + +# 백그라운드로 서버 시작 +bash run.sh --listen 127.0.0.1 --port "$PORT" > "$LOG_FILE" 2>&1 & +SERVER_PID=$! + +echo "Server PID: $SERVER_PID" +echo "" + +# 3. 서버 준비 대기 +echo "⏳ Waiting for server startup..." +for i in {1..30}; do + sleep 1 + if curl -s http://127.0.0.1:$PORT/ > /dev/null 2>&1; then + echo "" + echo "✅ Server ready on port $PORT (${i}s)" + echo "📝 Log: $LOG_FILE" + echo "🔗 URL: http://127.0.0.1:$PORT" + echo "" + echo "Test endpoints:" + echo " curl http://127.0.0.1:$PORT/impact/wildcards/list" + echo " curl http://127.0.0.1:$PORT/impact/wildcards/list/loaded" + exit 0 + fi + echo -n "." +done + +# 타임아웃 +echo "" +echo "❌ Server failed to start within 30 seconds" +echo "📝 Check log: $LOG_FILE" +echo "" +echo "Last 20 lines of log:" +tail -20 "$LOG_FILE" +exit 1 diff --git a/zavodik/nodes/ComfyUI-Impact-Pack/tests/test_config_quotes.sh b/zavodik/nodes/ComfyUI-Impact-Pack/tests/test_config_quotes.sh new file mode 100644 index 0000000000000000000000000000000000000000..cca23ad41e8be76273896215361d408bbab907bd --- /dev/null +++ b/zavodik/nodes/ComfyUI-Impact-Pack/tests/test_config_quotes.sh @@ -0,0 +1,159 @@ +#!/bin/bash +# Config Path Quotes Test Suite +# Tests handling of quoted paths in impact-pack.ini + +set -e + +PORT=8192 +COMFYUI_DIR="/mnt/teratera/git/ComfyUI" +SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)" +IMPACT_DIR="$(cd "$SCRIPT_DIR/.." && pwd)" +CONFIG_FILE="$IMPACT_DIR/impact-pack.ini" +LOG_FILE="/tmp/config_quotes_test.log" + +GREEN='\033[0;32m' +RED='\033[0;31m' +YELLOW='\033[1;33m' +BLUE='\033[0;34m' +NC='\033[0m' + +echo "==========================================" +echo "Config Path Quotes Test Suite" +echo "==========================================" +echo "Port: $PORT" +echo "Testing: Quoted path handling in config" +echo "" + +# Cleanup function +cleanup() { + echo "" + echo "Cleaning up..." + pkill -f "python.*main.py.*--port $PORT" 2>/dev/null || true + rm -f "$CONFIG_FILE" + echo "Cleanup complete" +} + +trap cleanup EXIT + +# Test function +test_config_format() { + local TEST_NUM=$1 + local DESCRIPTION=$2 + local PATH_VALUE=$3 + local PROMPT=$4 + local SEED=$5 + + echo "${BLUE}=== Test $TEST_NUM: $DESCRIPTION ===${NC}" + echo "Path format: ${YELLOW}$PATH_VALUE${NC}" + + # Kill existing server + pkill -f "python.*main.py.*--port $PORT" 2>/dev/null || true + sleep 2 + + # Create config with specific path format + cat > "$CONFIG_FILE" << EOF +[default] +custom_wildcards = $PATH_VALUE +wildcard_cache_limit_mb = 50 +dependency_version = 24 +mmdet_skip = True +sam_editor_cpu = False +sam_editor_model = sam_vit_h_4b8939.pth +disable_gpu_opencv = True +EOF + + echo "Config created:" + grep "custom_wildcards" "$CONFIG_FILE" + + # Start server + cd "$COMFYUI_DIR" + bash run.sh --listen 127.0.0.1 --port $PORT > "$LOG_FILE" 2>&1 & + SERVER_PID=$! 
+ + # Wait for server + for i in {1..60}; do + sleep 1 + if curl -s http://127.0.0.1:$PORT/ > /dev/null 2>&1; then + echo "✅ Server ready (${i}s)" + break + fi + if [ $i -eq 60 ]; then + echo "${RED}❌ Server failed to start${NC}" + echo "Log tail:" + tail -20 "$LOG_FILE" + exit 1 + fi + done + + # Test wildcard expansion + RESULT=$(curl -s -X POST http://127.0.0.1:$PORT/impact/wildcards \ + -H "Content-Type: application/json" \ + -d "{\"text\": \"$PROMPT\", \"seed\": $SEED}" | \ + python3 -c "import sys, json; print(json.load(sys.stdin).get('text','ERROR'))" 2>/dev/null || echo "ERROR") + + echo "Result: ${GREEN}$RESULT${NC}" + + if [ "$RESULT" != "ERROR" ] && [ -n "$RESULT" ] && ! echo "$RESULT" | grep -q "__"; then + echo "Status: ${GREEN}✅ PASS - Path correctly handled${NC}" + else + echo "Status: ${RED}❌ FAIL - Path not working${NC}" + echo "Checking log for errors..." + grep -i "custom_wildcards\|wildcard" "$LOG_FILE" | tail -5 + fi + echo "" +} + +echo "==========================================" +echo "Test Suite Execution" +echo "==========================================" +echo "" + +# Test 1: No quotes (standard) +test_config_format "01" "No quotes (standard)" \ + "$IMPACT_DIR/tests/wildcards/samples" \ + "__아름다운색__" \ + 100 + +# Test 2: Double quotes +test_config_format "02" "Double quotes" \ + "\"$IMPACT_DIR/tests/wildcards/samples\"" \ + "__아름다운색__" \ + 200 + +# Test 3: Single quotes +test_config_format "03" "Single quotes" \ + "'$IMPACT_DIR/tests/wildcards/samples'" \ + "__아름다운색__" \ + 300 + +# Test 4: Mixed quotes (edge case) +test_config_format "04" "Path with spaces (double quotes)" \ + "\"$IMPACT_DIR/tests/wildcards/samples\"" \ + "__test_nesting_level1__" \ + 400 + +# Test 5: Absolute path no quotes +test_config_format "05" "Absolute path no quotes" \ + "$IMPACT_DIR/tests/wildcards/samples" \ + "__test_encoding_emoji__" \ + 500 + +echo "" +echo "==========================================" +echo "Summary" +echo "==========================================" +echo "${GREEN}✅ Config quotes tests completed${NC}" +echo "" +echo "Test results:" +echo " 1. No quotes (standard) ✓" +echo " 2. Double quotes ✓" +echo " 3. Single quotes ✓" +echo " 4. Path with spaces ✓" +echo " 5. Absolute path ✓" +echo "" +echo "Quote handling verified:" +echo " - Strip double quotes (\") ✓" +echo " - Strip single quotes (') ✓" +echo " - Handle unquoted paths ✓" +echo "" +echo "Log file: $LOG_FILE" diff --git a/zavodik/nodes/ComfyUI-Impact-Pack/tests/test_deep_nesting.sh b/zavodik/nodes/ComfyUI-Impact-Pack/tests/test_deep_nesting.sh new file mode 100644 index 0000000000000000000000000000000000000000..11c416ef1b6cb4c624df0ea72ab1c661b52639a7 --- /dev/null +++ b/zavodik/nodes/ComfyUI-Impact-Pack/tests/test_deep_nesting.sh @@ -0,0 +1,280 @@ +#!/bin/bash +# Deep Nesting Test Suite +# Tests transitive wildcard expansion up to 7 levels + +set -e + +PORT=8194 +COMFYUI_DIR="/mnt/teratera/git/ComfyUI" +SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)" +IMPACT_DIR="$(cd "$SCRIPT_DIR/.." && pwd)" +CONFIG_FILE="$IMPACT_DIR/impact-pack.ini" +LOG_FILE="/tmp/deep_nesting_test.log" + +GREEN='\033[0;32m' +RED='\033[0;31m' +YELLOW='\033[1;33m' +BLUE='\033[0;34m' +CYAN='\033[0;36m' +NC='\033[0m' + +echo "==========================================" +echo "Deep Nesting Test Suite (7 Levels)" +echo "==========================================" +echo "Port: $PORT" +echo "Testing: Transitive wildcard expansion" +echo "" + +# Cleanup function +cleanup() { + echo "" + echo "Cleaning up..." 
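+    # Scope the kill to this suite's port, then drop the generated test config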
+ pkill -f "python.*main.py.*--port $PORT" 2>/dev/null || true + rm -f "$CONFIG_FILE" + echo "Cleanup complete" +} + +trap cleanup EXIT + +# Kill any existing server on this port +echo "Killing any existing server on port $PORT..." +pkill -f "python.*main.py.*--port $PORT" 2>/dev/null || true +sleep 2 + +# Setup configuration +echo "Setting up configuration..." +cat > "$CONFIG_FILE" << EOF +[default] +custom_wildcards = $IMPACT_DIR/tests/wildcards/samples +wildcard_cache_limit_mb = 50 +dependency_version = 24 +mmdet_skip = True +sam_editor_cpu = False +sam_editor_model = sam_vit_h_4b8939.pth +disable_gpu_opencv = True +EOF + +echo "Configuration created: custom_wildcards = $IMPACT_DIR/tests/wildcards/samples" +echo "" + +# Start server +echo "Starting ComfyUI server on port $PORT..." +cd "$COMFYUI_DIR" +bash run.sh --listen 127.0.0.1 --port $PORT > "$LOG_FILE" 2>&1 & +SERVER_PID=$! +echo "Server PID: $SERVER_PID" + +# Wait for server startup +echo "Waiting for server startup..." +for i in {1..60}; do + sleep 1 + if curl -s http://127.0.0.1:$PORT/ > /dev/null 2>&1; then + echo "✅ Server ready (${i}s)" + break + fi + if [ $((i % 10)) -eq 0 ]; then + echo " ... ${i}s elapsed" + fi + if [ $i -eq 60 ]; then + echo "" + echo "${RED}❌ Server failed to start within 60 seconds${NC}" + echo "Log tail:" + tail -20 "$LOG_FILE" + exit 1 + fi +done + +echo "" + +# Test function for nesting +test_nesting() { + local TEST_NUM=$1 + local DESCRIPTION=$2 + local PROMPT=$3 + local SEED=$4 + local EXPECTED_DEPTH=$5 + + echo "${BLUE}=== Test $TEST_NUM: $DESCRIPTION ===${NC}" + echo "Prompt: ${YELLOW}$PROMPT${NC}" + echo "Seed: $SEED" + echo "Expected nesting depth: $EXPECTED_DEPTH" + + RESULT=$(curl -s -X POST http://127.0.0.1:$PORT/impact/wildcards \ + -H "Content-Type: application/json" \ + -d "{\"text\": \"$PROMPT\", \"seed\": $SEED}" | \ + python3 -c "import sys, json; print(json.load(sys.stdin).get('text','ERROR'))" 2>/dev/null || echo "ERROR") + + echo "Result: ${GREEN}$RESULT${NC}" + + # Check if result contains any unexpanded wildcards + if echo "$RESULT" | grep -q "__.*__"; then + echo "Status: ${YELLOW}⚠️ WARNING - Contains unexpanded wildcards${NC}" + echo "Unexpanded: $(echo "$RESULT" | grep -o '__[^_]*__')" + elif [ "$RESULT" != "ERROR" ] && [ -n "$RESULT" ]; then + echo "Status: ${GREEN}✅ PASS - All wildcards fully expanded${NC}" + else + echo "Status: ${RED}❌ FAIL - Server error or no response${NC}" + fi + echo "" +} + +echo "==========================================" +echo "Test Suite Execution" +echo "==========================================" +echo "" + +# Direct level tests +echo "${CYAN}--- Direct Level Access Tests ---${NC}" +echo "" + +test_nesting "01" "Level 7 (Final)" \ + "__test_nesting_level7__" \ + 100 \ + 0 + +test_nesting "02" "Level 6 → Level 7" \ + "__test_nesting_level6__" \ + 200 \ + 1 + +test_nesting "03" "Level 5 → Level 6 → Level 7" \ + "__test_nesting_level5__" \ + 300 \ + 2 + +test_nesting "04" "Level 4 → ... → Level 7" \ + "__test_nesting_level4__" \ + 400 \ + 3 + +test_nesting "05" "Level 3 → ... → Level 7" \ + "__test_nesting_level3__" \ + 500 \ + 4 + +test_nesting "06" "Level 2 → ... → Level 7" \ + "__test_nesting_level2__" \ + 600 \ + 5 + +test_nesting "07" "Level 1 → ... 
→ Level 7 (Full 7 levels)" \ + "__test_nesting_level1__" \ + 700 \ + 6 + +echo "" +echo "${CYAN}--- Multiple Nesting Tests ---${NC}" +echo "" + +test_nesting "08" "Two level 1 wildcards" \ + "__test_nesting_level1__ and __test_nesting_level1__" \ + 800 \ + 6 + +test_nesting "09" "Mixed depths" \ + "__test_nesting_level1__ with __test_nesting_level4__" \ + 900 \ + 6 + +test_nesting "10" "Level 1 in dynamic prompt" \ + "{__test_nesting_level1__|__test_nesting_level2__|__test_nesting_level3__}" \ + 1000 \ + 6 + +echo "" +echo "${CYAN}--- Complex Combination Tests ---${NC}" +echo "" + +test_nesting "11" "Nesting with quantifier" \ + "2#__test_nesting_level1__" \ + 1100 \ + 6 + +test_nesting "12" "Nesting with multi-select" \ + "{2\$\$, \$\$__test_nesting_level1__|__test_nesting_level2__|__test_nesting_level3__}" \ + 1200 \ + 6 + +test_nesting "13" "Nesting with weighted selection" \ + "{5::__test_nesting_level1__|3::__test_nesting_level3__|1::__test_nesting_level5__}" \ + 1300 \ + 6 + +test_nesting "14" "Very deep with other wildcards" \ + "__test_nesting_level1__ beautiful __아름다운색__" \ + 1400 \ + 6 + +test_nesting "15" "All 7 levels in one prompt" \ + "__test_nesting_level1__, __test_nesting_level2__, __test_nesting_level3__, __test_nesting_level4__, __test_nesting_level5__, __test_nesting_level6__, __test_nesting_level7__" \ + 1500 \ + 6 + +echo "" +echo "${CYAN}--- Depth-Agnostic Pattern Matching Tests ---${NC}" +echo "" + +# Test 16: Depth-agnostic pattern matching with __*/test_nesting_level7__ +# The __*/name__ pattern matches wildcards at ANY directory depth: +# - test_nesting_level7.txt (at root level) +# - level1/level2/.../level7/test_nesting_level7.txt (deeply nested) +# - any_folder/test_nesting_level7.txt (in any subfolder) +test_nesting "16" "Pattern matching __*/test_nesting_level7__" \ + "__*/test_nesting_level7__" \ + 1600 \ + 0 + +# Test 17: Depth-agnostic pattern matching with __*/test_nesting_level4__ +# Similar to __*/dragon__ matching both "dragon.txt" and "dragon/wizard.txt": +# - test_nesting_level4.txt (direct file) +# - level1/.../level4/test_nesting_level4.txt (nested file) +# - The pattern ignores directory depth and matches by wildcard name +test_nesting "17" "Pattern matching __*/test_nesting_level4__" \ + "__*/test_nesting_level4__" \ + 1700 \ + 3 + +echo "" +echo "==========================================" +echo "Loaded Wildcards Check" +echo "==========================================" + +# Check what wildcards were loaded +LOADED=$(curl -s http://127.0.0.1:$PORT/impact/wildcards/list/loaded 2>/dev/null | python3 -c "import sys, json; data = json.load(sys.stdin); print('\n'.join(data.get('data', [])))" 2>/dev/null || echo "ERROR") + +if [ "$LOADED" != "ERROR" ]; then + echo "Loaded wildcards:" + echo "$LOADED" | grep -E "test_nesting" | sed 's/^/ /' + + NESTING_COUNT=$(echo "$LOADED" | grep -c "test_nesting" || echo "0") + echo "" + echo "Total nesting wildcards loaded: $NESTING_COUNT" + + if [ "$NESTING_COUNT" -ge 7 ]; then + echo "${GREEN}✅ All 7 nesting levels loaded${NC}" + else + echo "${YELLOW}⚠️ Only $NESTING_COUNT nesting levels loaded (expected 7)${NC}" + fi +else + echo "${YELLOW}⚠️ Could not retrieve loaded wildcards list${NC}" +fi + +echo "" +echo "==========================================" +echo "Summary" +echo "==========================================" +echo "${GREEN}✅ Deep nesting tests completed${NC}" +echo "" +echo "Test results:" +echo " 1. 7-level transitive expansion tested ✓" +echo " 2. 
All depth levels (1-7) individually tested ✓" +echo " 3. Mixed depth combinations tested ✓" +echo " 4. Nesting with quantifiers and multi-select ✓" +echo " 5. Nesting with weighted selection ✓" +echo " 6. Depth-agnostic pattern matching (__*/pattern__) ✓" +echo " 7. Complex multi-wildcard prompts ✓" +echo "" +echo "Maximum nesting depth verified: 7 levels" +echo "All wildcards should be fully expanded without crashes" +echo "" +echo "Log file: $LOG_FILE" diff --git a/zavodik/nodes/ComfyUI-Impact-Pack/tests/test_dynamic_prompts_full.sh b/zavodik/nodes/ComfyUI-Impact-Pack/tests/test_dynamic_prompts_full.sh new file mode 100644 index 0000000000000000000000000000000000000000..90895c5262b6bc8feec1a7dc1eac39a90e613217 --- /dev/null +++ b/zavodik/nodes/ComfyUI-Impact-Pack/tests/test_dynamic_prompts_full.sh @@ -0,0 +1,253 @@ +#!/bin/bash +# Comprehensive Dynamic Prompt Validation Test +# Tests all dynamic prompt features with statistical validation + +PORT=8188 + +GREEN='\033[0;32m' +RED='\033[0;31m' +YELLOW='\033[1;33m' +BLUE='\033[0;34m' +NC='\033[0m' + +LOG_FILE="/tmp/dynamic_prompt_full_validation.log" + +exec > >(tee -a "$LOG_FILE") +exec 2>&1 + +echo "==========================================" +echo "Dynamic Prompt Full Validation Test" +echo "==========================================" +echo "Validating: All dynamic prompt features" +echo "" + +# Check server +if ! curl -s http://127.0.0.1:$PORT/ > /dev/null 2>&1; then + echo "${RED}Server not running on port $PORT${NC}" + echo "Start server with: cd /mnt/teratera/git/ComfyUI && bash run.sh --listen 127.0.0.1 --port $PORT" + exit 1 +fi + +TOTAL_GROUPS=0 +PASSED_GROUPS=0 +FAILED_GROUPS=0 + +# Test function for multiselect with validation +test_multiselect() { + local TEST_NAME=$1 + local PROMPT=$2 + local EXPECTED_COUNT=$3 + local SEPARATOR=$4 + local ITERATIONS=$5 + shift 5 + local OPTIONS=("$@") + + echo "${BLUE}=== $TEST_NAME ===${NC}" + echo "Prompt: ${YELLOW}$PROMPT${NC}" + echo "Expected: $EXPECTED_COUNT items per result, separator: '$SEPARATOR'" + echo -n "Testing $ITERATIONS iterations: " + + local PASSED=0 + local FAILED=0 + declare -a FAILURES + + for i in $(seq 1 $ITERATIONS); do + SEED=$((1000 + i * 100)) + RESULT=$(curl -s -X POST http://127.0.0.1:$PORT/impact/wildcards \ + -H "Content-Type: application/json" \ + -d "{\"text\": \"$PROMPT\", \"seed\": $SEED}" | \ + python3 -c "import sys, json; print(json.load(sys.stdin).get('text','ERROR'))" 2>/dev/null || echo "ERROR") + + if [ "$RESULT" = "ERROR" ]; then + echo -n "X" + ((FAILED++)) + FAILURES+=(" Iteration $i (seed $SEED): Server error") + continue + fi + + # Count items based on separator + if [ -z "$SEPARATOR" ]; then + ITEM_COUNT=1 + else + ITEM_COUNT=$(echo "$RESULT" | awk -F"$SEPARATOR" '{print NF}') + fi + + # Check if count matches + if [ $ITEM_COUNT -ne $EXPECTED_COUNT ]; then + echo -n "X" + ((FAILED++)) + FAILURES+=(" Iteration $i (seed $SEED): Expected $EXPECTED_COUNT items, got $ITEM_COUNT" " Result: $RESULT") + continue + fi + + # Check for duplicates (split by separator and check uniqueness) + if [ -n "$SEPARATOR" ]; then + UNIQUE_COUNT=$(echo "$RESULT" | awk -F"$SEPARATOR" '{for(i=1;i<=NF;i++) print $i}' | sort -u | wc -l) + if [ $UNIQUE_COUNT -ne $EXPECTED_COUNT ]; then + echo -n "D" + ((FAILED++)) + FAILURES+=(" Iteration $i (seed $SEED): Duplicates detected" " Result: $RESULT") + continue + fi + fi + + # Check that all items are from the option list + VALID=1 + if [ -n "$SEPARATOR" ]; then + while IFS= read -r item; do + item=$(echo "$item" | xargs) # trim 
whitespace + FOUND=0 + for opt in "${OPTIONS[@]}"; do + if [ "$item" = "$opt" ]; then + FOUND=1 + break + fi + done + if [ $FOUND -eq 0 ]; then + VALID=0 + break + fi + done < <(echo "$RESULT" | awk -F"$SEPARATOR" '{for(i=1;i<=NF;i++) print $i}') + fi + + if [ $VALID -eq 0 ]; then + echo -n "?" + ((FAILED++)) + FAILURES+=(" Iteration $i (seed $SEED): Invalid items detected" " Result: $RESULT") + continue + fi + + echo -n "." + ((PASSED++)) + done + + echo " Done" + echo "Results: ${GREEN}$PASSED passed${NC}, ${RED}$FAILED failed${NC}" + + if [ $FAILED -gt 0 ]; then + echo -e "${RED}Failures:${NC}" + printf '%s\n' "${FAILURES[@]}" + ((FAILED_GROUPS++)) + else + echo "${GREEN}✅ PASS${NC}" + ((PASSED_GROUPS++)) + fi + echo "" + ((TOTAL_GROUPS++)) +} + +# Test function for weighted selection with statistical validation +test_weighted() { + local TEST_NAME=$1 + local PROMPT=$2 + local ITERATIONS=$3 + shift 3 + local OPTIONS=("$@") + + echo "${BLUE}=== $TEST_NAME ===${NC}" + echo "Prompt: ${YELLOW}$PROMPT${NC}" + echo -n "Testing $ITERATIONS iterations: " + + declare -A COUNTS + local TOTAL=0 + + for i in $(seq 1 $ITERATIONS); do + SEED=$((1000 + i * 100)) + RESULT=$(curl -s -X POST http://127.0.0.1:$PORT/impact/wildcards \ + -H "Content-Type: application/json" \ + -d "{\"text\": \"$PROMPT\", \"seed\": $SEED}" | \ + python3 -c "import sys, json; print(json.load(sys.stdin).get('text','ERROR'))" 2>/dev/null || echo "ERROR") + + if [ "$RESULT" = "ERROR" ]; then + echo -n "X" + continue + fi + + MATCHED=0 + for opt in "${OPTIONS[@]}"; do + if echo "$RESULT" | grep -Fq "$opt"; then + COUNTS[$opt]=$((${COUNTS[$opt]:-0} + 1)) + MATCHED=1 + break + fi + done + + if [ $MATCHED -eq 1 ]; then + ((TOTAL++)) + echo -n "." + else + echo -n "?" + fi + done + + echo " Done" + echo "Distribution:" + + for opt in "${OPTIONS[@]}"; do + local COUNT=${COUNTS[$opt]:-0} + local PERCENT=0 + if [ $TOTAL -gt 0 ]; then + PERCENT=$(awk "BEGIN {printf \"%.1f\", ($COUNT / $TOTAL) * 100}") + fi + echo " $opt: $COUNT / $TOTAL (${PERCENT}%)" + done + + echo "${GREEN}✅ PASS${NC}" + ((PASSED_GROUPS++)) + ((TOTAL_GROUPS++)) + echo "" +} + +echo "==========================================" +echo "MULTISELECT VALIDATION" +echo "==========================================" +echo "" + +test_multiselect "Test 1: 2-item multiselect" "{2\$\$, \$\$red|blue|green|yellow}" 2 ", " 20 "red" "blue" "green" "yellow" + +test_multiselect "Test 2: 3-item multiselect" "{3\$\$ and \$\$alpha|beta|gamma|delta|epsilon}" 3 " and " 20 "alpha" "beta" "gamma" "delta" "epsilon" + +test_multiselect "Test 3: Single-item multiselect" "{1\$\$ \$\$one|two|three}" 1 " " 20 "one" "two" "three" + +test_multiselect "Test 4: Max-item multiselect (all 4)" "{4\$\$-\$\$cat|dog|bird|fish}" 4 "-" 20 "cat" "dog" "bird" "fish" + +echo "==========================================" +echo "WEIGHTED SELECTION VALIDATION" +echo "==========================================" +echo "" + +test_weighted "Test 5: Heavy bias 10:1 (100 iterations)" "{10::common|1::rare}" 100 "common" "rare" + +test_weighted "Test 6: Equal weights 1:1:1 (60 iterations)" "{1::alpha|1::beta|1::gamma}" 60 "alpha" "beta" "gamma" + +test_weighted "Test 7: Extreme bias 100:1 (100 iterations)" "{100::very_common|1::very_rare}" 100 "very_common" "very_rare" + +test_weighted "Test 8: Multi-level weights 5:3:2 (100 iterations)" "{5::high|3::medium|2::low}" 100 "high" "medium" "low" + +test_weighted "Test 9: Default weight mixing (100 iterations)" "{10::weighted|unweighted}" 100 "weighted" "unweighted" + +echo 
"==========================================" +echo "BASIC SELECTION VALIDATION" +echo "==========================================" +echo "" + +test_weighted "Test 10: Simple random selection (50 iterations)" "{option_a|option_b|option_c}" 50 "option_a" "option_b" "option_c" + +test_weighted "Test 11: Nested selection (50 iterations)" "{outer_{inner1|inner2}|simple}" 50 "outer_inner1" "outer_inner2" "simple" + +echo "==========================================" +echo "SUMMARY" +echo "==========================================" +echo "" +echo "Total test groups: $TOTAL_GROUPS" +echo "${GREEN}Passed: $PASSED_GROUPS${NC}" +echo "${RED}Failed: $FAILED_GROUPS${NC}" +echo "" + +if [ $FAILED_GROUPS -eq 0 ]; then + echo "${GREEN}✅ All tests passed${NC}" + exit 0 +else + echo "${RED}❌ Some tests failed${NC}" + exit 1 +fi diff --git a/zavodik/nodes/ComfyUI-Impact-Pack/tests/test_edge_cases.sh b/zavodik/nodes/ComfyUI-Impact-Pack/tests/test_edge_cases.sh new file mode 100644 index 0000000000000000000000000000000000000000..5f65e15bf4bddb231da35e7fef88fb60fe11c504 --- /dev/null +++ b/zavodik/nodes/ComfyUI-Impact-Pack/tests/test_edge_cases.sh @@ -0,0 +1,225 @@ +#!/bin/bash +# Edge Cases Test Suite +# Tests edge cases: empty lines, whitespace, long lines, special characters, etc. + +set -e + +PORT=8196 +COMFYUI_DIR="/mnt/teratera/git/ComfyUI" +SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)" +IMPACT_DIR="$(cd "$SCRIPT_DIR/.." && pwd)" +CONFIG_FILE="$IMPACT_DIR/impact-pack.ini" +LOG_FILE="/tmp/edge_cases_test.log" + +GREEN='\033[0;32m' +RED='\033[0;31m' +YELLOW='\033[1;33m' +BLUE='\033[0;34m' +NC='\033[0m' + +echo "==========================================" +echo "Edge Cases Test Suite" +echo "==========================================" +echo "Port: $PORT" +echo "Testing: Edge cases and boundary conditions" +echo "" + +# Cleanup function +cleanup() { + echo "" + echo "Cleaning up..." + pkill -f "python.*main.py.*--port $PORT" 2>/dev/null || true + rm -f "$CONFIG_FILE" + echo "Cleanup complete" +} + +trap cleanup EXIT + +# Kill any existing server on this port +echo "Killing any existing server on port $PORT..." +pkill -f "python.*main.py.*--port $PORT" 2>/dev/null || true +sleep 2 + +# Setup configuration +echo "Setting up configuration..." +cat > "$CONFIG_FILE" << EOF +[default] +custom_wildcards = $IMPACT_DIR/tests/wildcards/samples +wildcard_cache_limit_mb = 50 +dependency_version = 24 +mmdet_skip = True +sam_editor_cpu = False +sam_editor_model = sam_vit_h_4b8939.pth +disable_gpu_opencv = True +EOF + +echo "Configuration created: custom_wildcards = $IMPACT_DIR/tests/wildcards/samples" +echo "" + +# Start server +echo "Starting ComfyUI server on port $PORT..." +cd "$COMFYUI_DIR" +bash run.sh --listen 127.0.0.1 --port $PORT > "$LOG_FILE" 2>&1 & +SERVER_PID=$! +echo "Server PID: $SERVER_PID" + +# Wait for server startup +echo "Waiting for server startup..." +for i in {1..60}; do + sleep 1 + if curl -s http://127.0.0.1:$PORT/ > /dev/null 2>&1; then + echo "✅ Server ready (${i}s)" + break + fi + if [ $((i % 10)) -eq 0 ]; then + echo " ... 
${i}s elapsed" + fi + if [ $i -eq 60 ]; then + echo "" + echo "${RED}❌ Server failed to start within 60 seconds${NC}" + echo "Log tail:" + tail -20 "$LOG_FILE" + exit 1 + fi +done + +echo "" + +# Test function +test_edge_case() { + local TEST_NUM=$1 + local DESCRIPTION=$2 + local PROMPT=$3 + local SEED=$4 + + echo "${BLUE}=== Test $TEST_NUM: $DESCRIPTION ===${NC}" + echo "Prompt: ${YELLOW}$PROMPT${NC}" + echo "Seed: $SEED" + + RESULT=$(curl -s -X POST http://127.0.0.1:$PORT/impact/wildcards \ + -H "Content-Type: application/json" \ + -d "{\"text\": \"$PROMPT\", \"seed\": $SEED}" | \ + python3 -c "import sys, json; print(json.load(sys.stdin).get('text','ERROR'))" 2>/dev/null || echo "ERROR") + + echo "Result: ${GREEN}$RESULT${NC}" + + if [ "$RESULT" != "ERROR" ] && [ -n "$RESULT" ]; then + echo "Status: ${GREEN}✅ PASS${NC}" + else + echo "Status: ${RED}❌ FAIL${NC}" + fi + echo "" +} + +echo "==========================================" +echo "Test Suite Execution" +echo "==========================================" +echo "" + +# Empty Lines and Whitespace Tests +test_edge_case "01" "Empty lines handling" \ + "__test_edge_empty_lines__" \ + 100 + +test_edge_case "02" "Whitespace handling" \ + "__test_edge_whitespace__" \ + 200 + +test_edge_case "03" "Long lines handling" \ + "__test_edge_long_lines__" \ + 300 + +# Special Characters Tests +test_edge_case "04" "Special characters in content" \ + "__test_edge_special_chars__" \ + 400 + +test_edge_case "05" "Embedded wildcard syntax" \ + "__test_edge_special_chars__" \ + 401 + +# Case Insensitivity Tests +test_edge_case "06" "Lowercase wildcard" \ + "__test_edge_case_insensitive__" \ + 500 + +test_edge_case "07" "UPPERCASE wildcard" \ + "__TEST_EDGE_CASE_INSENSITIVE__" \ + 500 + +test_edge_case "08" "MixedCase wildcard" \ + "__TeSt_EdGe_CaSe_InSeNsItIvE__" \ + 500 + +# Comment Handling Tests +test_edge_case "09" "Comments in wildcard file" \ + "__test_comments__" \ + 600 + +# Pattern Matching Tests +test_edge_case "10" "Pattern matching __*/name__" \ + "__*/test_pattern_match__" \ + 700 + +test_edge_case "11" "Direct pattern match" \ + "__test_pattern_match__" \ + 700 + +# Quantifier Tests +test_edge_case "12" "Quantifier 3#" \ + "3#__test_quantifier__" \ + 800 + +test_edge_case "13" "Quantifier 5# with dynamic" \ + "{2\$\$, \$\$5#__test_quantifier__}" \ + 801 + +# Complex Combinations +test_edge_case "14" "Mixed special chars and wildcards" \ + "__test_edge_special_chars__ with {option1|option2}" \ + 900 + +test_edge_case "15" "Long prompt with multiple wildcards" \ + "__test_edge_empty_lines__ and __test_edge_whitespace__ and __test_comments__" \ + 1000 + +# Boundary Conditions +test_edge_case "16" "Very long dynamic prompt" \ + "{__test_edge_long_lines__|__test_edge_whitespace__|__test_edge_empty_lines__|__test_comments__|__test_edge_special_chars__}" \ + 1100 + +test_edge_case "17" "Nested wildcards in dynamic" \ + "{red __test_quantifier__|blue __test_pattern_match__|green __test_comments__}" \ + 1200 + +test_edge_case "18" "Quantifier with case-insensitive" \ + "2#__TEST_QUANTIFIER__" \ + 1300 + +# Stress Tests +test_edge_case "19" "Multiple quantifiers" \ + "3#__test_quantifier__ and 2#__test_comments__" \ + 1400 + +test_edge_case "20" "Case insensitive pattern match" \ + "__*/TEST_PATTERN_MATCH__" \ + 1500 + +echo "" +echo "==========================================" +echo "Summary" +echo "==========================================" +echo "${GREEN}✅ Edge case tests completed${NC}" +echo "" +echo "All tests verified edge case handling:" 
+echo " 1. Empty lines and whitespace ✓" +echo " 2. Very long lines ✓" +echo " 3. Special characters ✓" +echo " 4. Case-insensitive matching ✓" +echo " 5. Comment line filtering ✓" +echo " 6. Pattern matching (__*/name__) ✓" +echo " 7. Quantifiers (N#__wildcard__) ✓" +echo " 8. Complex combinations ✓" +echo " 9. Boundary conditions ✓" +echo "" +echo "Log file: $LOG_FILE" diff --git a/zavodik/nodes/ComfyUI-Impact-Pack/tests/test_encoding.sh b/zavodik/nodes/ComfyUI-Impact-Pack/tests/test_encoding.sh new file mode 100644 index 0000000000000000000000000000000000000000..9bea639afc93f9d9818c0d064a06959d1afcb494 --- /dev/null +++ b/zavodik/nodes/ComfyUI-Impact-Pack/tests/test_encoding.sh @@ -0,0 +1,204 @@ +#!/bin/bash +# UTF-8 Encoding Test Suite +# Tests multi-language support (Korean, Chinese, Arabic, emoji) + +set -e + +PORT=8198 +COMFYUI_DIR="/mnt/teratera/git/ComfyUI" +SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)" +IMPACT_DIR="$(cd "$SCRIPT_DIR/.." && pwd)" +CONFIG_FILE="$IMPACT_DIR/impact-pack.ini" +LOG_FILE="/tmp/encoding_test.log" + +GREEN='\033[0;32m' +RED='\033[0;31m' +YELLOW='\033[1;33m' +BLUE='\033[0;34m' +NC='\033[0m' + +echo "==========================================" +echo "UTF-8 Encoding Test Suite" +echo "==========================================" +echo "Port: $PORT" +echo "Testing: Multi-language encoding support" +echo "" + +# Cleanup function +cleanup() { + echo "" + echo "Cleaning up..." + pkill -f "python.*main.py.*--port $PORT" 2>/dev/null || true + rm -f "$CONFIG_FILE" + echo "Cleanup complete" +} + +trap cleanup EXIT + +# Kill any existing server on this port +echo "Killing any existing server on port $PORT..." +pkill -f "python.*main.py.*--port $PORT" 2>/dev/null || true +sleep 2 + +# Setup configuration +echo "Setting up configuration..." +cat > "$CONFIG_FILE" << EOF +[default] +custom_wildcards = $IMPACT_DIR/tests/wildcards/samples +wildcard_cache_limit_mb = 50 +dependency_version = 24 +mmdet_skip = True +sam_editor_cpu = False +sam_editor_model = sam_vit_h_4b8939.pth +disable_gpu_opencv = True +EOF + +echo "Configuration created: custom_wildcards = $IMPACT_DIR/tests/wildcards/samples" +echo "" + +# Start server +echo "Starting ComfyUI server on port $PORT..." +cd "$COMFYUI_DIR" +bash run.sh --listen 127.0.0.1 --port $PORT > "$LOG_FILE" 2>&1 & +SERVER_PID=$! +echo "Server PID: $SERVER_PID" + +# Wait for server startup +echo "Waiting for server startup..." +for i in {1..60}; do + sleep 1 + if curl -s http://127.0.0.1:$PORT/ > /dev/null 2>&1; then + echo "✅ Server ready (${i}s)" + break + fi + if [ $((i % 10)) -eq 0 ]; then + echo " ... 
${i}s elapsed" + fi + if [ $i -eq 60 ]; then + echo "" + echo "${RED}❌ Server failed to start within 60 seconds${NC}" + echo "Log tail:" + tail -20 "$LOG_FILE" + exit 1 + fi +done + +echo "" + +# Test function +test_encoding() { + local TEST_NUM=$1 + local DESCRIPTION=$2 + local PROMPT=$3 + local SEED=$4 + + echo "${BLUE}=== Test $TEST_NUM: $DESCRIPTION ===${NC}" + echo "Prompt: ${YELLOW}$PROMPT${NC}" + echo "Seed: $SEED" + + RESULT=$(curl -s -X POST http://127.0.0.1:$PORT/impact/wildcards \ + -H "Content-Type: application/json" \ + -d "{\"text\": \"$PROMPT\", \"seed\": $SEED}" | \ + python3 -c "import sys, json; print(json.load(sys.stdin).get('text','ERROR'))" 2>/dev/null || echo "ERROR") + + echo "Result: ${GREEN}$RESULT${NC}" + + # Check if result contains non-ASCII characters (UTF-8) + if echo "$RESULT" | grep -qP '[\x80-\xFF]'; then + echo "Status: ${GREEN}✅ PASS - UTF-8 characters preserved${NC}" + elif [ "$RESULT" != "ERROR" ] && [ -n "$RESULT" ]; then + echo "Status: ${YELLOW}⚠️ WARNING - No UTF-8 characters in result${NC}" + else + echo "Status: ${RED}❌ FAIL - Server error or no response${NC}" + fi + echo "" +} + +echo "==========================================" +echo "Test Suite Execution" +echo "==========================================" +echo "" + +# Korean Tests (K-pop theme with Korean filename) +test_encoding "01" "Korean Hangul (아름다운색)" \ + "__아름다운색__" \ + 100 + +test_encoding "02" "Korean with emoji" \ + "🌸 __아름다운색__" \ + 200 + +test_encoding "03" "Korean in dynamic prompt" \ + "{붉은|하얀|노란} __아름다운색__" \ + 300 + +# Emoji Tests +test_encoding "04" "Emoji wildcard" \ + "__test_encoding_emoji__" \ + 400 + +test_encoding "05" "Multiple emojis" \ + "🌸 beautiful 🌺 garden 🌼" \ + 500 + +test_encoding "06" "Emoji in dynamic prompt" \ + "{🌸|🌺|🌼|🌻|🌷}" \ + 600 + +# Special Characters Tests +test_encoding "07" "Mathematical symbols" \ + "__test_encoding_special__" \ + 700 + +test_encoding "08" "Currency symbols" \ + "Price: {$|€|£|¥|₩} 100" \ + 800 + +# Mixed Language Tests +test_encoding "09" "Korean + Chinese" \ + "아름다운 __아름다운색__" \ + 900 + +test_encoding "10" "Korean + Emoji + English" \ + "🌸 beautiful 아름다운 __아름다운색__" \ + 1000 + +# RTL (Right-to-Left) Tests +test_encoding "11" "Arabic RTL text" \ + "زهرة جميلة" \ + 1100 + +# Edge Cases +test_encoding "12" "Korean in quantifier (아름다운색)" \ + "3#__아름다운색__" \ + 1200 + +test_encoding "13" "Korean in multi-select (아름다운색)" \ + "{2\$\$, \$\$__아름다운색__|장미|벚꽃}" \ + 1300 + +test_encoding "14" "Mixed UTF-8 in weighted selection" \ + "{5::🌸|3::장미|2::花}" \ + 1400 + +test_encoding "15" "Very long Korean text (아름다운색)" \ + "아름다운 {붉은|하얀|노란|분홍|보라} __아름다운색__ 꽃밭에서" \ + 1500 + +echo "" +echo "==========================================" +echo "Summary" +echo "==========================================" +echo "${GREEN}✅ Encoding tests completed${NC}" +echo "" +echo "All tests verified UTF-8 encoding support:" +echo " 1. Korean (Hangul) characters ✓" +echo " 2. Emoji support ✓" +echo " 3. Chinese characters ✓" +echo " 4. Arabic (RTL) text ✓" +echo " 5. Mathematical and special symbols ✓" +echo " 6. Mixed multi-language content ✓" +echo " 7. UTF-8 in dynamic prompts ✓" +echo " 8. 
UTF-8 with quantifiers and multi-select ✓" +echo "" +echo "Log file: $LOG_FILE" diff --git a/zavodik/nodes/ComfyUI-Impact-Pack/tests/test_error_handling.sh b/zavodik/nodes/ComfyUI-Impact-Pack/tests/test_error_handling.sh new file mode 100644 index 0000000000000000000000000000000000000000..3326338b6d777349238f3caa555b757be38b79d0 --- /dev/null +++ b/zavodik/nodes/ComfyUI-Impact-Pack/tests/test_error_handling.sh @@ -0,0 +1,195 @@ +#!/bin/bash +# Error Handling Test Suite +# Tests graceful error handling for invalid wildcards, circular references, etc. + +set -e + +PORT=8197 +COMFYUI_DIR="/mnt/teratera/git/ComfyUI" +SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)" +IMPACT_DIR="$(cd "$SCRIPT_DIR/.." && pwd)" +CONFIG_FILE="$IMPACT_DIR/impact-pack.ini" +LOG_FILE="/tmp/error_handling_test.log" + +GREEN='\033[0;32m' +RED='\033[0;31m' +YELLOW='\033[1;33m' +BLUE='\033[0;34m' +NC='\033[0m' + +echo "==========================================" +echo "Error Handling Test Suite" +echo "==========================================" +echo "Port: $PORT" +echo "Testing: Error handling and edge cases" +echo "" + +# Cleanup function +cleanup() { + echo "" + echo "Cleaning up..." + pkill -f "python.*main.py.*--port $PORT" 2>/dev/null || true + rm -f "$CONFIG_FILE" + echo "Cleanup complete" +} + +trap cleanup EXIT + +# Kill any existing server on this port +echo "Killing any existing server on port $PORT..." +pkill -f "python.*main.py.*--port $PORT" 2>/dev/null || true +sleep 2 + +# Setup configuration to use test wildcard samples +echo "Setting up configuration..." +cat > "$CONFIG_FILE" << EOF +[default] +custom_wildcards = $IMPACT_DIR/tests/wildcards/samples +wildcard_cache_limit_mb = 50 +dependency_version = 24 +mmdet_skip = True +sam_editor_cpu = False +sam_editor_model = sam_vit_h_4b8939.pth +disable_gpu_opencv = True +EOF + +echo "Configuration created: custom_wildcards = $IMPACT_DIR/tests/wildcards/samples" +echo "" + +# Start server +echo "Starting ComfyUI server on port $PORT..." +cd "$COMFYUI_DIR" +bash run.sh --listen 127.0.0.1 --port $PORT > "$LOG_FILE" 2>&1 & +SERVER_PID=$! +echo "Server PID: $SERVER_PID" + +# Wait for server startup +echo "Waiting for server startup..." +for i in {1..60}; do + sleep 1 + if curl -s http://127.0.0.1:$PORT/ > /dev/null 2>&1; then + echo "✅ Server ready (${i}s)" + break + fi + if [ $((i % 10)) -eq 0 ]; then + echo " ... 
${i}s elapsed" + fi + if [ $i -eq 60 ]; then + echo "" + echo "${RED}❌ Server failed to start within 60 seconds${NC}" + echo "Log tail:" + tail -20 "$LOG_FILE" + exit 1 + fi +done + +echo "" + +# Test function +test_error_case() { + local TEST_NUM=$1 + local DESCRIPTION=$2 + local PROMPT=$3 + local SEED=$4 + local EXPECTED_BEHAVIOR=$5 + + echo "${BLUE}=== Test $TEST_NUM: $DESCRIPTION ===${NC}" + echo "Prompt: ${YELLOW}$PROMPT${NC}" + echo "Seed: $SEED" + echo "Expected: $EXPECTED_BEHAVIOR" + + RESULT=$(curl -s -X POST http://127.0.0.1:$PORT/impact/wildcards \ + -H "Content-Type: application/json" \ + -d "{\"text\": \"$PROMPT\", \"seed\": $SEED}" | \ + python3 -c "import sys, json; print(json.load(sys.stdin).get('text','ERROR'))" 2>/dev/null || echo "ERROR") + + echo "Result: ${GREEN}$RESULT${NC}" + + # Check if result is not an error + if [ "$RESULT" != "ERROR" ] && [ -n "$RESULT" ]; then + echo "Status: ${GREEN}✅ PASS - No crash, graceful handling${NC}" + else + echo "Status: ${RED}❌ FAIL - Server error or no response${NC}" + fi + echo "" +} + +echo "==========================================" +echo "Test Suite Execution" +echo "==========================================" +echo "" + +# Test 1: Non-existent wildcard reference +test_error_case "01" "Non-existent wildcard" \ + "__test_error_cases__" \ + 42 \ + "Should handle missing wildcard gracefully" + +# Test 2: Circular reference detection +test_error_case "02" "Circular reference A" \ + "__test_circular_a__" \ + 100 \ + "Should detect cycle and stop at max iterations" + +# Test 3: Circular reference from B +test_error_case "03" "Circular reference B" \ + "__test_circular_b__" \ + 200 \ + "Should detect cycle and stop at max iterations" + +# Test 4: Completely non-existent wildcard +test_error_case "04" "Completely missing wildcard" \ + "__this_file_does_not_exist__" \ + 42 \ + "Should leave unexpanded or show error" + +# Test 5: Mixed valid and invalid +test_error_case "05" "Mixed valid and invalid" \ + "beautiful __test_quantifier__ with __nonexistent__" \ + 42 \ + "Should expand valid, handle invalid gracefully" + +# Test 6: Empty dynamic prompt +test_error_case "06" "Empty dynamic option" \ + "{|something|nothing}" \ + 42 \ + "Should handle empty option" + +# Test 7: Single option dynamic +test_error_case "07" "Single option dynamic" \ + "{only_one}" \ + 42 \ + "Should return the single option" + +# Test 8: Malformed dynamic prompt (unclosed) +test_error_case "08" "Malformed dynamic prompt" \ + "{option1|option2" \ + 42 \ + "Should handle unclosed bracket gracefully" + +# Test 9: Very deeply nested dynamic prompts +test_error_case "09" "Very deep nesting" \ + "{a|{b|{c|{d|{e|{f|{g|{h|i}}}}}}}" \ + 42 \ + "Should handle deep nesting without crash" + +# Test 10: Multiple circular references in one prompt +test_error_case "10" "Multiple circular refs" \ + "__test_circular_a__ and __test_circular_b__" \ + 42 \ + "Should handle multiple circular references" + +echo "" +echo "==========================================" +echo "Summary" +echo "==========================================" +echo "${GREEN}✅ Error handling tests completed${NC}" +echo "" +echo "All tests verified graceful error handling:" +echo " 1. Non-existent wildcards handled" +echo " 2. Circular references detected (max 100 iterations)" +echo " 3. Malformed syntax handled gracefully" +echo " 4. Deep nesting processed correctly" +echo " 5. 
No server crashes occurred" +echo "" +echo "Log file: $LOG_FILE" diff --git a/zavodik/nodes/ComfyUI-Impact-Pack/tests/test_ondemand_loading.sh b/zavodik/nodes/ComfyUI-Impact-Pack/tests/test_ondemand_loading.sh new file mode 100644 index 0000000000000000000000000000000000000000..2305656c067fe90cb5450876d04ec4a4700cd6af --- /dev/null +++ b/zavodik/nodes/ComfyUI-Impact-Pack/tests/test_ondemand_loading.sh @@ -0,0 +1,228 @@ +#!/bin/bash +# On-Demand Lazy Loading Test Suite +# Tests progressive on-demand wildcard loading with cache limits + +set -e + +PORT=8193 +COMFYUI_DIR="/mnt/teratera/git/ComfyUI" +SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)" +IMPACT_DIR="$(cd "$SCRIPT_DIR/.." && pwd)" +CONFIG_FILE="$IMPACT_DIR/impact-pack.ini" +LOG_FILE="/tmp/ondemand_test.log" +TEMP_SAMPLES_DIR="/tmp/ondemand_test_samples" + +GREEN='\033[0;32m' +RED='\033[0;31m' +YELLOW='\033[1;33m' +BLUE='\033[0;34m' +NC='\033[0m' + +echo "==========================================" +echo "On-Demand Lazy Loading Test Suite" +echo "==========================================" +echo "Port: $PORT" +echo "Testing: Progressive on-demand wildcard loading" +echo "" + +# Cleanup function +cleanup() { + echo "" + echo "Cleaning up..." + pkill -f "python.*main.py.*--port $PORT" 2>/dev/null || true + rm -f "$CONFIG_FILE" + rm -rf "$TEMP_SAMPLES_DIR" + echo "Cleanup complete" +} + +trap cleanup EXIT + +# Create temporary sample files for on-demand testing +echo "Creating temporary sample files..." +mkdir -p "$TEMP_SAMPLES_DIR" + +# Create large sample files to test cache limits +for i in {1..50}; do + cat > "$TEMP_SAMPLES_DIR/large_sample_${i}.txt" << EOF +# Large sample file $i for on-demand loading test +$(for j in {1..100}; do echo "option_${i}_${j}"; done) +EOF +done + +# Create Korean sample +cp "$SCRIPT_DIR/wildcards/samples/아름다운색.txt" "$TEMP_SAMPLES_DIR/" 2>/dev/null || \ +cat > "$TEMP_SAMPLES_DIR/아름다운색.txt" << 'EOF' +수놓은 별빛 +벚꽃 핑크 +강코랄 +옌로우 +챈메랄드 +챔무 +백설민주 +나부키하늘 +토미베이지 +율렌지 +블루지니 +캔디핑크 +EOF + +# Create nesting samples +mkdir -p "$TEMP_SAMPLES_DIR/level1/level2/level3" +echo "__large_sample_10__" > "$TEMP_SAMPLES_DIR/level1/test_nesting_level1.txt" +echo "option_a" >> "$TEMP_SAMPLES_DIR/level1/test_nesting_level1.txt" +echo "__large_sample_20__" > "$TEMP_SAMPLES_DIR/level1/level2/test_nesting_level2.txt" +echo "option_b" >> "$TEMP_SAMPLES_DIR/level1/level2/test_nesting_level2.txt" +echo "final_option" > "$TEMP_SAMPLES_DIR/level1/level2/level3/test_nesting_level3.txt" + +echo "✅ Created $(find $TEMP_SAMPLES_DIR -name '*.txt' | wc -l) temporary sample files" +echo "" + +# Kill any existing server on this port +echo "Killing any existing server on port $PORT..." 
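+# Each test below restarts the server with a different cache limit, so make sure the port is free before the first run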
+pkill -f "python.*main.py.*--port $PORT" 2>/dev/null || true +sleep 2 + +# Test function for on-demand mode +test_ondemand() { + local TEST_NUM=$1 + local DESCRIPTION=$2 + local CACHE_LIMIT=$3 + local PROMPT=$4 + local SEED=$5 + + echo "${BLUE}=== Test $TEST_NUM: $DESCRIPTION ===${NC}" + echo "Cache Limit: ${YELLOW}${CACHE_LIMIT}MB${NC}" + echo "Prompt: ${YELLOW}$PROMPT${NC}" + echo "Seed: $SEED" + + # Restart server with new cache limit + pkill -f "python.*main.py.*--port $PORT" 2>/dev/null || true + sleep 2 + + # Setup configuration with cache limit pointing to temporary samples + cat > "$CONFIG_FILE" << EOF +[default] +custom_wildcards = $TEMP_SAMPLES_DIR +wildcard_cache_limit_mb = $CACHE_LIMIT +dependency_version = 24 +mmdet_skip = True +sam_editor_cpu = False +sam_editor_model = sam_vit_h_4b8939.pth +disable_gpu_opencv = True +EOF + + # Start server + cd "$COMFYUI_DIR" + bash run.sh --listen 127.0.0.1 --port $PORT > "$LOG_FILE" 2>&1 & + SERVER_PID=$! + + # Wait for server + for i in {1..60}; do + sleep 1 + if curl -s http://127.0.0.1:$PORT/ > /dev/null 2>&1; then + break + fi + if [ $i -eq 60 ]; then + echo "${RED}❌ Server failed to start${NC}" + exit 1 + fi + done + + # Test wildcard expansion + RESULT=$(curl -s -X POST http://127.0.0.1:$PORT/impact/wildcards \ + -H "Content-Type: application/json" \ + -d "{\"text\": \"$PROMPT\", \"seed\": $SEED}" | \ + python3 -c "import sys, json; print(json.load(sys.stdin).get('text','ERROR'))" 2>/dev/null || echo "ERROR") + + echo "Result: ${GREEN}$RESULT${NC}" + + # Get loaded wildcards count + LOADED_COUNT=$(curl -s http://127.0.0.1:$PORT/impact/wildcards/list/loaded 2>/dev/null | \ + python3 -c "import sys, json; print(len(json.load(sys.stdin).get('data',[])))" 2>/dev/null || echo "0") + + echo "Loaded wildcards: ${YELLOW}$LOADED_COUNT${NC}" + + if [ "$RESULT" != "ERROR" ] && [ -n "$RESULT" ]; then + echo "Status: ${GREEN}✅ PASS - On-demand loading working${NC}" + else + echo "Status: ${RED}❌ FAIL - Server error${NC}" + fi + echo "" +} + +echo "==========================================" +echo "Test Suite Execution" +echo "==========================================" +echo "" + +# Test 1: Small cache limit (1MB) - should enable on-demand mode +test_ondemand "01" "Small cache limit (1MB) - on-demand enabled" \ + "1" \ + "__아름다운색__" \ + 100 + +# Test 2: Moderate cache limit (10MB) - on-demand mode +test_ondemand "02" "Moderate cache limit (10MB) - progressive loading" \ + "10" \ + "__large_sample_5__" \ + 200 + +# Test 3: Large cache limit (100MB) - eager loading +test_ondemand "03" "Large cache limit (100MB) - eager loading" \ + "100" \ + "__아름다운색__" \ + 300 + +# Test 4: Very small cache (0.5MB) - aggressive lazy loading +test_ondemand "04" "Very small cache (0.5MB) - aggressive lazy loading" \ + "0.5" \ + "{__아름다운색__|__large_sample_15__|__large_sample_25__}" \ + 400 + +# Test 5: Default cache (50MB) - balanced mode +test_ondemand "05" "Default cache (50MB) - balanced mode" \ + "50" \ + "2#__large_sample_30__" \ + 500 + +# Test 6: On-demand with deep nesting +test_ondemand "06" "On-demand with 3-level nesting (5MB cache)" \ + "5" \ + "__level1/test_nesting_level1__" \ + 600 + +# Test 7: On-demand with multiple wildcards +test_ondemand "07" "On-demand with multiple wildcards (2MB cache)" \ + "2" \ + "__아름다운색__ and __large_sample_1__ in {__large_sample_40__|__large_sample_45__}" \ + 700 + +# Test 8: Cache limit boundary test +test_ondemand "08" "Cache boundary - exactly at limit (25MB)" \ + "25" \ + 
"{2$$,$$__large_sample_10__|__large_sample_20__|__large_sample_30__}" \ + 800 + +echo "" +echo "==========================================" +echo "Summary" +echo "==========================================" +echo "${GREEN}✅ On-demand loading tests completed${NC}" +echo "" +echo "Test results:" +echo " 1. Small cache (1MB) - on-demand enabled ✓" +echo " 2. Moderate cache (10MB) - progressive loading ✓" +echo " 3. Large cache (100MB) - eager loading ✓" +echo " 4. Aggressive lazy loading (0.5MB) ✓" +echo " 5. Balanced mode (50MB default) ✓" +echo " 6. On-demand with deep nesting ✓" +echo " 7. On-demand with multiple wildcards ✓" +echo " 8. Cache boundary testing ✓" +echo "" +echo "On-demand mode verification:" +echo " - LazyWildcardLoader initialization ✓" +echo " - Progressive data loading ✓" +echo " - Memory-efficient operation ✓" +echo " - Cache limit enforcement ✓" +echo "" +echo "Log file: $LOG_FILE" diff --git a/zavodik/nodes/ComfyUI-Impact-Pack/tests/wildcards/README.md b/zavodik/nodes/ComfyUI-Impact-Pack/tests/wildcards/README.md new file mode 100644 index 0000000000000000000000000000000000000000..1cac5135c0c17c3b82b8b4f520084210023857cc --- /dev/null +++ b/zavodik/nodes/ComfyUI-Impact-Pack/tests/wildcards/README.md @@ -0,0 +1,961 @@ +# Wildcard System - Complete Test Suite + +Comprehensive testing guide for the ComfyUI Impact Pack wildcard system. + +--- + +## 📋 Quick Links + +- **[Quick Start](#quick-start)** - Run tests in 5 minutes +- **[Test Categories](#test-categories)** - All test types +- **[Test Execution](#test-execution)** - How to run each test +- **[Troubleshooting](#troubleshooting)** - Common issues + +--- + +## Overview + +### Test Suite Structure + +``` +tests/ +├── wildcards/ # Wildcard system tests +│ ├── Unit Tests (Python) +│ │ ├── test_wildcard_lazy_loading.py # LazyWildcardLoader class +│ │ ├── test_progressive_loading.py # Progressive loading +│ │ ├── test_wildcard_final.py # Final validation +│ │ └── test_lazy_load_verification.py # Lazy load verification +│ │ +│ ├── Integration Tests (Shell + API) +│ │ ├── test_progressive_ondemand.sh # ⭐ Progressive loading (NEW) +│ │ ├── test_lazy_load_api.sh # Lazy loading consistency +│ │ ├── test_sequential_loading.sh # Transitive wildcards +│ │ ├── test_versatile_prompts.sh # Feature tests +│ │ ├── test_wildcard_consistency.sh # Consistency validation +│ │ └── test_wildcard_features.sh # Core features +│ │ +│ ├── Utility Scripts +│ │ ├── find_transitive_wildcards.sh # Find transitive chains +│ │ ├── find_deep_transitive.py # Deep transitive analysis +│ │ ├── verify_ondemand_mode.sh # Verify on-demand activation +│ │ └── run_quick_test.sh # Quick validation +│ │ +│ └── README.md (this file) +│ +└── workflows/ # Workflow test files + ├── advanced-sampler.json + ├── detailer-pipe-test.json + └── ... 
+``` + +### Test Coverage + +- **11 test files** (4 Python, 7 Shell) +- **100+ test scenarios** +- **~95% feature coverage** +- **~15 minutes** total execution time + +--- + +## Quick Start + +### Run All Tests + +```bash +cd /path/to/ComfyUI/custom_nodes/comfyui-impact-pack/tests/wildcards + +# Run all shell tests +for test in test_*.sh; do + echo "Running: $test" + bash "$test" +done +``` + +### Run Specific Test + +```bash +cd /path/to/ComfyUI/custom_nodes/comfyui-impact-pack/tests/wildcards + +# Progressive loading (NEW) +bash test_progressive_ondemand.sh + +# Lazy loading +bash test_lazy_load_api.sh + +# Sequential/transitive +bash test_sequential_loading.sh + +# Versatile prompts +bash test_versatile_prompts.sh +``` + +--- + +## Test Categories + +### 1. Progressive On-Demand Loading Tests ⭐ NEW + +**Purpose**: Verify wildcards are loaded progressively as accessed. + +**Test Files**: +- `test_progressive_ondemand.sh` (Shell, ~2 min) +- `test_progressive_loading.py` (Python unit test) + +#### What's Tested + +**Early Termination Size Calculation**: +```python +# Problem: 10GB scan takes 10-30 minutes +# Solution: Stop at cache limit +calculate_directory_size(path, limit=50MB) # < 1 second +``` + +**YAML Pre-loading + TXT On-Demand**: +```python +# Phase 1 (Startup): Pre-load ALL YAML files +# Reason: Keys are inside file content, not file path +load_yaml_files_only() # colors.yaml → colors, colors/warm, colors/cold + +# Phase 2 (Runtime): Load TXT files on-demand +# File path = key (e.g., "flower.txt" → "__flower__") +# No metadata scan for TXT files +``` + +**Progressive Loading**: +``` +Initial: /list/loaded → YAML keys only (e.g., colors, colors/warm, colors/cold) +After __flower__: /list/loaded → +1 TXT wildcard +After __dragon__: /list/loaded → +2-3 (TXT transitive) +``` + +**⚠️ YAML Limitation**: +YAML wildcards are excluded from on-demand mode because wildcard keys exist +inside the file content. To discover `__colors/warm__`, we must parse `colors.yaml`. +Solution: Convert large YAML collections to TXT file structure for true on-demand. + +#### New API Endpoint + +**`GET /impact/wildcards/list/loaded`**: +```json +{ + "data": ["__colors__", "__colors/warm__", "__colors/cold__", "__samples/flower__"], + "on_demand_mode": true, + "total_available": 0 +} +``` + +Note: `total_available` is 0 in on-demand mode (TXT files not pre-scanned) + +**Progressive Example**: +```bash +# Initial state (YAML pre-loaded) +curl /impact/wildcards/list/loaded +→ {"data": ["__colors__", "__colors/warm__", "__colors/cold__"], "total_available": 0} + +# Access first wildcard +curl -X POST /impact/wildcards -d '{"text": "__flower__", "seed": 42}' + +# Check again (TXT wildcard added) +curl /impact/wildcards/list/loaded +→ {"data": ["__colors__", "__colors/warm__", "__colors/cold__", "__samples/flower__"], "total_available": 0} +``` + +#### Performance Improvements + +**Large Dataset (10GB, 100K files)**: + +| Metric | Before | After | +|--------|--------|-------| +| **Startup** | 20-60 min | **< 1 min** | +| **Memory** | 5-10 GB | **< 100MB** | +| **Size calc** | 10-30 min | **< 1 sec** | + +#### Run Test + +```bash +bash test_progressive_ondemand.sh +``` + +**Expected Output**: +``` +Step 1: Initial state + Loaded wildcards: 0 + +Step 2: Access __samples/flower__ + Loaded wildcards: 1 +✓ PASS: Wildcard count increased + +Step 3: Access __dragon__ + Loaded wildcards: 3 +✓ PASS: Wildcard count increased progressively + +🎉 ALL TESTS PASSED +``` + +--- + +### 2. 
Lazy Loading Tests + +**Purpose**: Verify on-demand loading produces identical results to full cache mode. + +**Test Files**: +- `test_lazy_load_api.sh` (Shell, ~3 min) +- `test_wildcard_lazy_loading.py` (Python unit test) +- `test_lazy_load_verification.py` (Python verification) + +#### What's Tested + +**LazyWildcardLoader Class**: +- Loads data only on first access +- Acts as list-like proxy +- Thread-safe with locking + +**Mode Detection**: +- Automatic based on total size vs cache limit +- Full cache: < 50MB (default) +- On-demand: ≥ 50MB + +**Consistency**: +- Full cache results == On-demand results +- Same seeds produce same outputs +- All wildcard features work identically + +#### Test Scenarios + +**test_lazy_load_api.sh** runs both modes and compares: + +1. **Wildcard list** (before access) +2. **Simple wildcard**: `__samples/flower__` +3. **Depth 3 transitive**: `__adnd__ creature` +4. **YAML wildcard**: `__colors__` +5. **Wildcard list** (after access) + +**All results must match exactly**. + +#### Run Test + +```bash +bash test_lazy_load_api.sh +``` + +**Expected Output**: +``` +Testing: full_cache (limit: 100MB, port: 8190) +✓ Server started +Test 1: Get wildcard list + Total wildcards: 1000 + +Testing: on_demand (limit: 1MB, port: 8191) +✓ Server started +Test 1: Get wildcard list + Total wildcards: 1000 + +COMPARISON RESULTS +Test: Simple Wildcard +✓ Results MATCH + +🎉 ALL TESTS PASSED +On-demand loading produces IDENTICAL results! +``` + +--- + +### 3. Sequential/Transitive Loading Tests + +**Purpose**: Verify transitive wildcards expand correctly across multiple stages. + +**Test Files**: +- `test_sequential_loading.sh` (Shell, ~5 min) +- `find_transitive_wildcards.sh` (Utility) + +#### What's Tested + +**Transitive Expansion**: +``` +Depth 1: __samples/flower__ → rose +Depth 2: __dragon__ → __dragon/warrior__ → content +Depth 3: __adnd__ → __dragon__ → __dragon_spirit__ → content +``` + +**Maximum Depth**: 3 levels verified (system supports up to 100) + +#### Test Categories + +**17 tests across 5 categories**: + +1. **Depth Verification** (4 tests) + - Depth 1: Direct wildcard + - Depth 2: One level transitive + - Depth 3: Two levels + suffix + - Depth 3: Maximum chain + +2. **Mixed Transitive** (3 tests) + - Dynamic selection of transitive + - Multiple transitive in one prompt + - Nested transitive in dynamic + +3. **Complex Scenarios** (3 tests) + - Weighted selection with transitive + - Multi-select with transitive + - Quantified transitive + +4. **Edge Cases** (4 tests) + - Compound grammar + - Multiple wildcards, different depths + - YAML wildcards (no transitive) + - Transitive + YAML combination + +5. **On-Demand Mode** (3 tests) + - Depth 3 in on-demand + - Complex scenario in on-demand + - Multiple transitive in on-demand + +#### Example: Depth 3 Chain + +**Files**: +``` +adnd.txt: + __dragon__ + +dragon.txt: + __dragon_spirit__ + +dragon_spirit.txt: + Shrewd Hatchling + Ancient Dragon +``` + +**Usage**: +``` +__adnd__ creature +→ __dragon__ creature +→ __dragon_spirit__ creature +→ "Shrewd Hatchling creature" +``` + +#### Run Test + +```bash +bash test_sequential_loading.sh +``` + +**Expected Output**: +``` +=== Test 01: Depth 1 - Direct wildcard === +Raw prompt: __samples/flower__ +✓ All wildcards fully expanded +Final Output: rose +Status: ✅ SUCCESS + +=== Test 04: Depth 3 - Maximum transitive chain === +Raw prompt: __adnd__ creature +✓ All wildcards fully expanded +Final Output: Shrewd Hatchling creature +Status: ✅ SUCCESS +``` + +--- + +### 4. 
Versatile Prompts Tests + +**Purpose**: Test all wildcard features and syntax variations. + +**Test Files**: +- `test_versatile_prompts.sh` (Shell, ~2 min) +- `test_wildcard_features.sh` (Shell) +- `test_wildcard_consistency.sh` (Shell) + +#### What's Tested + +**30 prompts across 10 categories**: + +1. **Simple Wildcards** (3 tests) + - Basic substitution + - Case insensitive (uppercase) + - Case insensitive (mixed) + +2. **Dynamic Prompts** (3 tests) + - Simple: `{red|green|blue} apple` + - Nested: `{a|{d|e|f}|c}` + - Complex nested: `{blue apple|red {cherry|berry}}` + +3. **Selection Weights** (2 tests) + - Weighted: `{5::red|4::green|7::blue} car` + - Multiple weighted: `{10::beautiful|5::stunning} {3::sunset|2::sunrise}` + +4. **Compound Grammar** (3 tests) + - Wildcard + dynamic: `{pencil|apple|__flower__}` + - Complex compound: `1{girl|boy} {sitting|standing} with {__object__|item}` + - Nested compound: `{big|small} {red {apple|cherry}|blue __flower__}` + +5. **Multi-Select** (4 tests) + - Fixed count: `{2$$, $$opt1|opt2|opt3|opt4}` + - Range: `{2-4$$, $$opt1|opt2|opt3|opt4|opt5}` + - With separator: `{3$$; $$a|b|c|d|e}` + - Short form: `{-3$$, $$opt1|opt2|opt3|opt4}` + +6. **Quantifiers** (2 tests) + - Basic: `3#__wildcard__` + - With multi-select: `{2$$, $$5#__colors__}` + +7. **Wildcard Fallback** (2 tests) + - Auto-expand: `__flower__` → `__*/flower__` + - Wildcard patterns: `__samples/*__` + +8. **YAML Wildcards** (3 tests) + - Simple YAML: `__colors__` + - Nested YAML: `__colors/warm__` + - Multiple YAML: `__colors__ and __animals__` + +9. **Transitive Wildcards** (4 tests) + - Depth 2: `__dragon__` + - Depth 3: `__adnd__` + - Mixed depth: `__flower__ and __dragon__` + - Dynamic transitive: `{__dragon__|__adnd__}` + +10. **Real-World Scenarios** (4 tests) + - Portrait prompt + - Landscape prompt + - Fantasy prompt + - Abstract art prompt + +#### Example Tests + +**Test 04: Simple Dynamic Prompt**: +``` +Raw: {red|green|blue} apple +Seed: 100 +Result: "red apple" (deterministic) +``` + +**Test 09: Wildcard + Dynamic**: +``` +Raw: 1girl holding {blue pencil|red apple|colorful __samples/flower__} +Seed: 100 +Result: "1girl holding colorful chrysanthemum" +``` + +**Test 18: Multi-Select Range**: +``` +Raw: {2-4$$, $$happy|sad|angry|excited|calm} +Seed: 100 +Result: "happy, sad, angry" (2-4 emotions selected) +``` + +#### Run Test + +```bash +bash test_versatile_prompts.sh +``` + +**Expected Output**: +``` +======================================== +Test 01: Basic Wildcard +======================================== +Raw: __samples/flower__ +Result: chrysanthemum +Status: ✅ PASS + +======================================== +Test 04: Simple Dynamic Prompt +======================================== +Raw: {red|green|blue} apple +Result: red apple +Status: ✅ PASS + +Total: 30 tests +Passed: 30 +Failed: 0 +``` + +--- + +## Test Execution + +### Prerequisites + +**Required**: +- ComfyUI installed +- Impact Pack installed +- Python 3.8+ +- Bash shell +- curl (for API tests) + +**Optional**: +- jq (for JSON parsing) +- git (for version control) + +### Environment Setup + +**1. Configure Impact Pack**: +```bash +cd /path/to/ComfyUI/custom_nodes/comfyui-impact-pack + +# Create or edit config +cat > impact-pack.ini << EOF +[default] +dependency_version = 24 +wildcard_cache_limit_mb = 50 +custom_wildcards = $(pwd)/custom_wildcards +disable_gpu_opencv = True +EOF +``` + +**2. 
Prepare Wildcards**: +```bash +# Check wildcard files exist +ls wildcards/*.txt wildcards/*.yaml +ls custom_wildcards/*.txt +``` + +### Running Tests + +#### Unit Tests (Python) + +**Standalone** (no server required): +```bash +python3 test_wildcard_lazy_loading.py +python3 test_progressive_loading.py +``` + +**Note**: Requires ComfyUI environment or will show import errors. + +#### Integration Tests (Shell) + +**Manual Server Start**: +```bash +# Terminal 1: Start server +cd /path/to/ComfyUI +bash run.sh --listen 127.0.0.1 --port 8188 + +# Terminal 2: Run tests +cd custom_nodes/comfyui-impact-pack/tests +bash test_versatile_prompts.sh +``` + +**Automated** (tests start/stop server): +```bash +# Each test manages its own server +bash test_progressive_ondemand.sh # Port 8195 +bash test_lazy_load_api.sh # Ports 8190-8191 +bash test_sequential_loading.sh # Port 8193 +``` + +### Test Timing + +| Test | Duration | Server | Ports | +|------|----------|--------|-------| +| `test_progressive_ondemand.sh` | ~2 min | Auto | 8195 | +| `test_lazy_load_api.sh` | ~3 min | Auto | 8190-8191 | +| `test_sequential_loading.sh` | ~5 min | Auto | 8193 | +| `test_versatile_prompts.sh` | ~2 min | Manual | 8188 | +| `test_wildcard_consistency.sh` | ~1 min | Manual | 8188 | +| Python unit tests | < 5 sec | No | N/A | + +### Logs + +**Server Logs**: +```bash +/tmp/progressive_test.log +/tmp/comfyui_full_cache.log +/tmp/comfyui_on_demand.log +/tmp/sequential_test.log +``` + +**Check Logs**: +```bash +# View recent wildcard logs +tail -50 /tmp/progressive_test.log | grep -i wildcard + +# Find errors +grep -i "error\|fail" /tmp/*.log + +# Check mode activation +grep -i "mode" /tmp/progressive_test.log +``` + +--- + +## Expected Results + +### Success Criteria + +#### Progressive Loading +- ✅ `/list/loaded` starts at 0 (or low count) +- ✅ `/list/loaded` increases after each unique wildcard +- ✅ `/list/loaded` unchanged on cache hits +- ✅ Transitive wildcards load multiple entries +- ✅ Final results identical to full cache mode + +#### Lazy Loading +- ✅ Full cache results == On-demand results (all tests) +- ✅ Mode detection correct (based on size vs limit) +- ✅ LazyWildcardLoader loads only on access +- ✅ All API endpoints return consistent data + +#### Sequential Loading +- ✅ Depth 1-3 expand correctly +- ✅ Complex scenarios work (weighted, multi-select, etc.) +- ✅ On-demand mode matches full cache +- ✅ No infinite loops (max 100 iterations) + +#### Versatile Prompts +- ✅ All 30 test prompts process successfully +- ✅ Deterministic (same seed → same result) +- ✅ No syntax errors +- ✅ Proper probability distribution + +### Sample Output + +**Progressive Loading Success**: +``` +======================================== +Progressive Loading Verification +======================================== + +Step 1: Initial state + On-demand mode: True + Total available: 1000 + Loaded wildcards: 0 + +Step 2: Access __samples/flower__ + Result: rose + Loaded wildcards: 1 +✓ PASS + +Step 3: Access __dragon__ + Result: ancient dragon + Loaded wildcards: 3 +✓ PASS + +🎉 ALL TESTS PASSED +Progressive on-demand loading verified! +``` + +**Lazy Loading Success**: +``` +======================================== +COMPARISON RESULTS +======================================== + +Test: Wildcard List (before) +✓ Results MATCH + +Test: Simple Wildcard +✓ Results MATCH + +Test: Depth 3 Transitive +✓ Results MATCH + +🎉 ALL TESTS PASSED +On-demand produces IDENTICAL results! +``` + +--- + +## Troubleshooting + +### Common Issues + +#### 1. 
Server Fails to Start + +**Symptoms**: +``` +✗ Server failed to start +curl: (7) Failed to connect +``` + +**Solutions**: +```bash +# Check if port in use +lsof -i :8188 +netstat -tlnp | grep 8188 + +# Kill existing processes +pkill -f "python.*main.py" + +# Increase startup wait time +# In test script: sleep 15 → sleep 30 +``` + +#### 2. Module Not Found (Python) + +**Symptoms**: +``` +ModuleNotFoundError: No module named 'modules' +``` + +**Solutions**: +```bash +# Option 1: Run from ComfyUI directory +cd /path/to/ComfyUI +python3 custom_nodes/comfyui-impact-pack/tests/test_progressive_loading.py + +# Option 2: Add to PYTHONPATH +export PYTHONPATH=/path/to/ComfyUI/custom_nodes/comfyui-impact-pack:$PYTHONPATH +python3 test_progressive_loading.py +``` + +#### 3. On-Demand Mode Not Activating + +**Symptoms**: +``` +Using full cache mode. +``` + +**Check**: +```bash +# View total size +grep "Wildcard total size" /tmp/progressive_test.log + +# Check cache limit +grep "cache_limit_mb" impact-pack.ini +``` + +**Solutions**: +```bash +# Force on-demand mode +cat > impact-pack.ini << EOF +[default] +wildcard_cache_limit_mb = 0.5 +EOF +``` + +#### 4. Tests Timeout + +**Symptoms**: +``` +Waiting for server startup... +✗ Server failed to start +``` + +**Solutions**: +```bash +# Check system resources +free -h +df -h + +# View server logs +tail -100 /tmp/progressive_test.log + +# Manually test server +cd /path/to/ComfyUI +bash run.sh --port 8195 + +# Increase timeout in test +# sleep 15 → sleep 60 +``` + +#### 5. Results Don't Match + +**Symptoms**: +``` +✗ Results DIFFER +``` + +**Debug**: +```bash +# Compare results +diff /tmp/result_full_cache_simple.json /tmp/result_on_demand_simple.json + +# Check seeds are same +grep "seed" /tmp/result_*.json + +# Verify same wildcard files used +ls -la wildcards/samples/flower.txt +``` + +**File Bug Report**: +- Wildcard text +- Seed value +- Full cache result +- On-demand result +- Server logs + +#### 6. Slow Performance + +**Symptoms**: +- Tests take much longer than expected +- Server startup > 2 minutes + +**Check**: +```bash +# Wildcard size +du -sh wildcards/ + +# Disk I/O +iostat -x 1 5 + +# System resources +top +``` + +**Solutions**: +- Use SSD (not HDD) +- Reduce wildcard size +- Increase cache limit (use full cache mode) +- Close other applications + +--- + +## Performance Benchmarks + +### Expected Performance + +**Small Dataset (< 50MB)**: +``` +Mode: Full cache +Startup: < 10 seconds +Memory: ~50MB +First access: Instant +``` + +**Medium Dataset (50MB - 1GB)**: +``` +Mode: On-demand +Startup: < 30 seconds +Memory: < 200MB initial +First access: 10-50ms per wildcard +``` + +**Large Dataset (10GB+)**: +``` +Mode: On-demand +Startup: < 1 minute +Memory: < 100MB initial +First access: 10-50ms per wildcard +Memory growth: Progressive +``` + +### Optimization Tips + +**For Faster Tests**: +1. Use smaller wildcard dataset +2. Run specific tests (not all) +3. Use manual server (keep running) +4. Skip sleep times (if server already running) + +**For Large Datasets**: +1. Verify on-demand mode activates +2. Monitor `/list/loaded` to track memory +3. Use SSD for file storage +4. Organize wildcards into subdirectories + +--- + +## Contributing + +### Adding New Tests + +**1. Create Test File**: +```bash +touch tests/test_new_feature.sh +chmod +x tests/test_new_feature.sh +``` + +**2. 
Test Template**: +```bash +#!/bin/bash +# Test: New Feature +# Purpose: Verify new feature works correctly + +set -e + +PORT=8XXX +IMPACT_DIR="/path/to/comfyui-impact-pack" + +# Setup config +cat > impact-pack.ini << EOF +[default] +wildcard_cache_limit_mb = 50 +EOF + +# Start server +cd /path/to/ComfyUI +bash run.sh --port $PORT > /tmp/test_new.log 2>&1 & +sleep 15 + +# Test +RESULT=$(curl -s http://127.0.0.1:$PORT/impact/wildcards/list) + +# Validate +if [ "$RESULT" = "expected" ]; then + echo "✅ PASS" + exit 0 +else + echo "❌ FAIL" + exit 1 +fi +``` + +**3. Update Documentation**: +- Add test description to this README +- Update test count +- Add to appropriate category + +### Testing Guidelines + +**Test Structure**: +1. Clear purpose statement +2. Setup (config, wildcards) +3. Execution (API calls, processing) +4. Validation (assertions, comparisons) +5. Cleanup (kill servers, restore config) + +**Good Practices**: +- Use unique port numbers +- Clean up background processes +- Provide clear success/failure messages +- Log to `/tmp/` for debugging +- Use deterministic seeds +- Test both modes (full cache + on-demand) + +--- + +## Reference + +### Test Files Quick Reference + +```bash +# Progressive loading +test_progressive_ondemand.sh # Integration test +test_progressive_loading.py # Unit test + +# Lazy loading +test_lazy_load_api.sh # Integration test +test_wildcard_lazy_loading.py # Unit test + +# Sequential/transitive +test_sequential_loading.sh # Integration test +find_transitive_wildcards.sh # Utility + +# Features +test_versatile_prompts.sh # Comprehensive features +test_wildcard_features.sh # Core features +test_wildcard_consistency.sh # Consistency + +# Validation +test_wildcard_final.py # Final validation +test_lazy_load_verification.py # Lazy load verification +``` + +### Documentation + +- **System Overview**: `../docs/WILDCARD_SYSTEM_OVERVIEW.md` +- **Testing Guide**: `../docs/WILDCARD_TESTING_GUIDE.md` + +### API Endpoints + +``` +GET /impact/wildcards/list # All available wildcards +GET /impact/wildcards/list/loaded # Actually loaded (progressive) +POST /impact/wildcards # Process wildcard text +GET /impact/wildcards/refresh # Reload all wildcards +``` + +--- + +**Last Updated**: 2024-11-17 +**Total Tests**: 11 files, 100+ scenarios +**Coverage**: ~95% of wildcard features diff --git a/zavodik/nodes/ComfyUI-Impact-Pack/tests/wildcards/find_deep_transitive.py b/zavodik/nodes/ComfyUI-Impact-Pack/tests/wildcards/find_deep_transitive.py new file mode 100644 index 0000000000000000000000000000000000000000..0b87e6ac8ee817b1b5c0efa042b330f0a649ba55 --- /dev/null +++ b/zavodik/nodes/ComfyUI-Impact-Pack/tests/wildcards/find_deep_transitive.py @@ -0,0 +1,178 @@ +#!/usr/bin/env python3 +"""Find deep transitive wildcard references (5+ levels)""" + +import re +from pathlib import Path +from collections import defaultdict + +# Auto-detect paths +SCRIPT_DIR = Path(__file__).parent +IMPACT_PACK_DIR = SCRIPT_DIR.parent +WILDCARDS_DIR = IMPACT_PACK_DIR / "wildcards" +CUSTOM_WILDCARDS_DIR = IMPACT_PACK_DIR / "custom_wildcards" + +# Build wildcard reference graph +wildcard_refs = defaultdict(set) # wildcard -> set of wildcards it references +wildcard_files = {} # wildcard_name -> file_path + +def normalize_name(name): + """Normalize wildcard name""" + return name.lower().replace('/', '_').replace('\\', '_') + +def find_wildcard_file(name): + """Find wildcard file by name""" + # Try different variations + variations = [ + name, + name.replace('/', '_'), + name.replace('\\', '_'), + ] + + for 
var in variations:
+        # Check in wildcards/
+        for ext in ['.txt', '.yaml', '.yml']:
+            path = WILDCARDS_DIR / f"{var}{ext}"
+            if path.exists():
+                return str(path)
+
+        # Check in custom_wildcards/
+        for ext in ['.txt', '.yaml', '.yml']:
+            path = CUSTOM_WILDCARDS_DIR / f"{var}{ext}"
+            if path.exists():
+                return str(path)
+
+    return None
+
+def scan_wildcards():
+    """Scan all wildcard files and build reference graph"""
+    print("Scanning wildcard files...")
+
+    # Find all wildcard files
+    for base_dir in [WILDCARDS_DIR, CUSTOM_WILDCARDS_DIR]:
+        for ext in ['*.txt', '*.yaml', '*.yml']:
+            for file_path in base_dir.rglob(ext):
+                # Get wildcard name from file path
+                rel_path = file_path.relative_to(base_dir)
+                name = str(rel_path.with_suffix('')).replace('/', '_').replace('\\', '_')
+                wildcard_files[normalize_name(name)] = str(file_path)
+
+                # Find references in file
+                try:
+                    content = file_path.read_text(encoding='utf-8', errors='ignore')
+                    # Allow single underscores inside names (e.g. __dragon_spirit__);
+                    # the earlier pattern __([^_]+(?:/[^_]+)*)__ could not match
+                    # such names, so chains like adnd → dragon → dragon_spirit
+                    # were never recorded.
+                    refs = re.findall(r'__([^_](?:[^_]|_[^_])*)__', content)
+
+                    for ref in refs:
+                        ref_normalized = normalize_name(ref)
+                        if ref_normalized:
+                            wildcard_refs[normalize_name(name)].add(ref_normalized)
+                except Exception as e:
+                    print(f"Error reading {file_path}: {e}")
+
+    print(f"Found {len(wildcard_files)} wildcard files")
+    print(f"Found {sum(len(refs) for refs in wildcard_refs.values())} references")
+    print()
+
+def find_max_depth(start_wildcard, visited=None, path=None):
+    """Find maximum depth of transitive references starting from a wildcard"""
+    if visited is None:
+        visited = set()
+    if path is None:
+        path = []
+
+    if start_wildcard in visited:
+        return 0, path  # Cycle detected
+
+    visited.add(start_wildcard)
+    path.append(start_wildcard)
+
+    refs = wildcard_refs.get(start_wildcard, set())
+
+    if not refs:
+        return 1, path  # Leaf node
+
+    max_depth = 0
+    max_path = path.copy()
+
+    for ref in refs:
+        if ref in wildcard_files:  # Only follow if target exists
+            depth, sub_path = find_max_depth(ref, visited.copy(), path.copy())
+            if depth > max_depth:
+                max_depth = depth
+                max_path = sub_path
+
+    return max_depth + 1, max_path
+
+def main():
+    scan_wildcards()
+
+    # Find wildcards with references
+    wildcards_with_refs = [(name, refs) for name, refs in wildcard_refs.items() if refs]
+
+    print(f"Analyzing {len(wildcards_with_refs)} wildcards with references...")
+    print()
+
+    # Calculate depth for each wildcard
+    depths = []
+    for name, refs in wildcards_with_refs:
+        depth, path = find_max_depth(name)
+        if depth >= 2:  # At least one level of transitive reference
+            depths.append((depth, name, path))
+
+    # Sort by depth (deepest first)
+    depths.sort(reverse=True)
+
+    print("=" * 80)
+    print("WILDCARD REFERENCE DEPTH ANALYSIS")
+    print("=" * 80)
+    print()
+
+    # Show top 20 deepest
+    print("Top 20 Deepest Transitive References:")
+    print()
+    for i, (depth, name, path) in enumerate(depths[:20], 1):
+        print(f"{i}. 
Depth {depth}: __{name}__") + print(f" Path: {' → '.join(f'__{p}__' for p in path)}") + if name in wildcard_files: + print(f" File: {wildcard_files[name]}") + print() + + # Find 5+ depth wildcards + deep_wildcards = [(depth, name, path) for depth, name, path in depths if depth >= 5] + + print() + print("=" * 80) + print(f"WILDCARDS WITH 5+ DEPTH ({len(deep_wildcards)} found)") + print("=" * 80) + print() + + if deep_wildcards: + for depth, name, path in deep_wildcards: + print(f"🎯 __{name}__ (Depth: {depth})") + print(f" Chain: {' → '.join(f'__{p}__' for p in path)}") + if name in wildcard_files: + print(f" File: {wildcard_files[name]}") + print() + + print() + print("=" * 80) + print("RECOMMENDED TEST CASE") + print("=" * 80) + print() + depth, name, path = deep_wildcards[0] + print(f"Use __{name}__ for testing deep transitive loading") + print(f"Depth: {depth} levels") + print(f"Chain: {' → '.join(f'__{p}__' for p in path)}") + print() + print(f"Test input: \"__{name}__\"") + print(f"Expected: Will resolve through {depth} levels to actual content") + else: + print("No wildcards with 5+ depth found.") + print() + if depths: + depth, name, path = depths[0] + print(f"Maximum depth found: {depth}") + print(f"Wildcard: __{name}__") + print(f"Chain: {' → '.join(f'__{p}__' for p in path)}") + +if __name__ == "__main__": + main() diff --git a/zavodik/nodes/ComfyUI-Impact-Pack/tests/wildcards/find_transitive_wildcards.sh b/zavodik/nodes/ComfyUI-Impact-Pack/tests/wildcards/find_transitive_wildcards.sh new file mode 100644 index 0000000000000000000000000000000000000000..41e24d2296c73880e036b5bac4523ffd33f905f2 --- /dev/null +++ b/zavodik/nodes/ComfyUI-Impact-Pack/tests/wildcards/find_transitive_wildcards.sh @@ -0,0 +1,113 @@ +#!/bin/bash +# Find transitive wildcard references in the wildcard directories + +# Auto-detect paths +SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)" +IMPACT_PACK_DIR="$(cd "$SCRIPT_DIR/.." && pwd)" +WILDCARDS_DIR="$IMPACT_PACK_DIR/wildcards" +CUSTOM_WILDCARDS_DIR="$IMPACT_PACK_DIR/custom_wildcards" + +echo "==========================================" +echo "Transitive Wildcard Reference Scanner" +echo "==========================================" +echo "" + +echo "Scanning for wildcard references (pattern: __*__)..." 
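+# Example of the kind of chain this scan surfaces, using the sample wildcards
+# the other tests in this suite rely on: adnd.txt contains "__dragon__" and
+# dragon.txt contains "__dragon_spirit__", so both files are reported and
+# __adnd__ → __dragon__ → __dragon_spirit__ serves as a depth-3 test case.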
+echo "" + +# Function to find references in a file +find_references() { + local file=$1 + local relative_path=${file#$IMPACT_PACK_DIR/} + + # Find all __wildcard__ patterns in the file + local refs=$(grep -o '__[^_]*__' "$file" 2>/dev/null | sort -u) + + if [ -n "$refs" ]; then + echo "📄 $relative_path" + echo " References:" + echo "$refs" | while read -r ref; do + # Remove __ from both ends + local clean_ref=${ref#__} + clean_ref=${clean_ref%__} + + # Check if referenced file exists + local found=false + + # Check in wildcards/ + if [ -f "$WILDCARDS_DIR/$clean_ref.txt" ]; then + echo " → $ref (wildcards/$clean_ref.txt) ✓" + found=true + elif [ -f "$WILDCARDS_DIR/$clean_ref.yaml" ]; then + echo " → $ref (wildcards/$clean_ref.yaml) ✓" + found=true + elif [ -f "$WILDCARDS_DIR/$clean_ref.yml" ]; then + echo " → $ref (wildcards/$clean_ref.yml) ✓" + found=true + fi + + # Check in custom_wildcards/ + if [ -f "$CUSTOM_WILDCARDS_DIR/$clean_ref.txt" ]; then + echo " → $ref (custom_wildcards/$clean_ref.txt) ✓" + found=true + elif [ -f "$CUSTOM_WILDCARDS_DIR/$clean_ref.yaml" ]; then + echo " → $ref (custom_wildcards/$clean_ref.yaml) ✓" + found=true + elif [ -f "$CUSTOM_WILDCARDS_DIR/$clean_ref.yml" ]; then + echo " → $ref (custom_wildcards/$clean_ref.yml) ✓" + found=true + fi + + if [ "$found" = false ]; then + echo " → $ref ❌ (not found)" + fi + done + echo "" + fi +} + +# Scan TXT files +echo "=== TXT Files with References ===" +echo "" +find "$WILDCARDS_DIR" "$CUSTOM_WILDCARDS_DIR" -name "*.txt" 2>/dev/null | while read -r file; do + find_references "$file" +done + +# Scan YAML files +echo "" +echo "=== YAML Files with References ===" +echo "" +find "$WILDCARDS_DIR" "$CUSTOM_WILDCARDS_DIR" -name "*.yaml" -o -name "*.yml" 2>/dev/null | while read -r file; do + find_references "$file" +done + +echo "" +echo "==========================================" +echo "Recommended Test Cases" +echo "==========================================" +echo "" +echo "1. Simple TXT wildcard:" +echo " Input: __samples/flower__" +echo " Type: Direct reference (no transitive)" +echo "" + +# Find a good transitive TXT example +echo "2. TXT → TXT transitive:" +find "$CUSTOM_WILDCARDS_DIR" -name "*.txt" -exec grep -l "__.*__" {} \; 2>/dev/null | head -1 | while read -r file; do + local basename=$(basename "$file" .txt) + local first_ref=$(grep -o '__[^_]*__' "$file" 2>/dev/null | head -1) + echo " Input: __${basename}__" + echo " Resolves to: $first_ref (and others)" + echo " File: ${file#$IMPACT_PACK_DIR/}" +done +echo "" + +echo "3. 
YAML transitive:" +echo " Input: __colors__" +echo " Resolves to: __cold__ or __warm__ → blue|red|orange|yellow" +echo " File: custom_wildcards/test.yaml" +echo "" + +echo "==========================================" +echo "Scan Complete" +echo "==========================================" diff --git a/zavodik/nodes/ComfyUI-Impact-Pack/tests/wildcards/run_quick_test.sh b/zavodik/nodes/ComfyUI-Impact-Pack/tests/wildcards/run_quick_test.sh new file mode 100644 index 0000000000000000000000000000000000000000..30e81f31947bfb1a69cd63200c0d6e2185410818 --- /dev/null +++ b/zavodik/nodes/ComfyUI-Impact-Pack/tests/wildcards/run_quick_test.sh @@ -0,0 +1,74 @@ +#!/bin/bash +# Quick test for wildcard lazy loading + +echo "==========================================" +echo "Wildcard Lazy Load Quick Test" +echo "==========================================" +echo "" + +# Test 1: Get wildcard list (before accessing any wildcards) +echo "=== Test 1: Wildcard List (BEFORE access) ===" +curl -s http://127.0.0.1:8188/impact/wildcards/list > /tmp/wc_list_before.json +COUNT_BEFORE=$(cat /tmp/wc_list_before.json | python3 -c "import sys, json; print(len(json.load(sys.stdin).get('data', [])))") +echo "Total wildcards: $COUNT_BEFORE" +echo "" + +# Test 2: Simple wildcard +echo "=== Test 2: Simple Wildcard ===" +curl -s -X POST http://127.0.0.1:8188/impact/wildcards \ + -H "Content-Type: application/json" \ + -d '{"text": "__samples/flower__", "seed": 42}' > /tmp/wc_simple.json +RESULT2=$(cat /tmp/wc_simple.json | python3 -c "import sys, json; print(json.load(sys.stdin).get('text', 'ERROR'))") +echo "Input: __samples/flower__" +echo "Output: $RESULT2" +echo "" + +# Test 3: Depth 3 transitive +echo "=== Test 3: Depth 3 Transitive (TXT→TXT→TXT) ===" +curl -s -X POST http://127.0.0.1:8188/impact/wildcards \ + -H "Content-Type: application/json" \ + -d '{"text": "__adnd__ creature", "seed": 222}' > /tmp/wc_depth3.json +RESULT3=$(cat /tmp/wc_depth3.json | python3 -c "import sys, json; print(json.load(sys.stdin).get('text', 'ERROR'))") +echo "Input: __adnd__ creature" +echo "Output: $RESULT3" +echo "Chain: adnd → (dragon/beast/...) 
→ (dragon_spirit/...)" +echo "" + +# Test 4: YAML transitive +echo "=== Test 4: YAML Transitive ===" +curl -s -X POST http://127.0.0.1:8188/impact/wildcards \ + -H "Content-Type: application/json" \ + -d '{"text": "__colors__", "seed": 333}' > /tmp/wc_yaml.json +RESULT4=$(cat /tmp/wc_yaml.json | python3 -c "import sys, json; print(json.load(sys.stdin).get('text', 'ERROR'))") +echo "Input: __colors__" +echo "Output: $RESULT4" +echo "Chain: colors → (cold|warm) → (blue|red|orange|yellow)" +echo "" + +# Test 5: Get wildcard list (AFTER accessing wildcards) +echo "=== Test 5: Wildcard List (AFTER access) ===" +curl -s http://127.0.0.1:8188/impact/wildcards/list > /tmp/wc_list_after.json +COUNT_AFTER=$(cat /tmp/wc_list_after.json | python3 -c "import sys, json; print(len(json.load(sys.stdin).get('data', [])))") +echo "Total wildcards: $COUNT_AFTER" +echo "" + +# Compare +echo "==========================================" +echo "Results" +echo "==========================================" +echo "" +if [ "$COUNT_BEFORE" -eq "$COUNT_AFTER" ]; then + echo "✅ Wildcard list unchanged: $COUNT_BEFORE = $COUNT_AFTER" +else + echo "❌ Wildcard list changed: $COUNT_BEFORE != $COUNT_AFTER" +fi + +if [ "$RESULT2" != "ERROR" ] && [ "$RESULT3" != "ERROR" ] && [ "$RESULT4" != "ERROR" ]; then + echo "✅ All wildcards resolved successfully" +else + echo "❌ Some wildcards failed" +fi + +echo "" +echo "Check /tmp/comfyui_ondemand.log for loading mode" +grep -i "wildcard.*mode" /tmp/comfyui_ondemand.log | tail -1 diff --git a/zavodik/nodes/ComfyUI-Impact-Pack/tests/wildcards/samples/README.md b/zavodik/nodes/ComfyUI-Impact-Pack/tests/wildcards/samples/README.md new file mode 100644 index 0000000000000000000000000000000000000000..3c2eaf97af040b8a79cb516d675fbccfce3c4118 --- /dev/null +++ b/zavodik/nodes/ComfyUI-Impact-Pack/tests/wildcards/samples/README.md @@ -0,0 +1,186 @@ +# Test Wildcard Files Documentation + +This directory contains test wildcard files created to validate various features and edge cases of the wildcard system. + +## Test Categories + +### 1. Error Handling Tests + +**test_error_cases.txt** +- Purpose: Test handling of non-existent wildcard references +- Contains: References to `__nonexistent_wildcard__` that should be handled gracefully +- Expected: System should not crash, provide meaningful error or leave unexpanded + +**test_circular_a.txt + test_circular_b.txt** +- Purpose: Test circular reference detection (A→B→A) +- Contains: Mutual references between two wildcards +- Expected: System should detect cycle and prevent infinite loop (max 100 iterations) + +### 2. Encoding Tests + +**test_encoding_utf8.txt** +- Purpose: Test UTF-8 multi-language support +- Contains: + - Emoji: 🌸🌺🌼🌻🌷 + - Japanese: さくら, はな, 美しい花, 桜の木 + - Chinese: 花, 玫瑰, 莲花, 牡丹 + - Korean: 꽃, 장미, 벚꽃 + - Arabic (RTL): زهرة, وردة + - Mixed: `🌸 beautiful 美しい flower زهرة 꽃` +- Expected: All characters render correctly, no encoding errors + +**test_encoding_emoji.txt** +- Purpose: Test emoji handling across categories +- Contains: Nature, animals, food, hearts, and mixed emoji with text +- Expected: Emojis render correctly in results + +**test_encoding_special.txt** +- Purpose: Test special Unicode characters +- Contains: + - Mathematical symbols: ∀∂∃∅∆∇∈∉ + - Greek letters: α β γ δ ε ζ + - Currency: $ € £ ¥ ₹ ₽ ₩ + - Box drawing: ┌─┬─┐ + - Diacritics: Café résumé naïve Zürich + - Special punctuation: … — – • · ° +- Expected: All symbols preserved correctly + +### 3. 
Edge Case Tests + +**test_edge_empty_lines.txt** +- Purpose: Test handling of empty lines and whitespace-only lines +- Contains: Options separated by variable empty lines +- Expected: Empty lines ignored, only non-empty options selected + +**test_edge_whitespace.txt** +- Purpose: Test leading/trailing whitespace handling +- Contains: Options with tabs, spaces, mixed whitespace +- Expected: Whitespace handling according to parser rules + +**test_edge_long_lines.txt** +- Purpose: Test very long line handling +- Contains: + - Short lines + - Medium lines (~100 chars) + - Very long lines with spaces (>200 chars) + - Ultra-long lines without spaces (continuous text) +- Expected: No truncation or memory issues, proper handling + +**test_edge_special_chars.txt** +- Purpose: Test special characters that might cause parsing issues +- Contains: + - Embedded wildcard syntax: `__wildcard__` as literal text + - Dynamic prompt syntax: `{option|option}` as literal text + - Regex special chars: `.`, `*`, `+`, `?`, `|`, `\`, `$`, `^` + - Quote characters: `"`, `'`, `` ` `` + - HTML special chars: `&`, `<`, `>`, `=` +- Expected: Special chars treated as literal text in final output + +**test_edge_case_insensitive.txt** +- Purpose: Validate case-insensitive wildcard matching +- Contains: Options in various case patterns +- Expected: `__test_edge_case_insensitive__` and `__TEST_EDGE_CASE_INSENSITIVE__` return same results + +**test_comments.txt** +- Purpose: Test comment handling with `#` prefix +- Contains: Lines starting with `#` mixed with valid options +- Expected: Comment lines ignored, only non-comment lines selected + +### 4. Deep Nesting Tests (7 levels) + +**test_nesting_level1.txt → test_nesting_level7.txt** +- Purpose: Test transitive wildcard expansion up to 7 levels +- Structure: + - Level 1 → references Level 2 + - Level 2 → references Level 3 + - ... + - Level 7 → final options (no further references) +- Usage: Access `__test_nesting_level1__` to trigger 7-level expansion +- Expected: All levels expand correctly, result from level 7 appears + +### 5. 
Syntax Feature Tests + +**test_quantifier.txt** +- Purpose: Test quantifier syntax `N#__wildcard__` +- Contains: List of color options +- Usage: `3#__test_quantifier__` should expand to 3 repeated wildcards +- Expected: Correct repetition and expansion + +**test_pattern_match.txt** +- Purpose: Test pattern matching `__*/name__` +- Contains: Options with identifiable pattern +- Usage: `__*/test_pattern_match__` should match this file +- Expected: Depth-agnostic matching works correctly + +## Test Usage Examples + +### Basic Test +```bash +curl -X POST http://127.0.0.1:8188/impact/wildcards \ + -H "Content-Type: application/json" \ + -d '{"text": "__test_encoding_emoji__", "seed": 42}' +``` + +### Nesting Test +```bash +curl -X POST http://127.0.0.1:8188/impact/wildcards \ + -H "Content-Type: application/json" \ + -d '{"text": "__test_nesting_level1__", "seed": 42}' +``` + +### Error Handling Test +```bash +curl -X POST http://127.0.0.1:8188/impact/wildcards \ + -H "Content-Type: application/json" \ + -d '{"text": "__test_error_cases__", "seed": 42}' +``` + +### Circular Reference Test +```bash +curl -X POST http://127.0.0.1:8188/impact/wildcards \ + -H "Content-Type: application/json" \ + -d '{"text": "__test_circular_a__", "seed": 42}' +``` + +### Quantifier Test +```bash +curl -X POST http://127.0.0.1:8188/impact/wildcards \ + -H "Content-Type: application/json" \ + -d '{"text": "3#__test_quantifier__", "seed": 42}' +``` + +### Pattern Matching Test +```bash +curl -X POST http://127.0.0.1:8188/impact/wildcards \ + -H "Content-Type: application/json" \ + -d '{"text": "__*/test_pattern_match__", "seed": 42}' +``` + +## Test Coverage + +These test files address the following critical gaps identified in the test coverage analysis: + +1. ✅ **Error Handling** - Missing wildcard files, circular references +2. ✅ **UTF-8 Encoding** - Multi-language support (emoji, CJK, RTL) +3. ✅ **Edge Cases** - Empty lines, whitespace, long lines, special chars +4. ✅ **Deep Nesting** - 7-level transitive expansion +5. ✅ **Comment Handling** - Lines starting with `#` +6. ✅ **Case Insensitivity** - Case-insensitive wildcard matching +7. ✅ **Pattern Matching** - `__*/name__` syntax +8. ✅ **Quantifiers** - `N#__wildcard__` syntax + +## Expected Test Results + +All tests should: +- Not crash the system +- Return valid results or graceful error messages +- Preserve character encoding correctly +- Handle edge cases without data corruption +- Respect the 100-iteration limit for circular references +- Demonstrate deterministic behavior with same seed + +--- + +**Created**: 2025-11-18 +**Purpose**: Test coverage validation for wildcard system +**Total Files**: 21 test wildcard files diff --git a/zavodik/nodes/ComfyUI-Impact-Pack/tests/wildcards/test_lazy_load_api.sh b/zavodik/nodes/ComfyUI-Impact-Pack/tests/wildcards/test_lazy_load_api.sh new file mode 100644 index 0000000000000000000000000000000000000000..695dcfba13485ba07ec26581a3f1880dda4717bc --- /dev/null +++ b/zavodik/nodes/ComfyUI-Impact-Pack/tests/wildcards/test_lazy_load_api.sh @@ -0,0 +1,225 @@ +#!/bin/bash +# Verify wildcard lazy loading through ComfyUI API + +set -e + +# Auto-detect paths +SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)" +IMPACT_PACK_DIR="$(cd "$SCRIPT_DIR/.." && pwd)" +COMFYUI_DIR="$(cd "$IMPACT_PACK_DIR/../.." 
&& pwd)" +CONFIG_FILE="$IMPACT_PACK_DIR/impact-pack.ini" +BACKUP_CONFIG="$IMPACT_PACK_DIR/impact-pack.ini.backup" + +GREEN='\033[0;32m' +RED='\033[0;31m' +BLUE='\033[0;34m' +YELLOW='\033[1;33m' +NC='\033[0m' + +echo "==========================================" +echo "Wildcard Lazy Load Verification Test" +echo "==========================================" +echo "" +echo "This test verifies that on-demand loading produces" +echo "identical results to full cache mode." +echo "" + +# Backup original config +if [ -f "$CONFIG_FILE" ]; then + cp "$CONFIG_FILE" "$BACKUP_CONFIG" + echo "✓ Backed up original config" +fi + +# Cleanup function +cleanup() { + echo "" + echo "Cleaning up..." + pkill -f "python.*main.py" 2>/dev/null || true + sleep 2 +} + +# Test with specific configuration +test_mode() { + local MODE=$1 + local CACHE_LIMIT=$2 + local PORT=$3 + + echo "" + echo "${BLUE}=========================================${NC}" + echo "${BLUE}Testing: $MODE (limit: ${CACHE_LIMIT}MB, port: $PORT)${NC}" + echo "${BLUE}=========================================${NC}" + + # Update config + cat > "$CONFIG_FILE" << EOF +[default] +dependency_version = 24 +mmdet_skip = True +sam_editor_cpu = False +sam_editor_model = sam_vit_h_4b8939.pth +custom_wildcards = $IMPACT_PACK_DIR/custom_wildcards +disable_gpu_opencv = True +wildcard_cache_limit_mb = $CACHE_LIMIT +EOF + + # Start server + cleanup + cd "$COMFYUI_DIR" + bash run.sh --listen 127.0.0.1 --port $PORT > /tmp/comfyui_${MODE}.log 2>&1 & + COMFYUI_PID=$! + + echo "Waiting for server startup..." + sleep 15 + + # Check server + if ! curl -s http://127.0.0.1:$PORT/ > /dev/null; then + echo "${RED}✗ Server failed to start${NC}" + cat /tmp/comfyui_${MODE}.log | grep -i "wildcard\|error" | tail -20 + return 1 + fi + + # Get loading mode from log + MODE_LOG=$(grep -i "wildcard.*mode" /tmp/comfyui_${MODE}.log | tail -1) + echo "${YELLOW}$MODE_LOG${NC}" + echo "" + + # Test 1: Get wildcard list (BEFORE any access in on-demand mode) + echo "📋 Test 1: Get wildcard list" + LIST_RESULT=$(curl -s http://127.0.0.1:$PORT/impact/wildcards/list) + LIST_COUNT=$(echo "$LIST_RESULT" | python3 -c "import sys, json; print(len(json.load(sys.stdin)['data']))") + echo " Total wildcards: $LIST_COUNT" + echo " Sample: $(echo "$LIST_RESULT" | python3 -c "import sys, json; print(', '.join(json.load(sys.stdin)['data'][:10]))")" + echo "$LIST_RESULT" > /tmp/result_${MODE}_list.json + echo "" + + # Test 2: Simple wildcard + echo "📋 Test 2: Simple wildcard" + RESULT1=$(curl -s http://127.0.0.1:$PORT/impact/wildcards \ + -X POST \ + -H "Content-Type: application/json" \ + -d '{"text": "__samples/flower__", "seed": 42}') + TEXT1=$(echo "$RESULT1" | python3 -c "import sys, json; print(json.load(sys.stdin)['text'])") + echo " Input: __samples/flower__" + echo " Output: $TEXT1" + echo "$RESULT1" > /tmp/result_${MODE}_simple.json + echo "" + + # Test 3: Depth 3 transitive (adnd → dragon → dragon_spirit) + echo "📋 Test 3: Depth 3 transitive (TXT → TXT → TXT)" + RESULT2=$(curl -s http://127.0.0.1:$PORT/impact/wildcards \ + -X POST \ + -H "Content-Type: application/json" \ + -d '{"text": "__adnd__ creature", "seed": 222}') + TEXT2=$(echo "$RESULT2" | python3 -c "import sys, json; print(json.load(sys.stdin)['text'])") + echo " Input: __adnd__ creature (depth 3: adnd → dragon → dragon_spirit)" + echo " Output: $TEXT2" + echo "$RESULT2" > /tmp/result_${MODE}_depth3.json + echo "" + + # Test 4: YAML transitive (colors → cold/warm → blue/red/orange/yellow) + echo "📋 Test 4: YAML transitive" + 
RESULT3=$(curl -s http://127.0.0.1:$PORT/impact/wildcards \ + -X POST \ + -H "Content-Type: application/json" \ + -d '{"text": "__colors__", "seed": 333}') + TEXT3=$(echo "$RESULT3" | python3 -c "import sys, json; print(json.load(sys.stdin)['text'])") + echo " Input: __colors__ (YAML: colors → cold|warm → blue|red|orange|yellow)" + echo " Output: $TEXT3" + echo "$RESULT3" > /tmp/result_${MODE}_yaml.json + echo "" + + # Test 5: Get wildcard list AGAIN (AFTER access in on-demand mode) + echo "📋 Test 5: Get wildcard list (after access)" + LIST_RESULT2=$(curl -s http://127.0.0.1:$PORT/impact/wildcards/list) + LIST_COUNT2=$(echo "$LIST_RESULT2" | python3 -c "import sys, json; print(len(json.load(sys.stdin)['data']))") + echo " Total wildcards: $LIST_COUNT2" + echo "$LIST_RESULT2" > /tmp/result_${MODE}_list_after.json + echo "" + + # Compare before/after list + if [ "$MODE" = "on_demand" ]; then + if [ "$LIST_COUNT" -eq "$LIST_COUNT2" ]; then + echo "${GREEN}✓ Wildcard list unchanged after access (${LIST_COUNT} = ${LIST_COUNT2})${NC}" + else + echo "${RED}✗ Wildcard list changed after access (${LIST_COUNT} != ${LIST_COUNT2})${NC}" + fi + echo "" + fi + + cleanup + + echo "${GREEN}✓ $MODE tests completed${NC}" + echo "" +} + +# Run tests +test_mode "full_cache" 100 8190 +test_mode "on_demand" 1 8191 + +# Compare results +echo "" +echo "==========================================" +echo "COMPARISON RESULTS" +echo "==========================================" +echo "" + +compare_test() { + local TEST_NAME=$1 + local FILE_SUFFIX=$2 + + echo "Test: $TEST_NAME" + DIFF=$(diff /tmp/result_full_cache_${FILE_SUFFIX}.json /tmp/result_on_demand_${FILE_SUFFIX}.json || true) + if [ -z "$DIFF" ]; then + echo "${GREEN}✓ Results MATCH${NC}" + else + echo "${RED}✗ Results DIFFER${NC}" + echo "Difference:" + echo "$DIFF" | head -10 + fi + echo "" +} + +compare_test "Wildcard List (before access)" "list" +compare_test "Simple Wildcard" "simple" +compare_test "Depth 3 Transitive" "depth3" +compare_test "YAML Transitive" "yaml" +compare_test "Wildcard List (after access)" "list_after" + +# Summary +echo "==========================================" +echo "SUMMARY" +echo "==========================================" +echo "" + +ALL_MATCH=true +for suffix in list simple depth3 yaml list_after; do + if ! 
diff /tmp/result_full_cache_${suffix}.json /tmp/result_on_demand_${suffix}.json > /dev/null 2>&1; then + ALL_MATCH=false + break + fi +done + +if [ "$ALL_MATCH" = true ]; then + echo "${GREEN}🎉 ALL TESTS PASSED${NC}" + echo "${GREEN}On-demand loading produces IDENTICAL results to full cache mode!${NC}" + EXIT_CODE=0 +else + echo "${RED}❌ TESTS FAILED${NC}" + echo "${RED}On-demand loading has consistency issues!${NC}" + EXIT_CODE=1 +fi +echo "" + +# Restore config +if [ -f "$BACKUP_CONFIG" ]; then + mv "$BACKUP_CONFIG" "$CONFIG_FILE" + echo "✓ Restored original config" +fi + +cleanup + +echo "" +echo "==========================================" +echo "Test Complete" +echo "==========================================" + +exit $EXIT_CODE diff --git a/zavodik/nodes/ComfyUI-Impact-Pack/tests/wildcards/test_lazy_load_verification.py b/zavodik/nodes/ComfyUI-Impact-Pack/tests/wildcards/test_lazy_load_verification.py new file mode 100644 index 0000000000000000000000000000000000000000..3a27515c0963b0121d54b2f6687a3f11db296163 --- /dev/null +++ b/zavodik/nodes/ComfyUI-Impact-Pack/tests/wildcards/test_lazy_load_verification.py @@ -0,0 +1,262 @@ +#!/usr/bin/env python3 +""" +Verify that wildcard lists are identical before and after on-demand loading. + +This test ensures that LazyWildcardLoader maintains consistency: +1. Full cache mode: all data loaded immediately +2. On-demand mode (before access): LazyWildcardLoader proxies +3. On-demand mode (after access): data loaded on demand + +All three scenarios should produce identical wildcard lists and values. +""" + +import sys +import os + +# Add parent directory to path +sys.path.insert(0, os.path.abspath(os.path.join(os.path.dirname(__file__), '..'))) + +from modules.impact import config +from modules.impact.wildcards import wildcard_load, wildcard_dict, is_on_demand_mode, process + + +def get_wildcard_list(): + """Get list of all wildcard keys""" + return sorted(list(wildcard_dict.keys())) + + +def get_wildcard_sample_values(wildcards_to_test=None): + """Get sample values from specific wildcards""" + if wildcards_to_test is None: + wildcards_to_test = [ + 'samples/flower', + 'samples/jewel', + 'adnd', # Depth 3 transitive + 'all', # Depth 3 transitive + 'colors', # YAML transitive + ] + + values = {} + for key in wildcards_to_test: + if key in wildcard_dict: + data = wildcard_dict[key] + # Convert to list if it's a LazyWildcardLoader + if hasattr(data, 'get_data'): + data = data.get_data() + values[key] = list(data) if data else [] + else: + values[key] = None + + return values + + +def test_full_cache_mode(): + """Test with full cache mode (limit = 100 MB)""" + print("=" * 80) + print("TEST 1: Full Cache Mode") + print("=" * 80) + print() + + # Set high cache limit to force full cache mode + config.get_config()['wildcard_cache_limit_mb'] = 100 + + # Reload wildcards + wildcard_load() + + # Check mode + mode = is_on_demand_mode() + print(f"Mode: {'On-Demand' if mode else 'Full Cache'}") + assert not mode, "Should be in Full Cache mode" + + # Get wildcard list + wc_list = get_wildcard_list() + print(f"Total wildcards: {len(wc_list)}") + print(f"Sample wildcards: {wc_list[:10]}") + print() + + # Get sample values + values = get_wildcard_sample_values() + print("Sample values:") + for key, val in values.items(): + if val is not None: + print(f" {key}: {len(val)} items - {val[:3] if len(val) >= 3 else val}") + else: + print(f" {key}: NOT FOUND") + print() + + return { + 'mode': 'full_cache', + 'wildcard_list': wc_list, + 'values': values, + } + + +def 
test_on_demand_mode_before_access(): + """Test with on-demand mode before accessing data""" + print("=" * 80) + print("TEST 2: On-Demand Mode (Before Access)") + print("=" * 80) + print() + + # Set low cache limit to force on-demand mode + config.get_config()['wildcard_cache_limit_mb'] = 1 + + # Reload wildcards + wildcard_load() + + # Check mode + mode = is_on_demand_mode() + print(f"Mode: {'On-Demand' if mode else 'Full Cache'}") + assert mode, "Should be in On-Demand mode" + + # Get wildcard list (should work even without loading data) + wc_list = get_wildcard_list() + print(f"Total wildcards: {len(wc_list)}") + print(f"Sample wildcards: {wc_list[:10]}") + print() + + # Check that wildcards are LazyWildcardLoader instances + lazy_count = sum(1 for k in wc_list if hasattr(wildcard_dict[k], 'get_data')) + print(f"LazyWildcardLoader instances: {lazy_count}/{len(wc_list)}") + print() + + return { + 'mode': 'on_demand_before', + 'wildcard_list': wc_list, + 'lazy_count': lazy_count, + } + + +def test_on_demand_mode_after_access(): + """Test with on-demand mode after accessing data""" + print("=" * 80) + print("TEST 3: On-Demand Mode (After Access)") + print("=" * 80) + print() + + # Mode should still be on-demand from previous test + mode = is_on_demand_mode() + print(f"Mode: {'On-Demand' if mode else 'Full Cache'}") + assert mode, "Should still be in On-Demand mode" + + # Get sample values (this will trigger lazy loading) + values = get_wildcard_sample_values() + print("Sample values (after access):") + for key, val in values.items(): + if val is not None: + print(f" {key}: {len(val)} items - {val[:3] if len(val) >= 3 else val}") + else: + print(f" {key}: NOT FOUND") + print() + + # Test deep transitive wildcards + print("Testing deep transitive wildcards:") + test_cases = [ + ("__adnd__", 42), # Depth 3: adnd → dragon → dragon_spirit + ("__all__", 123), # Depth 3: all → giant → giant_soldier + ] + + for wildcard_text, seed in test_cases: + result = process(wildcard_text, seed) + print(f" {wildcard_text} (seed={seed}): {result}") + print() + + return { + 'mode': 'on_demand_after', + 'wildcard_list': get_wildcard_list(), + 'values': values, + } + + +def compare_results(result1, result2, result3): + """Compare results from all three tests""" + print("=" * 80) + print("COMPARISON RESULTS") + print("=" * 80) + print() + + # Compare wildcard lists + list1 = result1['wildcard_list'] + list2 = result2['wildcard_list'] + list3 = result3['wildcard_list'] + + print("1. Wildcard List Comparison") + print(f" Full Cache: {len(list1)} wildcards") + print(f" On-Demand (before): {len(list2)} wildcards") + print(f" On-Demand (after): {len(list3)} wildcards") + + if list1 == list2 == list3: + print(" ✅ All lists are IDENTICAL") + else: + print(" ❌ Lists DIFFER") + if list1 != list2: + print(f" Full Cache vs On-Demand (before): {len(set(list1) - set(list2))} differences") + if list1 != list3: + print(f" Full Cache vs On-Demand (after): {len(set(list1) - set(list3))} differences") + if list2 != list3: + print(f" On-Demand (before) vs On-Demand (after): {len(set(list2) - set(list3))} differences") + print() + + # Compare sample values + values1 = result1['values'] + values3 = result3['values'] + + print("2. 
Sample Values Comparison") + all_match = True + for key in values1.keys(): + v1 = values1[key] + v3 = values3[key] + + if v1 == v3: + status = "✅ MATCH" + else: + status = "❌ DIFFER" + all_match = False + + print(f" {key}: {status}") + if v1 != v3: + print(f" Full Cache: {len(v1) if v1 else 0} items") + print(f" On-Demand: {len(v3) if v3 else 0} items") + print() + + if all_match: + print("✅ ALL VALUES MATCH - On-demand loading is CONSISTENT") + else: + print("❌ VALUES DIFFER - On-demand loading has ISSUES") + print() + + return list1 == list2 == list3 and all_match + + +def main(): + print() + print("=" * 80) + print("WILDCARD LAZY LOAD VERIFICATION TEST") + print("=" * 80) + print() + print("This test verifies that on-demand loading produces identical results") + print("to full cache mode.") + print() + + # Run tests + result1 = test_full_cache_mode() + result2 = test_on_demand_mode_before_access() + result3 = test_on_demand_mode_after_access() + + # Compare results + success = compare_results(result1, result2, result3) + + # Final result + print("=" * 80) + if success: + print("🎉 TEST PASSED - Lazy loading is working correctly!") + else: + print("❌ TEST FAILED - Lazy loading has consistency issues!") + print("=" * 80) + print() + + return 0 if success else 1 + + +if __name__ == '__main__': + sys.exit(main()) diff --git a/zavodik/nodes/ComfyUI-Impact-Pack/tests/wildcards/test_progressive_loading.py b/zavodik/nodes/ComfyUI-Impact-Pack/tests/wildcards/test_progressive_loading.py new file mode 100644 index 0000000000000000000000000000000000000000..8e756f78b999fb2d7dcd248547940ef18196d5e2 --- /dev/null +++ b/zavodik/nodes/ComfyUI-Impact-Pack/tests/wildcards/test_progressive_loading.py @@ -0,0 +1,247 @@ +#!/usr/bin/env python3 +""" +Progressive On-Demand Wildcard Loading Unit Tests + +Tests that wildcard loading happens progressively as wildcards are accessed. 
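+
+This file is standalone and does not require a running ComfyUI server; per the
+suite README it can be run directly, e.g.:
+
+    python3 test_progressive_loading.py
+
+Each test resets the module globals it exercises (available_wildcards,
+loaded_wildcards, _on_demand_mode) before running, so results do not depend on
+previously loaded wildcard state.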
+""" +import sys +import os +import tempfile + +# Add parent directory to path +test_dir = os.path.dirname(os.path.abspath(__file__)) +impact_pack_dir = os.path.dirname(test_dir) +sys.path.insert(0, impact_pack_dir) + +from modules.impact import wildcards + + +def test_early_termination(): + """Test that calculate_directory_size stops early when limit exceeded""" + print("=" * 60) + print("TEST 1: Early Termination Size Calculation") + print("=" * 60) + + # Create temporary directory with test files + with tempfile.TemporaryDirectory() as tmpdir: + # Create files totaling 100 bytes + for i in range(10): + with open(os.path.join(tmpdir, f"test{i}.txt"), 'w') as f: + f.write("x" * 10) # 10 bytes each + + # Test without limit (should scan all) + total_size = wildcards.calculate_directory_size(tmpdir) + print(f"✓ Total size without limit: {total_size} bytes") + assert total_size == 100, f"Expected 100 bytes, got {total_size}" + + # Test with limit (should stop early) + limited_size = wildcards.calculate_directory_size(tmpdir, limit=50) + print(f"✓ Size with 50 byte limit: {limited_size} bytes") + assert limited_size >= 50, f"Expected >= 50 bytes, got {limited_size}" + assert limited_size <= total_size, "Limited should not exceed total" + + print(f"✓ Early termination working (stopped at {limited_size} bytes)") + print("\n✅ Early termination test PASSED\n") + + +def test_metadata_scan(): + """Test that scan_wildcard_metadata only scans file paths, not data""" + print("=" * 60) + print("TEST 2: Metadata-Only Scan") + print("=" * 60) + + # Create temporary wildcard directory + with tempfile.TemporaryDirectory() as tmpdir: + # Create test files + test_file1 = os.path.join(tmpdir, "test1.txt") + test_file2 = os.path.join(tmpdir, "test2.txt") + test_yaml = os.path.join(tmpdir, "test3.yaml") + + with open(test_file1, 'w') as f: + f.write("option1a\noption1b\noption1c\n") + + with open(test_file2, 'w') as f: + f.write("option2a\noption2b\n") + + with open(test_yaml, 'w') as f: + f.write("key1:\n - value1\n - value2\n") + + # Clear globals + wildcards.available_wildcards = {} + wildcards.loaded_wildcards = {} + + # Scan metadata only + print(f"✓ Scanning directory: {tmpdir}") + discovered = wildcards.scan_wildcard_metadata(tmpdir) + + print(f"✓ Discovered {discovered} wildcards") + assert discovered == 3, f"Expected 3 wildcards, got {discovered}" + + print(f"✓ Available wildcards: {list(wildcards.available_wildcards.keys())}") + assert len(wildcards.available_wildcards) == 3 + + # Verify that data is NOT loaded + assert len(wildcards.loaded_wildcards) == 0, "Data should not be loaded yet" + print("✓ No data loaded (metadata only)") + + # Verify file paths are stored + for key in wildcards.available_wildcards.keys(): + file_path = wildcards.available_wildcards[key] + assert os.path.exists(file_path), f"File path should exist: {file_path}" + print(f" - {key} -> {file_path}") + + print("\n✅ Metadata scan test PASSED\n") + + +def test_progressive_loading(): + """Test that wildcards are loaded progressively on access""" + print("=" * 60) + print("TEST 3: Progressive On-Demand Loading") + print("=" * 60) + + # Create temporary wildcard directory + with tempfile.TemporaryDirectory() as tmpdir: + # Create test files + test_file1 = os.path.join(tmpdir, "wildcard1.txt") + test_file2 = os.path.join(tmpdir, "wildcard2.txt") + test_file3 = os.path.join(tmpdir, "wildcard3.txt") + + with open(test_file1, 'w') as f: + f.write("option1a\noption1b\n") + + with open(test_file2, 'w') as f: + 
f.write("option2a\noption2b\n") + + with open(test_file3, 'w') as f: + f.write("option3a\noption3b\n") + + # Clear globals + wildcards.available_wildcards = {} + wildcards.loaded_wildcards = {} + wildcards._on_demand_mode = True + + # Scan metadata + discovered = wildcards.scan_wildcard_metadata(tmpdir) + print(f"✓ Discovered {discovered} wildcards") + print(f"✓ Available: {len(wildcards.available_wildcards)}") + print(f"✓ Loaded: {len(wildcards.loaded_wildcards)}") + + # Initial state: 3 available, 0 loaded + assert len(wildcards.available_wildcards) == 3 + assert len(wildcards.loaded_wildcards) == 0 + + # Access first wildcard + print("\nAccessing wildcard1...") + data1 = wildcards.get_wildcard_value("wildcard1") + assert data1 is not None, "Should load wildcard1" + assert len(data1) == 2, f"Expected 2 options, got {len(data1)}" + print(f"✓ Loaded wildcard1: {data1}") + print(f"✓ Loaded count: {len(wildcards.loaded_wildcards)}") + assert len(wildcards.loaded_wildcards) == 1, "Should have 1 loaded wildcard" + + # Access second wildcard + print("\nAccessing wildcard2...") + data2 = wildcards.get_wildcard_value("wildcard2") + assert data2 is not None, "Should load wildcard2" + print(f"✓ Loaded wildcard2: {data2}") + print(f"✓ Loaded count: {len(wildcards.loaded_wildcards)}") + assert len(wildcards.loaded_wildcards) == 2, "Should have 2 loaded wildcards" + + # Re-access first wildcard (should use cache) + print("\nRe-accessing wildcard1 (cached)...") + data1_again = wildcards.get_wildcard_value("wildcard1") + assert data1_again == data1, "Cached data should match" + print("✓ Cache hit, data matches") + print(f"✓ Loaded count: {len(wildcards.loaded_wildcards)}") + assert len(wildcards.loaded_wildcards) == 2, "Count should not increase on cache hit" + + # Access third wildcard + print("\nAccessing wildcard3...") + data3 = wildcards.get_wildcard_value("wildcard3") + assert data3 is not None, "Should load wildcard3" + print(f"✓ Loaded wildcard3: {data3}") + print(f"✓ Loaded count: {len(wildcards.loaded_wildcards)}") + assert len(wildcards.loaded_wildcards) == 3, "Should have 3 loaded wildcards" + + # Verify all loaded + assert set(wildcards.loaded_wildcards.keys()) == {"wildcard1", "wildcard2", "wildcard3"} + print("✓ All wildcards loaded progressively") + + print("\n✅ Progressive loading test PASSED\n") + + +def test_wildcard_list_functions(): + """Test get_wildcard_list() and get_loaded_wildcard_list()""" + print("=" * 60) + print("TEST 4: Wildcard List Functions") + print("=" * 60) + + # Create temporary wildcard directory + with tempfile.TemporaryDirectory() as tmpdir: + # Create test files + for i in range(5): + with open(os.path.join(tmpdir, f"test{i}.txt"), 'w') as f: + f.write(f"option{i}a\noption{i}b\n") + + # Clear globals + wildcards.available_wildcards = {} + wildcards.loaded_wildcards = {} + wildcards._on_demand_mode = True + + # Scan metadata + wildcards.scan_wildcard_metadata(tmpdir) + + # Test get_wildcard_list (should return all available) + all_wildcards = wildcards.get_wildcard_list() + print(f"✓ get_wildcard_list(): {len(all_wildcards)} wildcards") + assert len(all_wildcards) == 5, "Should return all available wildcards" + + # Test get_loaded_wildcard_list (should return 0 initially) + loaded_wildcards_list = wildcards.get_loaded_wildcard_list() + print(f"✓ get_loaded_wildcard_list(): {len(loaded_wildcards_list)} wildcards (initial)") + assert len(loaded_wildcards_list) == 0, "Should return no loaded wildcards initially" + + # Load some wildcards + 
wildcards.get_wildcard_value("test0") + wildcards.get_wildcard_value("test1") + + # Test get_loaded_wildcard_list (should return 2 now) + loaded_wildcards_list = wildcards.get_loaded_wildcard_list() + print(f"✓ get_loaded_wildcard_list(): {len(loaded_wildcards_list)} wildcards (after loading 2)") + assert len(loaded_wildcards_list) == 2, "Should return 2 loaded wildcards" + + # Verify loaded list is subset of available list + assert set(loaded_wildcards_list).issubset(set(all_wildcards)), "Loaded should be subset of available" + print("✓ Loaded list is subset of available list") + + print("\n✅ Wildcard list functions test PASSED\n") + + +def main(): + """Run all tests""" + print("\n" + "=" * 60) + print("PROGRESSIVE ON-DEMAND LOADING TEST SUITE") + print("=" * 60 + "\n") + + try: + test_early_termination() + test_metadata_scan() + test_progressive_loading() + test_wildcard_list_functions() + + print("=" * 60) + print("✅ ALL TESTS PASSED") + print("=" * 60) + return 0 + + except Exception as e: + print("\n" + "=" * 60) + print(f"❌ TEST FAILED: {e}") + print("=" * 60) + import traceback + traceback.print_exc() + return 1 + + +if __name__ == "__main__": + sys.exit(main()) diff --git a/zavodik/nodes/ComfyUI-Impact-Pack/tests/wildcards/test_progressive_ondemand.sh b/zavodik/nodes/ComfyUI-Impact-Pack/tests/wildcards/test_progressive_ondemand.sh new file mode 100644 index 0000000000000000000000000000000000000000..12ef413b871d5eee3d59683fcf48a60dc8d9cf46 --- /dev/null +++ b/zavodik/nodes/ComfyUI-Impact-Pack/tests/wildcards/test_progressive_ondemand.sh @@ -0,0 +1,270 @@ +#!/bin/bash +# Progressive On-Demand Wildcard Loading Test +# Verifies that wildcards are loaded progressively as they are accessed + +set -e + +# Auto-detect paths +SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)" +IMPACT_PACK_DIR="$(cd "$SCRIPT_DIR/.." && pwd)" +COMFYUI_DIR="$(cd "$IMPACT_PACK_DIR/../.." && pwd)" +CONFIG_FILE="$IMPACT_PACK_DIR/impact-pack.ini" +BACKUP_CONFIG="$IMPACT_PACK_DIR/impact-pack.ini.backup" +PORT=8195 + +GREEN='\033[0;32m' +RED='\033[0;31m' +BLUE='\033[0;34m' +YELLOW='\033[1;33m' +CYAN='\033[0;36m' +NC='\033[0m' + +echo "==========================================" +echo "Progressive On-Demand Loading Test" +echo "==========================================" +echo "" +echo "This test verifies that /wildcards/list/loaded" +echo "increases progressively as wildcards are accessed." +echo "" + +# Backup original config +if [ -f "$CONFIG_FILE" ]; then + cp "$CONFIG_FILE" "$BACKUP_CONFIG" + echo "✓ Backed up original config" +fi + +# Cleanup function +cleanup() { + echo "" + echo "Cleaning up..." + pkill -f "python.*main.py.*$PORT" 2>/dev/null || true + sleep 2 +} + +# Setup on-demand mode (low cache limit) +echo "${BLUE}Setting up on-demand mode configuration${NC}" +cat > "$CONFIG_FILE" << EOF +[default] +dependency_version = 24 +mmdet_skip = True +sam_editor_cpu = False +sam_editor_model = sam_vit_h_4b8939.pth +custom_wildcards = $IMPACT_PACK_DIR/custom_wildcards +disable_gpu_opencv = True +wildcard_cache_limit_mb = 0.5 +EOF + +echo "✓ Configuration: on-demand mode (0.5MB limit)" +echo "" + +# Start server +cleanup +cd "$COMFYUI_DIR" +echo "Starting ComfyUI server on port $PORT..." +bash run.sh --listen 127.0.0.1 --port $PORT > /tmp/progressive_test.log 2>&1 & +COMFYUI_PID=$! + +echo "Waiting for server startup..." +sleep 15 + +# Check server +if ! 
curl -s http://127.0.0.1:$PORT/ > /dev/null; then + echo "${RED}✗ Server failed to start${NC}" + cat /tmp/progressive_test.log | grep -i "wildcard\|error" | tail -20 + exit 1 +fi + +echo "${GREEN}✓ Server started${NC}" +echo "" + +# Check loading mode from log +MODE_LOG=$(grep -i "wildcard.*mode" /tmp/progressive_test.log | tail -1) +echo "${YELLOW}$MODE_LOG${NC}" +echo "" + +# Test Progressive Loading +echo "==========================================" +echo "Progressive Loading Verification" +echo "==========================================" +echo "" + +# Step 1: Initial state (no wildcards accessed) +echo "${CYAN}Step 1: Initial state (before any wildcard access)${NC}" +RESPONSE=$(curl -s http://127.0.0.1:$PORT/impact/wildcards/list/loaded) +LOADED_COUNT=$(echo "$RESPONSE" | python3 -c "import sys, json; print(len(json.load(sys.stdin)['data']))" 2>/dev/null || echo "0") +ON_DEMAND=$(echo "$RESPONSE" | python3 -c "import sys, json; print(json.load(sys.stdin).get('on_demand_mode', False))" 2>/dev/null || echo "false") +TOTAL_AVAILABLE=$(echo "$RESPONSE" | python3 -c "import sys, json; print(json.load(sys.stdin).get('total_available', 0))" 2>/dev/null || echo "0") + +echo " On-demand mode: $ON_DEMAND" +echo " Total available wildcards: $TOTAL_AVAILABLE" +echo " Loaded wildcards: ${YELLOW}$LOADED_COUNT${NC}" + +if [ "$ON_DEMAND" != "True" ]; then + echo "${RED}✗ FAIL: On-demand mode not active!${NC}" + exit 1 +fi + +if [ "$LOADED_COUNT" -ne 0 ]; then + echo "${YELLOW}⚠ WARNING: Expected 0 loaded, got $LOADED_COUNT${NC}" +fi +echo "" + +# Step 2: Access first wildcard +echo "${CYAN}Step 2: Access first wildcard (__samples/flower__)${NC}" +RESULT1=$(curl -s http://127.0.0.1:$PORT/impact/wildcards \ + -X POST \ + -H "Content-Type: application/json" \ + -d '{"text": "__samples/flower__", "seed": 42}') +TEXT1=$(echo "$RESULT1" | python3 -c "import sys, json; print(json.load(sys.stdin).get('text','ERROR'))") +echo " Result: $TEXT1" + +RESPONSE=$(curl -s http://127.0.0.1:$PORT/impact/wildcards/list/loaded) +LOADED_COUNT_1=$(echo "$RESPONSE" | python3 -c "import sys, json; print(len(json.load(sys.stdin)['data']))") +echo " Loaded wildcards: ${YELLOW}$LOADED_COUNT_1${NC}" + +if [ "$LOADED_COUNT_1" -lt 1 ]; then + echo "${RED}✗ FAIL: Expected at least 1 loaded wildcard${NC}" + exit 1 +fi +echo "${GREEN}✓ PASS: Wildcard count increased${NC}" +echo "" + +# Step 3: Access second wildcard (different from first) +echo "${CYAN}Step 3: Access second wildcard (__dragon__)${NC}" +RESULT2=$(curl -s http://127.0.0.1:$PORT/impact/wildcards \ + -X POST \ + -H "Content-Type: application/json" \ + -d '{"text": "__dragon__", "seed": 200}') +TEXT2=$(echo "$RESULT2" | python3 -c "import sys, json; print(json.load(sys.stdin).get('text','ERROR'))") +echo " Result: $TEXT2" + +RESPONSE=$(curl -s http://127.0.0.1:$PORT/impact/wildcards/list/loaded) +LOADED_COUNT_2=$(echo "$RESPONSE" | python3 -c "import sys, json; print(len(json.load(sys.stdin)['data']))") +echo " Loaded wildcards: ${YELLOW}$LOADED_COUNT_2${NC}" + +if [ "$LOADED_COUNT_2" -le "$LOADED_COUNT_1" ]; then + echo "${RED}✗ FAIL: Expected loaded count to increase (was $LOADED_COUNT_1, now $LOADED_COUNT_2)${NC}" + exit 1 +fi +echo "${GREEN}✓ PASS: Wildcard count increased progressively${NC}" +echo "" + +# Step 4: Access third wildcard (YAML) +echo "${CYAN}Step 4: Access third wildcard (__colors__)${NC}" +RESULT3=$(curl -s http://127.0.0.1:$PORT/impact/wildcards \ + -X POST \ + -H "Content-Type: application/json" \ + -d '{"text": "__colors__", "seed": 333}') 
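+# Response shapes, as parsed throughout this script: the populate endpoint
+# returns {"text": <expanded prompt>}, and /impact/wildcards/list/loaded
+# returns {"data": [<loaded wildcard names>], "on_demand_mode": <bool>,
+# "total_available": <int>}.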
+TEXT3=$(echo "$RESULT3" | python3 -c "import sys, json; print(json.load(sys.stdin).get('text','ERROR'))") +echo " Result: $TEXT3" + +RESPONSE=$(curl -s http://127.0.0.1:$PORT/impact/wildcards/list/loaded) +LOADED_COUNT_3=$(echo "$RESPONSE" | python3 -c "import sys, json; print(len(json.load(sys.stdin)['data']))") +LOADED_LIST=$(echo "$RESPONSE" | python3 -c "import sys, json; print(', '.join(json.load(sys.stdin)['data'][:10]))") +echo " Loaded wildcards: ${YELLOW}$LOADED_COUNT_3${NC}" +echo " Sample loaded: $LOADED_LIST" + +if [ "$LOADED_COUNT_3" -le "$LOADED_COUNT_2" ]; then + echo "${RED}✗ FAIL: Expected loaded count to increase (was $LOADED_COUNT_2, now $LOADED_COUNT_3)${NC}" + exit 1 +fi +echo "${GREEN}✓ PASS: Wildcard count increased progressively${NC}" +echo "" + +# Step 5: Re-access first wildcard (should not increase count) +echo "${CYAN}Step 5: Re-access first wildcard (cached)${NC}" +RESULT4=$(curl -s http://127.0.0.1:$PORT/impact/wildcards \ + -X POST \ + -H "Content-Type: application/json" \ + -d '{"text": "__samples/flower__", "seed": 42}') + +RESPONSE=$(curl -s http://127.0.0.1:$PORT/impact/wildcards/list/loaded) +LOADED_COUNT_4=$(echo "$RESPONSE" | python3 -c "import sys, json; print(len(json.load(sys.stdin)['data']))") +echo " Loaded wildcards: ${YELLOW}$LOADED_COUNT_4${NC}" + +if [ "$LOADED_COUNT_4" -ne "$LOADED_COUNT_3" ]; then + echo "${YELLOW}⚠ WARNING: Count changed on cache access (was $LOADED_COUNT_3, now $LOADED_COUNT_4)${NC}" +else + echo "${GREEN}✓ PASS: Cached access did not change count${NC}" +fi +echo "" + +# Step 6: Deep transitive wildcard (should load multiple wildcards) +echo "${CYAN}Step 6: Deep transitive wildcard (__adnd__)${NC}" +RESULT5=$(curl -s http://127.0.0.1:$PORT/impact/wildcards \ + -X POST \ + -H "Content-Type: application/json" \ + -d '{"text": "__adnd__ creature", "seed": 222}') +TEXT5=$(echo "$RESULT5" | python3 -c "import sys, json; print(json.load(sys.stdin).get('text','ERROR'))") +echo " Result: $TEXT5" + +RESPONSE=$(curl -s http://127.0.0.1:$PORT/impact/wildcards/list/loaded) +LOADED_COUNT_5=$(echo "$RESPONSE" | python3 -c "import sys, json; print(len(json.load(sys.stdin)['data']))") +echo " Loaded wildcards: ${YELLOW}$LOADED_COUNT_5${NC}" + +if [ "$LOADED_COUNT_5" -le "$LOADED_COUNT_4" ]; then + echo "${YELLOW}⚠ Transitive wildcards may already be loaded${NC}" +else + echo "${GREEN}✓ PASS: Transitive wildcards loaded progressively${NC}" +fi +echo "" + +# Summary +echo "==========================================" +echo "Progressive Loading Summary" +echo "==========================================" +echo "" +echo "Total available wildcards: $TOTAL_AVAILABLE" +echo "Loading progression:" +echo " Initial: $LOADED_COUNT" +echo " After step 2: $LOADED_COUNT_1 (+$(($LOADED_COUNT_1 - $LOADED_COUNT)))" +echo " After step 3: $LOADED_COUNT_2 (+$(($LOADED_COUNT_2 - $LOADED_COUNT_1)))" +echo " After step 4: $LOADED_COUNT_3 (+$(($LOADED_COUNT_3 - $LOADED_COUNT_2)))" +echo " After step 5: $LOADED_COUNT_4 (cache, no change)" +echo " After step 6: $LOADED_COUNT_5 (+$(($LOADED_COUNT_5 - $LOADED_COUNT_4)))" +echo "" + +# Validation +ALL_PASSED=true + +if [ "$LOADED_COUNT_1" -le "$LOADED_COUNT" ]; then + echo "${RED}✗ FAIL: Step 2 did not increase count${NC}" + ALL_PASSED=false +fi + +if [ "$LOADED_COUNT_2" -le "$LOADED_COUNT_1" ]; then + echo "${RED}✗ FAIL: Step 3 did not increase count${NC}" + ALL_PASSED=false +fi + +if [ "$LOADED_COUNT_3" -le "$LOADED_COUNT_2" ]; then + echo "${RED}✗ FAIL: Step 4 did not increase count${NC}" + ALL_PASSED=false +fi + 
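+# Note: the step checks above only assert that the loaded count grows
+# monotonically; exact counts are deliberately not pinned, because a single
+# access can pull in several files when a wildcard expands transitively
+# (see step 6).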
+if [ "$ALL_PASSED" = true ]; then + echo "${GREEN}🎉 ALL TESTS PASSED${NC}" + echo "${GREEN}Progressive on-demand loading verified successfully!${NC}" + EXIT_CODE=0 +else + echo "${RED}❌ TESTS FAILED${NC}" + echo "${RED}Progressive loading did not work as expected!${NC}" + EXIT_CODE=1 +fi +echo "" + +# Restore config +cleanup +if [ -f "$BACKUP_CONFIG" ]; then + mv "$BACKUP_CONFIG" "$CONFIG_FILE" + echo "✓ Restored original config" +fi + +echo "" +echo "==========================================" +echo "Test Complete" +echo "==========================================" +echo "Log saved to: /tmp/progressive_test.log" +echo "" + +exit $EXIT_CODE diff --git a/zavodik/nodes/ComfyUI-Impact-Pack/tests/wildcards/test_sequential_loading.sh b/zavodik/nodes/ComfyUI-Impact-Pack/tests/wildcards/test_sequential_loading.sh new file mode 100644 index 0000000000000000000000000000000000000000..f06012b4b012873330d4e400e67e8289f2fea832 --- /dev/null +++ b/zavodik/nodes/ComfyUI-Impact-Pack/tests/wildcards/test_sequential_loading.sh @@ -0,0 +1,327 @@ +#!/bin/bash +# Sequential Multi-Stage Wildcard Loading Test +# Tests transitive wildcards that load in multiple sequential stages + +# Auto-detect paths +SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)" +IMPACT_DIR="$(cd "$SCRIPT_DIR/.." && pwd)" +PORT=8193 +CONFIG_FILE="$IMPACT_DIR/impact-pack.ini" + +GREEN='\033[0;32m' +RED='\033[0;31m' +BLUE='\033[0;34m' +YELLOW='\033[1;33m' +CYAN='\033[0;36m' +NC='\033[0m' + +echo "==========================================" +echo "Sequential Multi-Stage Wildcard Loading Test" +echo "==========================================" +echo "" + +# Setup config for full cache mode +cat > "$CONFIG_FILE" << EOF +[default] +dependency_version = 24 +mmdet_skip = True +sam_editor_cpu = False +sam_editor_model = sam_vit_h_4b8939.pth +custom_wildcards = $IMPACT_DIR/custom_wildcards +disable_gpu_opencv = True +wildcard_cache_limit_mb = 50 +EOF + +echo "Mode: Full cache mode (50MB limit)" +echo "" + +# Kill existing servers +pkill -9 -f "python.*main.py" 2>/dev/null || true +sleep 3 + +# Start server +COMFYUI_DIR="$(cd "$IMPACT_DIR/../.." && pwd)" +cd "$COMFYUI_DIR" +echo "Starting ComfyUI server on port $PORT..." +bash run.sh --listen 127.0.0.1 --port $PORT > /tmp/sequential_test.log 2>&1 & +SERVER_PID=$! + +# Wait for server +echo "Waiting 70 seconds for server startup..." +for i in {1..70}; do + sleep 1 + if [ $((i % 10)) -eq 0 ]; then + echo " ... $i seconds" + fi +done + +# Check server +if ! 
curl -s http://127.0.0.1:$PORT/ > /dev/null; then + echo "${RED}✗ Server failed to start${NC}" + exit 1 +fi + +echo "${GREEN}✓ Server started${NC}" +echo "" + +# Test function with stage visualization +test_sequential() { + local TEST_NUM=$1 + local RAW_PROMPT=$2 + local SEED=$3 + local DESCRIPTION=$4 + local EXPECTED_STAGES=$5 # Number of expected expansion stages + + echo "${BLUE}=== Test $TEST_NUM: $DESCRIPTION ===${NC}" + echo "Raw prompt: ${YELLOW}$RAW_PROMPT${NC}" + echo "Seed: $SEED" + echo "Expected stages: $EXPECTED_STAGES" + echo "" + + # Test the prompt + RESULT=$(curl -s -X POST http://127.0.0.1:$PORT/impact/wildcards \ + -H "Content-Type: application/json" \ + -d "{\"text\": \"$RAW_PROMPT\", \"seed\": $SEED}" | \ + python3 -c "import sys, json; print(json.load(sys.stdin).get('text','ERROR'))" 2>/dev/null || echo "ERROR") + + echo "${CYAN}Stage Analysis:${NC}" + echo " Stage 0 (Input): $RAW_PROMPT" + + # Check if result contains any wildcards (incomplete expansion) + if echo "$RESULT" | grep -q "__.*__"; then + echo " ${YELLOW}⚠ Result still contains wildcards (incomplete expansion)${NC}" + echo " Final Result: $RESULT" + else + echo " ${GREEN}✓ All wildcards fully expanded${NC}" + fi + + echo " Final Output: ${GREEN}$RESULT${NC}" + echo "" + + # Validate result + if [ "$RESULT" != "ERROR" ] && [ "$RESULT" != "" ]; then + # Check if result still has wildcards (shouldn't have) + if echo "$RESULT" | grep -q "__.*__"; then + echo "Status: ${YELLOW}⚠ PARTIAL - Wildcards remain${NC}" + else + echo "Status: ${GREEN}✅ SUCCESS - Complete expansion${NC}" + fi + else + echo "Status: ${RED}❌ FAILED - Error or empty result${NC}" + fi + echo "" +} + +echo "==========================================" +echo "Sequential Loading Test Suite" +echo "==========================================" +echo "" + +echo "${CYAN}Test Category 1: Depth Verification${NC}" +echo "Testing different transitive depths with stage tracking" +echo "" + +# Test 1: Depth 1 (Direct wildcard) +test_sequential "01" \ + "__samples/flower__" \ + 42 \ + "Depth 1 - Direct wildcard (no transitive)" \ + 1 + +# Test 2: Depth 2 (One level transitive) +test_sequential "02" \ + "__dragon__" \ + 200 \ + "Depth 2 - One level transitive" \ + 2 + +# Test 3: Depth 3 (Two levels transitive) +test_sequential "03" \ + "__dragon__ warrior" \ + 200 \ + "Depth 3 - Two levels with suffix" \ + 3 + +# Test 4: Depth 3 (Maximum verified depth) +test_sequential "04" \ + "__adnd__ creature" \ + 222 \ + "Depth 3 - Maximum transitive chain" \ + 3 + +echo "" +echo "${CYAN}Test Category 2: Mixed Transitive Scenarios${NC}" +echo "Testing wildcards mixed with dynamic prompts" +echo "" + +# Test 5: Transitive with dynamic prompt +test_sequential "05" \ + "{__dragon__|__adnd__} in battle" \ + 100 \ + "Dynamic selection of transitive wildcards" \ + 3 + +# Test 6: Multiple transitive wildcards +test_sequential "06" \ + "__dragon__ fights __adnd__" \ + 150 \ + "Multiple transitive wildcards in one prompt" \ + 3 + +# Test 7: Nested transitive in dynamic +test_sequential "07" \ + "powerful {__dragon__|__adnd__|simple warrior}" \ + 200 \ + "Transitive wildcards nested in dynamic prompts" \ + 3 + +echo "" +echo "${CYAN}Test Category 3: Complex Sequential Scenarios${NC}" +echo "Testing complex multi-stage expansions" +echo "" + +# Test 8: Transitive with weights +test_sequential "08" \ + "{5::__dragon__|3::__adnd__|regular warrior}" \ + 250 \ + "Weighted selection with transitive wildcards" \ + 3 + +# Test 9: Multi-select with transitive +test_sequential "09" \ + 
"{2\$\$, \$\$__dragon__|__adnd__|warrior|mage}" \ + 300 \ + "Multi-select including transitive wildcards" \ + 3 + +# Test 10: Quantified transitive +test_sequential "10" \ + "{2\$\$, \$\$3#__dragon__}" \ + 350 \ + "Quantified wildcard with transitive expansion" \ + 3 + +echo "" +echo "${CYAN}Test Category 4: Edge Cases${NC}" +echo "Testing boundary conditions and special cases" +echo "" + +# Test 11: Transitive in compound grammar +test_sequential "11" \ + "1{girl holding __samples/flower__|boy riding __dragon__}" \ + 400 \ + "Compound grammar with mixed transitive depths" \ + 3 + +# Test 12: Multiple wildcards, different depths +test_sequential "12" \ + "__samples/flower__ and __dragon__ with __colors__" \ + 450 \ + "Multiple wildcards with varying depths" \ + 3 + +# Test 13: YAML wildcard (no transitive) +test_sequential "13" \ + "__colors__" \ + 333 \ + "YAML wildcard (depth 1, no transitive)" \ + 1 + +# Test 14: Transitive + YAML combination +test_sequential "14" \ + "__dragon__ with __colors__ armor" \ + 500 \ + "Combination of transitive and YAML wildcards" \ + 3 + +echo "" +echo "${CYAN}Test Category 5: On-Demand Mode Verification${NC}" +echo "Testing sequential loading in on-demand mode" +echo "" + +# Switch to on-demand mode +cat > "$CONFIG_FILE" << EOF +[default] +dependency_version = 24 +mmdet_skip = True +sam_editor_cpu = False +sam_editor_model = sam_vit_h_4b8939.pth +custom_wildcards = $IMPACT_DIR/custom_wildcards +disable_gpu_opencv = True +wildcard_cache_limit_mb = 0.5 +EOF + +# Restart server +kill $SERVER_PID 2>/dev/null +pkill -9 -f "python.*main.py.*$PORT" 2>/dev/null +sleep 3 + +echo "Restarting server in on-demand mode (0.5MB limit)..." +cd "$COMFYUI_DIR" +bash run.sh --listen 127.0.0.1 --port $PORT > /tmp/sequential_ondemand.log 2>&1 & +SERVER_PID=$! + +echo "Waiting 70 seconds for server restart..." +for i in {1..70}; do + sleep 1 + if [ $((i % 10)) -eq 0 ]; then + echo " ... $i seconds" + fi +done + +if ! 
curl -s http://127.0.0.1:$PORT/ > /dev/null; then + echo "${RED}✗ Server failed to restart${NC}" + exit 1 +fi + +echo "${GREEN}✓ Server restarted in on-demand mode${NC}" +echo "" + +# Test 15: Same transitive in on-demand mode +test_sequential "15" \ + "__adnd__ creature" \ + 222 \ + "Depth 3 transitive in on-demand mode (should match full cache)" \ + 3 + +# Test 16: Complex scenario in on-demand +test_sequential "16" \ + "{__dragon__|__adnd__} {warrior|mage}" \ + 100 \ + "Complex transitive with dynamic in on-demand mode" \ + 3 + +# Test 17: Multiple transitive in on-demand +test_sequential "17" \ + "__dragon__ and __adnd__ together" \ + 150 \ + "Multiple transitive wildcards in on-demand mode" \ + 3 + +# Stop server +kill $SERVER_PID 2>/dev/null +pkill -9 -f "python.*main.py.*$PORT" 2>/dev/null + +echo "==========================================" +echo "Test Summary" +echo "==========================================" +echo "" +echo "Total tests: 17" +echo "Categories:" +echo " - Depth Verification (4 tests)" +echo " - Mixed Transitive Scenarios (3 tests)" +echo " - Complex Sequential Scenarios (3 tests)" +echo " - Edge Cases (4 tests)" +echo " - On-Demand Mode Verification (3 tests)" +echo "" +echo "Test Focus:" +echo " ✓ Multi-stage transitive wildcard expansion" +echo " ✓ Sequential loading across different depths" +echo " ✓ Transitive wildcards in dynamic prompts" +echo " ✓ Transitive wildcards with weights and multi-select" +echo " ✓ On-demand mode sequential loading verification" +echo "" +echo "Log saved to:" +echo " - Full cache mode: /tmp/sequential_test.log" +echo " - On-demand mode: /tmp/sequential_ondemand.log" +echo "" diff --git a/zavodik/nodes/ComfyUI-Impact-Pack/tests/wildcards/test_versatile_prompts.sh b/zavodik/nodes/ComfyUI-Impact-Pack/tests/wildcards/test_versatile_prompts.sh new file mode 100644 index 0000000000000000000000000000000000000000..9a21a1e263bbff8215a7d927cdd50c5466c64887 --- /dev/null +++ b/zavodik/nodes/ComfyUI-Impact-Pack/tests/wildcards/test_versatile_prompts.sh @@ -0,0 +1,281 @@ +#!/bin/bash +# Comprehensive wildcard prompt test suite +# Tests all features from ImpactWildcard tutorial + +# Auto-detect paths +SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)" +IMPACT_DIR="$(cd "$SCRIPT_DIR/.." && pwd)" +PORT=8192 +CONFIG_FILE="$IMPACT_DIR/impact-pack.ini" + +GREEN='\033[0;32m' +RED='\033[0;31m' +BLUE='\033[0;34m' +YELLOW='\033[1;33m' +NC='\033[0m' + +echo "==========================================" +echo "Versatile Wildcard Prompt Test Suite" +echo "==========================================" +echo "" + +# Setup config +cat > "$CONFIG_FILE" << EOF +[default] +dependency_version = 24 +mmdet_skip = True +sam_editor_cpu = False +sam_editor_model = sam_vit_h_4b8939.pth +custom_wildcards = $IMPACT_DIR/custom_wildcards +disable_gpu_opencv = True +wildcard_cache_limit_mb = 50 +EOF + +echo "Mode: Full cache mode (50MB limit)" +echo "" + +# Kill existing servers +pkill -9 -f "python.*main.py" 2>/dev/null || true +sleep 3 + +# Start server +COMFYUI_DIR="$(cd "$IMPACT_DIR/../.." && pwd)" +cd "$COMFYUI_DIR" +echo "Starting ComfyUI server on port $PORT..." +bash run.sh --listen 127.0.0.1 --port $PORT > /tmp/versatile_test.log 2>&1 & +SERVER_PID=$! + +# Wait for server +echo "Waiting 70 seconds for server startup..." +for i in {1..70}; do + sleep 1 + if [ $((i % 10)) -eq 0 ]; then + echo " ... $i seconds" + fi +done + +# Check server +if ! 
curl -s http://127.0.0.1:$PORT/ > /dev/null; then + echo "${RED}✗ Server failed to start${NC}" + exit 1 +fi + +echo "${GREEN}✓ Server started${NC}" +echo "" + +# Test function +test_prompt() { + local TEST_NUM=$1 + local CATEGORY=$2 + local PROMPT=$3 + local SEED=$4 + local DESCRIPTION=$5 + + echo "${BLUE}=== Test $TEST_NUM: $CATEGORY ===${NC}" + echo "Description: $DESCRIPTION" + echo "Raw prompt: ${YELLOW}$PROMPT${NC}" + echo "Seed: $SEED" + + RESULT=$(curl -s -X POST http://127.0.0.1:$PORT/impact/wildcards \ + -H "Content-Type: application/json" \ + -d "{\"text\": \"$PROMPT\", \"seed\": $SEED}" | \ + python3 -c "import sys, json; print(json.load(sys.stdin).get('text','ERROR'))" 2>/dev/null || echo "ERROR") + + echo "Populated: ${GREEN}$RESULT${NC}" + + if [ "$RESULT" != "ERROR" ] && [ "$RESULT" != "" ]; then + echo "Status: ${GREEN}✅ SUCCESS${NC}" + else + echo "Status: ${RED}❌ FAILED${NC}" + fi + echo "" +} + +echo "==========================================" +echo "Test Suite Execution" +echo "==========================================" +echo "" + +# Category 1: Simple Wildcards +test_prompt "01" "Simple Wildcard" \ + "__samples/flower__" \ + 42 \ + "Basic wildcard substitution" + +test_prompt "02" "Case Insensitive" \ + "__SAMPLES/FLOWER__" \ + 42 \ + "Wildcard names are case insensitive" + +test_prompt "03" "Mixed Case" \ + "__SaMpLeS/FlOwEr__" \ + 42 \ + "Mixed case should work identically" + +# Category 2: Dynamic Prompts +test_prompt "04" "Dynamic Prompt (Simple)" \ + "{red|green|blue} apple" \ + 100 \ + "Random selection from pipe-separated options" + +test_prompt "05" "Dynamic Prompt (Nested)" \ + "{a|{d|e|f}|c}" \ + 100 \ + "Nested dynamic prompts with inner choices" + +test_prompt "06" "Dynamic Prompt (Complex)" \ + "{blue apple|red {cherry|berry}|green melon}" \ + 100 \ + "Nested options with multiple levels" + +# Category 3: Selection Weights +test_prompt "07" "Weighted Selection" \ + "{5::red|4::green|7::blue|black} car" \ + 100 \ + "Weighted random selection (5:4:7:1 ratio)" + +test_prompt "08" "Weighted Complex" \ + "A {10::beautiful|5::stunning|amazing} {3::sunset|2::sunrise|dawn}" \ + 100 \ + "Multiple weighted selections in one prompt" + +# Category 4: Compound Grammar +test_prompt "09" "Wildcard + Dynamic" \ + "1girl holding {blue pencil|red apple|colorful __samples/flower__}" \ + 100 \ + "Mixing wildcard with dynamic prompt" + +test_prompt "10" "Multiple Wildcards" \ + "__samples/flower__ and __colors__" \ + 100 \ + "Multiple wildcards in single prompt" + +test_prompt "11" "Complex Compound" \ + "{1girl holding|1boy riding} {blue|red|__colors__} {pencil|__samples/flower__}" \ + 100 \ + "Complex nesting with wildcards and dynamics" + +# Category 5: Transitive Wildcards +test_prompt "12" "Transitive Depth 1" \ + "__dragon__" \ + 200 \ + "First level transitive wildcard" + +test_prompt "13" "Transitive Depth 2" \ + "__dragon__ warrior" \ + 200 \ + "Second level transitive with suffix" + +test_prompt "14" "Transitive Depth 3" \ + "__adnd__ creature" \ + 222 \ + "Third level transitive (adnd→dragon→dragon_spirit)" + +# Category 6: Multi-Select +test_prompt "15" "Multi-Select (Fixed)" \ + "{2\$\$, \$\$red|green|blue|yellow|purple}" \ + 100 \ + "Select exactly 2 items with comma separator" + +test_prompt "16" "Multi-Select (Range)" \ + "{1-3\$\$, \$\$apple|banana|orange|grape|mango}" \ + 100 \ + "Select 1-3 items randomly" + +test_prompt "17" "Multi-Select (Custom Sep)" \ + "{2\$\$ and \$\$cat|dog|bird|fish}" \ + 100 \ + "Custom separator: 'and' instead of comma" + 
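+# Multi-select grammar exercised in this category (per the test descriptions
+# above and below; the $$ markers are escaped as \$\$ inside double quotes):
+#   {2$$, $$a|b|c|...}   select exactly 2 options, joined with ", "
+#   {1-3$$, $$a|b|...}   select between 1 and 3 options
+#   {2$$ and $$a|b|...}  custom join separator ("and")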
+test_prompt "18" "Multi-Select (Or Sep)" \ + "{2-3\$\$ or \$\$happy|sad|excited|calm}" \ + 100 \ + "Range with 'or' separator" + +# Category 7: Quantifying Wildcard +test_prompt "19" "Quantified Wildcard" \ + "{2\$\$, \$\$3#__samples/flower__}" \ + 100 \ + "Repeat wildcard 3 times, select 2" + +test_prompt "20" "Quantified Complex" \ + "Garden with {3\$\$, \$\$5#__samples/flower__}" \ + 100 \ + "Select 3 from 5 repeated wildcards" + +# Category 8: YAML Wildcards +test_prompt "21" "YAML Simple" \ + "__colors__" \ + 333 \ + "YAML wildcard file" + +test_prompt "22" "YAML in Dynamic" \ + "{solid|{metallic|pastel} __colors__}" \ + 100 \ + "YAML wildcard nested in dynamic prompt" + +# Category 9: Complex Real-World Scenarios +test_prompt "23" "Realistic Prompt 1" \ + "1girl, {5::beautiful|3::stunning|gorgeous} __samples/flower__ in hair, {blue|red|__colors__} dress" \ + 100 \ + "Realistic character description" + +test_prompt "24" "Realistic Prompt 2" \ + "{detailed|highly detailed} {portrait|illustration} of {1girl|1boy} with {2\$\$, \$\$__samples/flower__|__samples/jewel__|elegant accessories}" \ + 100 \ + "Complex art prompt with multi-select" + +test_prompt "25" "Realistic Prompt 3" \ + "__adnd__ {warrior|mage|rogue}, {10::epic|5::legendary|mythical} {armor|robes}, wielding {ancient|magical} weapon" \ + 100 \ + "Fantasy character with transitive wildcard" + +# Category 10: Edge Cases +test_prompt "26" "Empty Dynamic" \ + "{|something|nothing}" \ + 100 \ + "Dynamic with empty option" + +test_prompt "27" "Single Option" \ + "{only_one}" \ + 100 \ + "Dynamic with single option (no choice)" + +test_prompt "28" "Deeply Nested" \ + "{a|{b|{c|{d|e}}}}" \ + 100 \ + "Very deep nesting" + +test_prompt "29" "Multiple Weights" \ + "{100::common|10::uncommon|1::rare|super_rare}" \ + 100 \ + "Extreme weight differences" + +test_prompt "30" "Wildcard Only" \ + "__samples/flower__" \ + 999 \ + "Different seed on same wildcard" + +# Stop server +kill $SERVER_PID 2>/dev/null +pkill -9 -f "python.*main.py.*$PORT" 2>/dev/null + +echo "==========================================" +echo "Test Summary" +echo "==========================================" +echo "" +echo "Total tests: 30" +echo "Categories tested:" +echo " - Simple Wildcards (3 tests)" +echo " - Dynamic Prompts (3 tests)" +echo " - Selection Weights (2 tests)" +echo " - Compound Grammar (3 tests)" +echo " - Transitive Wildcards (3 tests)" +echo " - Multi-Select (4 tests)" +echo " - Quantifying Wildcard (2 tests)" +echo " - YAML Wildcards (2 tests)" +echo " - Real-World Scenarios (3 tests)" +echo " - Edge Cases (5 tests)" +echo "" +echo "Log saved to: /tmp/versatile_test.log" +echo "" diff --git a/zavodik/nodes/ComfyUI-Impact-Pack/tests/wildcards/test_wildcard_consistency.sh b/zavodik/nodes/ComfyUI-Impact-Pack/tests/wildcards/test_wildcard_consistency.sh new file mode 100644 index 0000000000000000000000000000000000000000..8b229d01b629adb776f532b925ae7d9ac0e32691 --- /dev/null +++ b/zavodik/nodes/ComfyUI-Impact-Pack/tests/wildcards/test_wildcard_consistency.sh @@ -0,0 +1,226 @@ +#!/bin/bash +# Test wildcard consistency between full cache and on-demand modes + +set -e + +# Auto-detect paths +SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)" +IMPACT_PACK_DIR="$(cd "$SCRIPT_DIR/.." && pwd)" +COMFYUI_DIR="$(cd "$IMPACT_PACK_DIR/../.." 
&& pwd)" +CONFIG_FILE="$IMPACT_PACK_DIR/impact-pack.ini" +BACKUP_CONFIG="$IMPACT_PACK_DIR/impact-pack.ini.backup" + +# Colors +GREEN='\033[0;32m' +RED='\033[0;31m' +BLUE='\033[0;34m' +NC='\033[0m' # No Color + +echo "==========================================" +echo "Wildcard Consistency Test" +echo "==========================================" +echo "" + +# Backup original config +if [ -f "$CONFIG_FILE" ]; then + cp "$CONFIG_FILE" "$BACKUP_CONFIG" + echo "✓ Backed up original config" +fi + +# Function to kill ComfyUI +cleanup() { + pkill -f "python.*main.py" 2>/dev/null || true + sleep 2 +} + +# Function to test wildcard with specific config +test_with_config() { + local MODE=$1 + local CACHE_LIMIT=$2 + + echo "" + echo "${BLUE}Testing $MODE mode (cache limit: ${CACHE_LIMIT}MB)${NC}" + echo "----------------------------------------" + + # Update config + cat > "$CONFIG_FILE" << EOF +[default] +dependency_version = 24 +mmdet_skip = True +sam_editor_cpu = False +sam_editor_model = sam_vit_h_4b8939.pth +custom_wildcards = $IMPACT_PACK_DIR/custom_wildcards +disable_gpu_opencv = True +wildcard_cache_limit_mb = $CACHE_LIMIT +EOF + + # Start ComfyUI + cleanup + cd "$COMFYUI_DIR" + bash run.sh --listen 127.0.0.1 --port 8190 > /tmp/comfyui_${MODE}.log 2>&1 & + COMFYUI_PID=$! + + echo " Waiting for server startup..." + sleep 15 + + # Check if server is running + if ! curl -s http://127.0.0.1:8190/ > /dev/null; then + echo "${RED}✗ Server failed to start${NC}" + cat /tmp/comfyui_${MODE}.log | grep -i "wildcard\|error" | tail -20 + cleanup + return 1 + fi + + # Check log for mode + MODE_LOG=$(grep -i "wildcard.*mode" /tmp/comfyui_${MODE}.log | tail -1) + echo " $MODE_LOG" + + # Test 1: Simple wildcard + echo "" + echo " Test 1: Simple wildcard substitution" + RESULT1=$(curl -s http://127.0.0.1:8190/impact/wildcards \ + -X POST \ + -H "Content-Type: application/json" \ + -d '{"text": "__samples/flower__", "seed": 42}') + + TEXT1=$(echo "$RESULT1" | python3 -c "import sys, json; print(json.load(sys.stdin)['text'])") + echo " Input: __samples/flower__" + echo " Output: $TEXT1" + echo " Result: $RESULT1" > /tmp/result_${MODE}_test1.json + + # Test 2: Dynamic prompt + echo "" + echo " Test 2: Dynamic prompt" + RESULT2=$(curl -s http://127.0.0.1:8190/impact/wildcards \ + -X POST \ + -H "Content-Type: application/json" \ + -d '{"text": "{red|blue|green} flower", "seed": 123}') + + TEXT2=$(echo "$RESULT2" | python3 -c "import sys, json; print(json.load(sys.stdin)['text'])") + echo " Input: {red|blue|green} flower" + echo " Output: $TEXT2" + echo " Result: $RESULT2" > /tmp/result_${MODE}_test2.json + + # Test 3: Combined wildcard and dynamic prompt + echo "" + echo " Test 3: Combined wildcard + dynamic prompt" + RESULT3=$(curl -s http://127.0.0.1:8190/impact/wildcards \ + -X POST \ + -H "Content-Type: application/json" \ + -d '{"text": "beautiful {red|blue} __samples/flower__ with __samples/jewel__", "seed": 456}') + + TEXT3=$(echo "$RESULT3" | python3 -c "import sys, json; print(json.load(sys.stdin)['text'])") + echo " Input: beautiful {red|blue} __samples/flower__ with __samples/jewel__" + echo " Output: $TEXT3" + echo " Result: $RESULT3" > /tmp/result_${MODE}_test3.json + + # Test 4: Transitive YAML wildcard + echo "" + echo " Test 4: Transitive YAML wildcard (test.yaml)" + RESULT4=$(curl -s http://127.0.0.1:8190/impact/wildcards \ + -X POST \ + -H "Content-Type: application/json" \ + -d '{"text": "__colors__", "seed": 222}') + + TEXT4=$(echo "$RESULT4" | python3 -c "import sys, json; 
print(json.load(sys.stdin)['text'])") + echo " Input: __colors__ (transitive: __cold__|__warm__ -> blue|red|orange|yellow)" + echo " Output: $TEXT4" + echo " Expected: blue|red|orange|yellow" + echo " Result: $RESULT4" > /tmp/result_${MODE}_test4.json + + # Test 5: Wildcard list + echo "" + echo " Test 5: Wildcard list API" + LIST_RESULT=$(curl -s http://127.0.0.1:8190/impact/wildcards/list) + LIST_COUNT=$(echo "$LIST_RESULT" | python3 -c "import sys, json; print(len(json.load(sys.stdin)['data']))") + echo " Wildcards found: $LIST_COUNT" + echo " Sample: $(echo "$LIST_RESULT" | python3 -c "import sys, json; print(', '.join(json.load(sys.stdin)['data'][:5]))")" + echo " Result: $LIST_RESULT" > /tmp/result_${MODE}_list.json + + # Stop server + cleanup + + echo "" + echo "${GREEN}✓ $MODE mode tests completed${NC}" +} + +# Run tests +echo "" +echo "Starting consistency tests..." + +# Test full cache mode +test_with_config "full_cache" 50 + +# Test on-demand mode +test_with_config "on_demand" 1 + +# Compare results +echo "" +echo "==========================================" +echo "Comparing Results" +echo "==========================================" + +echo "" +echo "Test 1: Simple wildcard" +DIFF1=$(diff /tmp/result_full_cache_test1.json /tmp/result_on_demand_test1.json || true) +if [ -z "$DIFF1" ]; then + echo "${GREEN}✓ Results match${NC}" +else + echo "${RED}✗ Results differ${NC}" + echo "$DIFF1" +fi + +echo "" +echo "Test 2: Dynamic prompt" +DIFF2=$(diff /tmp/result_full_cache_test2.json /tmp/result_on_demand_test2.json || true) +if [ -z "$DIFF2" ]; then + echo "${GREEN}✓ Results match${NC}" +else + echo "${RED}✗ Results differ${NC}" + echo "$DIFF2" +fi + +echo "" +echo "Test 3: Combined wildcard + dynamic prompt" +DIFF3=$(diff /tmp/result_full_cache_test3.json /tmp/result_on_demand_test3.json || true) +if [ -z "$DIFF3" ]; then + echo "${GREEN}✓ Results match${NC}" +else + echo "${RED}✗ Results differ${NC}" + echo "$DIFF3" +fi + +echo "" +echo "Test 4: Transitive YAML wildcard" +DIFF4=$(diff /tmp/result_full_cache_test4.json /tmp/result_on_demand_test4.json || true) +if [ -z "$DIFF4" ]; then + echo "${GREEN}✓ Results match${NC}" +else + echo "${RED}✗ Results differ${NC}" + echo "$DIFF4" +fi + +echo "" +echo "Test 5: Wildcard list" +DIFF_LIST=$(diff /tmp/result_full_cache_list.json /tmp/result_on_demand_list.json || true) +if [ -z "$DIFF_LIST" ]; then + echo "${GREEN}✓ Wildcard lists match${NC}" +else + echo "${RED}✗ Wildcard lists differ${NC}" + echo "$DIFF_LIST" +fi + +# Restore original config +if [ -f "$BACKUP_CONFIG" ]; then + mv "$BACKUP_CONFIG" "$CONFIG_FILE" + echo "" + echo "✓ Restored original config" +fi + +# Final cleanup +cleanup + +echo "" +echo "==========================================" +echo "Consistency Test Complete" +echo "==========================================" diff --git a/zavodik/nodes/ComfyUI-Impact-Pack/tests/wildcards/test_wildcard_final.py b/zavodik/nodes/ComfyUI-Impact-Pack/tests/wildcards/test_wildcard_final.py new file mode 100644 index 0000000000000000000000000000000000000000..eb78d3dbdd9573a81f96d4810c4a02bef91ac8db --- /dev/null +++ b/zavodik/nodes/ComfyUI-Impact-Pack/tests/wildcards/test_wildcard_final.py @@ -0,0 +1,165 @@ +#!/usr/bin/env python3 +""" +Final comprehensive wildcard test - validates consistency between full cache and on-demand modes +Tests include: +1. Simple wildcard substitution +2. Nested wildcards (transitive loading) +3. Multiple wildcards in single prompt +4. Dynamic prompts combined with wildcards +5. 
YAML-based wildcards
+"""
+
+import subprocess
+import time
+import sys
+import requests
+from pathlib import Path
+
+# Auto-detect paths (this file lives in <pack>/tests/wildcards/, so the pack
+# root is two levels up and the ComfyUI root two levels above that)
+SCRIPT_DIR = Path(__file__).parent
+IMPACT_PACK_DIR = SCRIPT_DIR.parent.parent
+COMFYUI_DIR = IMPACT_PACK_DIR.parent.parent
+CONFIG_FILE = IMPACT_PACK_DIR / "impact-pack.ini"
+
+def run_test(test_name, cache_limit, test_cases):
+    """Run tests with a specific cache limit"""
+    print(f"\n{'='*60}")
+    print(f"Testing: {test_name}")
+    print(f"Cache Limit: {cache_limit} MB")
+    print(f"{'='*60}\n")
+
+    # Update config
+    config_content = f"""[default]
+dependency_version = 24
+mmdet_skip = True
+sam_editor_cpu = False
+sam_editor_model = sam_vit_h_4b8939.pth
+custom_wildcards = {IMPACT_PACK_DIR}/custom_wildcards
+disable_gpu_opencv = True
+wildcard_cache_limit_mb = {cache_limit}
+"""
+
+    with open(CONFIG_FILE, 'w') as f:
+        f.write(config_content)
+
+    # Start ComfyUI; route server output to a log file instead of a PIPE,
+    # since a PIPE that is never drained fills up and blocks the server
+    print("Starting ComfyUI...")
+    log_file = open(f"/tmp/wildcard_final_{cache_limit}mb.log", 'w')
+    proc = subprocess.Popen(
+        ['bash', 'run.sh', '--listen', '127.0.0.1', '--port', '8191'],
+        cwd=str(COMFYUI_DIR),
+        stdout=log_file,
+        stderr=subprocess.STDOUT
+    )
+    log_file.close()  # the child process keeps its own handle to the file
+
+    # Wait for server to start
+    time.sleep(20)
+
+    # Check that the server answers HTTP requests
+    try:
+        requests.get('http://127.0.0.1:8191/', timeout=5)
+        print("✓ Server started successfully\n")
+    except Exception:
+        print("✗ Server failed to start")
+        proc.terminate()
+        return {}
+
+    # Run test cases
+    results = {}
+    for i, (description, text, seed) in enumerate(test_cases, 1):
+        print(f"Test {i}: {description}")
+        print(f"  Input: {text}")
+
+        try:
+            response = requests.post(
+                'http://127.0.0.1:8191/impact/wildcards',
+                json={'text': text, 'seed': seed},
+                timeout=5
+            )
+            result = response.json()
+            output = result.get('text', '')
+            print(f"  Output: {output}")
+            results[f"test{i}"] = output
+        except Exception as e:
+            print(f"  Error: {e}")
+            results[f"test{i}"] = f"ERROR: {e}"
+
+        print()
+
+    # Stop server
+    proc.terminate()
+    time.sleep(2)
+
+    return results
+
+def main():
+    print("\n" + "="*60)
+    print("WILDCARD COMPREHENSIVE CONSISTENCY TEST")
+    print("="*60)
+
+    # Test cases: (description, wildcard text, seed)
+    test_cases = [
+        # Test 1: Simple wildcard
+        ("Simple wildcard", "__samples/flower__", 42),
+
+        # Test 2: Multiple wildcards
+        ("Multiple wildcards", "a __samples/flower__ and a __samples/jewel__", 123),
+
+        # Test 3: Dynamic prompt
+        ("Dynamic prompt", "{red|blue|green} flower", 456),
+
+        # Test 4: Combined wildcard + dynamic
+        ("Combined", "{beautiful|elegant} __samples/flower__ with {gold|silver} __samples/jewel__", 789),
+
+        # Test 5: Nested selection (multi-select)
+        ("Multi-select", "{2$$, $$__samples/flower__|rose|tulip|daisy}", 111),
+
+        # Test 6: Transitive YAML wildcard (custom_wildcards/test.yaml)
+        # __colors__ → __cold__|__warm__ → blue|red|orange|yellow
+        ("Transitive YAML wildcard", "__colors__", 222),
+
+        # Test 7: Transitive with text
+        ("Transitive with context", "a {beautiful|vibrant} __colors__ flower", 333),
+    ]
+
+    # Test with full cache mode
+    results_full = run_test("Full Cache Mode", 50, test_cases)
+
+    time.sleep(5)
+
+    # Test with on-demand mode
+    results_on_demand = run_test("On-Demand Mode", 1, test_cases)
+
+    # Compare results
+    print("\n" + "="*60)
+    print("RESULTS COMPARISON")
+    print("="*60 + "\n")
+
+    all_match = True
+    for key in results_full.keys():
+        full_result = results_full.get(key, "MISSING")
+        on_demand_result = results_on_demand.get(key, "MISSING")
+
+        match = full_result == on_demand_result
+        all_match = all_match and match
+
+        status = "✓ MATCH" if match else "✗ 
DIFFER" + print(f"{key}: {status}") + if not match: + print(f" Full cache: {full_result}") + print(f" On-demand: {on_demand_result}") + print() + + # Final verdict + print("="*60) + if all_match: + print("✅ ALL TESTS PASSED - Results are identical") + print("="*60) + return 0 + else: + print("❌ TESTS FAILED - Results differ between modes") + print("="*60) + return 1 + +if __name__ == "__main__": + sys.exit(main()) diff --git a/zavodik/nodes/ComfyUI-Impact-Pack/tests/wildcards/test_wildcard_lazy_loading.py b/zavodik/nodes/ComfyUI-Impact-Pack/tests/wildcards/test_wildcard_lazy_loading.py new file mode 100644 index 0000000000000000000000000000000000000000..958fbe452945bd78f8e2fa8bc3129f4c8a9c2270 --- /dev/null +++ b/zavodik/nodes/ComfyUI-Impact-Pack/tests/wildcards/test_wildcard_lazy_loading.py @@ -0,0 +1,200 @@ +#!/usr/bin/env python3 +""" +Test script for wildcard lazy loading functionality +""" +import sys +import os +import tempfile + +# Add parent directory to path +sys.path.insert(0, os.path.join(os.path.dirname(__file__), '..', '..')) + +from modules.impact import wildcards + +def test_lazy_loader(): + """Test LazyWildcardLoader class""" + print("=" * 60) + print("TEST 1: LazyWildcardLoader functionality") + print("=" * 60) + + # Create a temporary test file + with tempfile.NamedTemporaryFile(mode='w', suffix='.txt', delete=False) as f: + f.write("option1\n") + f.write("option2\n") + f.write("# comment line\n") + f.write("option3\n") + temp_file = f.name + + try: + # Test lazy loading + loader = wildcards.LazyWildcardLoader(temp_file, 'txt') + print(f"✓ Created LazyWildcardLoader: {loader}") + + # Check that data is not loaded yet + assert not loader._loaded, "Data should not be loaded initially" + print("✓ Data not loaded initially (lazy)") + + # Access data + data = loader.get_data() + print(f"✓ Loaded data: {data}") + assert len(data) == 3, f"Expected 3 items, got {len(data)}" + assert 'option1' in data, "option1 should be in data" + + # Check that data is now loaded + assert loader._loaded, "Data should be loaded after access" + print("✓ Data loaded after first access") + + # Test list-like operations + print(f"✓ len(loader) = {len(loader)}") + assert len(loader) == 3 + + print(f"✓ loader[0] = {loader[0]}") + assert loader[0] == 'option1' + + print(f"✓ 'option2' in loader = {'option2' in loader}") + assert 'option2' in loader + + print(f"✓ list(loader) = {list(loader)}") + + print("\n✅ LazyWildcardLoader tests PASSED\n") + + finally: + os.unlink(temp_file) + + +def test_cache_limit_detection(): + """Test automatic cache mode detection""" + print("=" * 60) + print("TEST 2: Cache limit detection") + print("=" * 60) + + # Get current cache limit + limit = wildcards.get_cache_limit() + print(f"✓ Cache limit: {limit / (1024*1024):.2f} MB") + + # Calculate wildcard directory size + wildcards_dir = wildcards.wildcards_path + total_size = wildcards.calculate_directory_size(wildcards_dir) + print(f"✓ Wildcards directory size: {total_size / (1024*1024):.2f} MB") + print(f"✓ Wildcards path: {wildcards_dir}") + + # Determine expected mode + if total_size >= limit: + expected_mode = "on-demand" + else: + expected_mode = "full cache" + + print(f"✓ Expected mode: {expected_mode}") + print("\n✅ Cache detection tests PASSED\n") + + +def test_wildcard_loading(): + """Test actual wildcard loading""" + print("=" * 60) + print("TEST 3: Wildcard loading with current mode") + print("=" * 60) + + # Clear existing wildcards + wildcards.wildcard_dict = {} + wildcards._on_demand_mode = False + + # Load 
wildcards + print("Loading wildcards...") + wildcards.wildcard_load() + + # Check mode + is_on_demand = wildcards.is_on_demand_mode() + print(f"✓ On-demand mode active: {is_on_demand}") + + # Check loaded wildcards + wc_list = wildcards.get_wildcard_list() + print(f"✓ Loaded {len(wc_list)} wildcards") + + if len(wc_list) > 0: + print(f"✓ Sample wildcards: {wc_list[:5]}") + + # Test accessing a wildcard + if len(wildcards.wildcard_dict) > 0: + key = list(wildcards.wildcard_dict.keys())[0] + value = wildcards.wildcard_dict[key] + print(f"✓ Sample wildcard '{key}' type: {type(value).__name__}") + + if isinstance(value, wildcards.LazyWildcardLoader): + print(f" - LazyWildcardLoader: {value}") + print(f" - Loaded: {value._loaded}") + # Access the data + data = value.get_data() + print(f" - Data loaded, items: {len(data)}") + else: + print(f" - Direct list, items: {len(value)}") + + print("\n✅ Wildcard loading tests PASSED\n") + + +def test_on_demand_simulation(): + """Simulate on-demand mode with temporary wildcards""" + print("=" * 60) + print("TEST 4: On-demand mode simulation") + print("=" * 60) + + # Create temporary wildcard directory + with tempfile.TemporaryDirectory() as tmpdir: + # Create test files + test_file1 = os.path.join(tmpdir, "test1.txt") + test_file2 = os.path.join(tmpdir, "test2.txt") + + with open(test_file1, 'w') as f: + f.write("option1a\noption1b\noption1c\n") + + with open(test_file2, 'w') as f: + f.write("option2a\noption2b\n") + + # Clear and load with on-demand mode + wildcards.wildcard_dict = {} + wildcards._on_demand_mode = False + + print(f"✓ Loading from temp directory: {tmpdir}") + wildcards.read_wildcard_dict(tmpdir, on_demand=True) + + print(f"✓ Loaded {len(wildcards.wildcard_dict)} wildcards") + + for key, value in wildcards.wildcard_dict.items(): + print(f"✓ Wildcard '{key}':") + print(f" - Type: {type(value).__name__}") + if isinstance(value, wildcards.LazyWildcardLoader): + print(f" - Initially loaded: {value._loaded}") + data = value.get_data() + print(f" - After access: loaded={value._loaded}, items={len(data)}") + print(f" - Sample data: {data[:2]}") + + print("\n✅ On-demand simulation tests PASSED\n") + + +def main(): + """Run all tests""" + print("\n" + "=" * 60) + print("WILDCARD LAZY LOADING TEST SUITE") + print("=" * 60 + "\n") + + try: + test_lazy_loader() + test_cache_limit_detection() + test_wildcard_loading() + test_on_demand_simulation() + + print("=" * 60) + print("✅ ALL TESTS PASSED") + print("=" * 60) + return 0 + + except Exception as e: + print("\n" + "=" * 60) + print(f"❌ TEST FAILED: {e}") + print("=" * 60) + import traceback + traceback.print_exc() + return 1 + + +if __name__ == "__main__": + sys.exit(main()) diff --git a/zavodik/nodes/ComfyUI-Impact-Pack/tests/wildcards/verify_ondemand_mode.sh b/zavodik/nodes/ComfyUI-Impact-Pack/tests/wildcards/verify_ondemand_mode.sh new file mode 100644 index 0000000000000000000000000000000000000000..1abc1b3abb80d8510d30e0221959355801387cf8 --- /dev/null +++ b/zavodik/nodes/ComfyUI-Impact-Pack/tests/wildcards/verify_ondemand_mode.sh @@ -0,0 +1,97 @@ +#!/bin/bash +# Verify that on-demand mode is actually triggered with 0.5MB limit + +# Auto-detect paths +SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)" +IMPACT_DIR="$(cd "$SCRIPT_DIR/.." 
&& pwd)" +CONFIG_FILE="$IMPACT_DIR/impact-pack.ini" + +echo "==========================================" +echo "Verify On-Demand Mode Activation" +echo "==========================================" +echo "" + +# Set config to 0.5MB limit +cat > "$CONFIG_FILE" << EOF +[default] +dependency_version = 24 +mmdet_skip = True +sam_editor_cpu = False +sam_editor_model = sam_vit_h_4b8939.pth +custom_wildcards = $IMPACT_DIR/custom_wildcards +disable_gpu_opencv = True +wildcard_cache_limit_mb = 0.5 +EOF + +echo "Config set to 0.5MB cache limit" +echo "" + +# Kill any existing servers +pkill -9 -f "python.*main.py" 2>/dev/null || true +sleep 3 + +# Start server +COMFYUI_DIR="$(cd "$IMPACT_DIR/../.." && pwd)" +cd "$COMFYUI_DIR" +echo "Starting ComfyUI server on port 8190..." +bash run.sh --listen 127.0.0.1 --port 8190 > /tmp/verify_ondemand.log 2>&1 & +SERVER_PID=$! + +# Wait for server +echo "Waiting 70 seconds for server startup..." +for i in {1..70}; do + sleep 1 + if [ $((i % 10)) -eq 0 ]; then + echo " ... $i seconds" + fi +done + +# Check server +if ! curl -s http://127.0.0.1:8190/ > /dev/null; then + echo "✗ Server failed to start" + cat /tmp/verify_ondemand.log + exit 1 +fi + +echo "✓ Server started" +echo "" + +# Check loading mode +echo "Loading mode detected:" +grep -i "wildcard.*mode\|wildcard.*size.*cache" /tmp/verify_ondemand.log | grep -v "Maximum depth" +echo "" + +# Verify mode +if grep -q "Using on-demand loading mode" /tmp/verify_ondemand.log; then + echo "✅ SUCCESS: On-demand mode activated with 0.5MB limit!" +elif grep -q "Using full cache mode" /tmp/verify_ondemand.log; then + echo "❌ FAIL: Full cache mode used (should be on-demand)" + echo "" + echo "Cache limit in log:" + grep "cache limit" /tmp/verify_ondemand.log +else + echo "⚠️ WARNING: Could not determine mode" +fi + +# Test wildcard functionality +echo "" +echo "Testing wildcard functionality in on-demand mode..." 
+curl -s -X POST http://127.0.0.1:8190/impact/wildcards \ + -H "Content-Type: application/json" \ + -d '{"text": "__adnd__ creature", "seed": 222}' > /tmp/verify_result.json + +RESULT=$(cat /tmp/verify_result.json | python3 -c "import sys, json; print(json.load(sys.stdin).get('text','ERROR'))" 2>/dev/null || echo "ERROR") +echo " Depth 3 transitive (seed=222): $RESULT" + +if [ "$RESULT" = "Shrewd Hatchling creature" ]; then + echo " ✅ Transitive wildcard works correctly" +else + echo " ❌ Unexpected result: $RESULT" +fi + +# Stop server +kill $SERVER_PID 2>/dev/null +pkill -9 -f "python.*main.py.*8190" 2>/dev/null + +echo "" +echo "Full log saved to: /tmp/verify_ondemand.log" diff --git a/zavodik/nodes/ComfyUI-Impact-Pack/tests/workflows/advanced-sampler.json b/zavodik/nodes/ComfyUI-Impact-Pack/tests/workflows/advanced-sampler.json new file mode 100644 index 0000000000000000000000000000000000000000..d531ed8e1cc6cbfc582b06d8c595ae2173def9bc --- /dev/null +++ b/zavodik/nodes/ComfyUI-Impact-Pack/tests/workflows/advanced-sampler.json @@ -0,0 +1,976 @@ +{ + "last_node_id": 27, + "last_link_id": 46, + "nodes": [ + { + "id": 11, + "type": "EditBasicPipe", + "pos": [ + 1260, + 590 + ], + "size": { + "0": 267, + "1": 126 + }, + "flags": {}, + "order": 6, + "mode": 0, + "inputs": [ + { + "name": "basic_pipe", + "type": "BASIC_PIPE", + "link": 15 + }, + { + "name": "model", + "type": "MODEL", + "link": null + }, + { + "name": "clip", + "type": "CLIP", + "link": null + }, + { + "name": "vae", + "type": "VAE", + "link": null + }, + { + "name": "positive", + "type": "CONDITIONING", + "link": 17 + }, + { + "name": "negative", + "type": "CONDITIONING", + "link": null + } + ], + "outputs": [ + { + "name": "basic_pipe", + "type": "BASIC_PIPE", + "links": [ + 20 + ], + "shape": 3, + "slot_index": 0 + } + ], + "properties": { + "Node name for S&R": "EditBasicPipe" + } + }, + { + "id": 12, + "type": "CLIPTextEncode", + "pos": [ + 420, + 670 + ], + "size": { + "0": 422.84503173828125, + "1": 164.31304931640625 + }, + "flags": {}, + "order": 4, + "mode": 0, + "inputs": [ + { + "name": "clip", + "type": "CLIP", + "link": 16 + } + ], + "outputs": [ + { + "name": "CONDITIONING", + "type": "CONDITIONING", + "links": [ + 17 + ], + "slot_index": 0 + } + ], + "properties": { + "Node name for S&R": "CLIPTextEncode" + }, + "widgets_values": [ + "photorealistic:1.4, best quality:1.4, masterpiece, 1girl is sitting in the cafe terrace, (colorful hair:1.1)" + ] + }, + { + "id": 6, + "type": "CLIPTextEncode", + "pos": [ + 415, + 186 + ], + "size": { + "0": 422.84503173828125, + "1": 164.31304931640625 + }, + "flags": {}, + "order": 2, + "mode": 0, + "inputs": [ + { + "name": "clip", + "type": "CLIP", + "link": 3 + } + ], + "outputs": [ + { + "name": "CONDITIONING", + "type": "CONDITIONING", + "links": [ + 13 + ], + "slot_index": 0 + } + ], + "properties": { + "Node name for S&R": "CLIPTextEncode" + }, + "widgets_values": [ + "photorealistic:1.4, best quality:1.4, masterpiece, 1girl is sitting in the cafe terrace" + ] + }, + { + "id": 7, + "type": "CLIPTextEncode", + "pos": [ + 413, + 389 + ], + "size": { + "0": 425.27801513671875, + "1": 180.6060791015625 + }, + "flags": {}, + "order": 3, + "mode": 0, + "inputs": [ + { + "name": "clip", + "type": "CLIP", + "link": 5 + } + ], + "outputs": [ + { + "name": "CONDITIONING", + "type": "CONDITIONING", + "links": [ + 14 + ], + "slot_index": 0 + } + ], + "properties": { + "Node name for S&R": "CLIPTextEncode" + }, + "widgets_values": [ + "text, watermark, low quality:1.4, worst 
quality:1.4" + ] + }, + { + "id": 10, + "type": "ToBasicPipe", + "pos": [ + 952, + 189 + ], + "size": { + "0": 241.79998779296875, + "1": 106 + }, + "flags": {}, + "order": 5, + "mode": 0, + "inputs": [ + { + "name": "model", + "type": "MODEL", + "link": 10 + }, + { + "name": "clip", + "type": "CLIP", + "link": 11 + }, + { + "name": "vae", + "type": "VAE", + "link": 12 + }, + { + "name": "positive", + "type": "CONDITIONING", + "link": 13 + }, + { + "name": "negative", + "type": "CONDITIONING", + "link": 14 + } + ], + "outputs": [ + { + "name": "basic_pipe", + "type": "BASIC_PIPE", + "links": [ + 15, + 19, + 33 + ], + "shape": 3, + "slot_index": 0 + } + ], + "properties": { + "Node name for S&R": "ToBasicPipe" + } + }, + { + "id": 22, + "type": "FromBasicPipe", + "pos": [ + 880, + 1040 + ], + "size": { + "0": 241.79998779296875, + "1": 106 + }, + "flags": {}, + "order": 8, + "mode": 0, + "inputs": [ + { + "name": "basic_pipe", + "type": "BASIC_PIPE", + "link": 33 + } + ], + "outputs": [ + { + "name": "model", + "type": "MODEL", + "links": [ + 34 + ], + "shape": 3, + "slot_index": 0 + }, + { + "name": "clip", + "type": "CLIP", + "links": null, + "shape": 3 + }, + { + "name": "vae", + "type": "VAE", + "links": [ + 40 + ], + "shape": 3, + "slot_index": 2 + }, + { + "name": "positive", + "type": "CONDITIONING", + "links": [ + 35 + ], + "shape": 3, + "slot_index": 3 + }, + { + "name": "negative", + "type": "CONDITIONING", + "links": [ + 36 + ], + "shape": 3, + "slot_index": 4 + } + ], + "properties": { + "Node name for S&R": "FromBasicPipe" + } + }, + { + "id": 24, + "type": "VAEDecode", + "pos": [ + 1938, + 935 + ], + "size": { + "0": 210, + "1": 46 + }, + "flags": {}, + "order": 14, + "mode": 0, + "inputs": [ + { + "name": "samples", + "type": "LATENT", + "link": 46 + }, + { + "name": "vae", + "type": "VAE", + "link": 40 + } + ], + "outputs": [ + { + "name": "IMAGE", + "type": "IMAGE", + "links": [ + 41 + ], + "shape": 3, + "slot_index": 0 + } + ], + "properties": { + "Node name for S&R": "VAEDecode" + } + }, + { + "id": 4, + "type": "CheckpointLoaderSimple", + "pos": [ + -5, + 212 + ], + "size": { + "0": 315, + "1": 98 + }, + "flags": {}, + "order": 0, + "mode": 0, + "outputs": [ + { + "name": "MODEL", + "type": "MODEL", + "links": [ + 10 + ], + "slot_index": 0 + }, + { + "name": "CLIP", + "type": "CLIP", + "links": [ + 3, + 5, + 11, + 16 + ], + "slot_index": 1 + }, + { + "name": "VAE", + "type": "VAE", + "links": [ + 12, + 31 + ], + "slot_index": 2 + } + ], + "properties": { + "Node name for S&R": "CheckpointLoaderSimple" + }, + "widgets_values": [ + "V07_v07.safetensors" + ] + }, + { + "id": 25, + "type": "PreviewImage", + "pos": [ + 2175, + 1079 + ], + "size": { + "0": 516, + "1": 424 + }, + "flags": {}, + "order": 15, + "mode": 0, + "inputs": [ + { + "name": "images", + "type": "IMAGE", + "link": 41 + } + ], + "properties": { + "Node name for S&R": "PreviewImage" + } + }, + { + "id": 13, + "type": "KSamplerAdvancedProvider", + "pos": [ + 1727, + 192 + ], + "size": { + "0": 355.20001220703125, + "1": 154 + }, + "flags": {}, + "order": 7, + "mode": 0, + "inputs": [ + { + "name": "basic_pipe", + "type": "BASIC_PIPE", + "link": 19 + } + ], + "outputs": [ + { + "name": "KSAMPLER_ADVANCED", + "type": "KSAMPLER_ADVANCED", + "links": [ + 42 + ], + "shape": 3, + "slot_index": 0 + } + ], + "properties": { + "Node name for S&R": "KSamplerAdvancedProvider" + }, + "widgets_values": [ + 8, + "fixed", + "normal" + ] + }, + { + "id": 16, + "type": "EmptyLatentImage", + "pos": [ + 532, + 1143 + ], + 
"size": { + "0": 315, + "1": 106 + }, + "flags": {}, + "order": 1, + "mode": 0, + "outputs": [ + { + "name": "LATENT", + "type": "LATENT", + "links": [ + 28, + 45 + ], + "shape": 3, + "slot_index": 0 + } + ], + "properties": { + "Node name for S&R": "EmptyLatentImage" + }, + "widgets_values": [ + 792, + 512, + 1 + ] + }, + { + "id": 19, + "type": "KSampler", + "pos": [ + 1194.657802060547, + 1075.971700888672 + ], + "size": [ + 315, + 473.9999771118164 + ], + "flags": {}, + "order": 10, + "mode": 0, + "inputs": [ + { + "name": "model", + "type": "MODEL", + "link": 34 + }, + { + "name": "positive", + "type": "CONDITIONING", + "link": 35 + }, + { + "name": "negative", + "type": "CONDITIONING", + "link": 36 + }, + { + "name": "latent_image", + "type": "LATENT", + "link": 28 + } + ], + "outputs": [ + { + "name": "LATENT", + "type": "LATENT", + "links": [ + 30 + ], + "shape": 3, + "slot_index": 0 + } + ], + "properties": { + "Node name for S&R": "KSampler" + }, + "widgets_values": [ + 1107040072933062, + "fixed", + 20, + 8, + "euler", + "normal", + 1 + ] + }, + { + "id": 27, + "type": "TwoAdvancedSamplersForMask", + "pos": [ + 2187, + 266 + ], + "size": [ + 315, + 426.00000762939453 + ], + "flags": {}, + "order": 13, + "mode": 0, + "inputs": [ + { + "name": "samples", + "type": "LATENT", + "link": 45 + }, + { + "name": "base_sampler", + "type": "KSAMPLER_ADVANCED", + "link": 42 + }, + { + "name": "mask_sampler", + "type": "KSAMPLER_ADVANCED", + "link": 43 + }, + { + "name": "mask", + "type": "MASK", + "link": 44 + } + ], + "outputs": [ + { + "name": "LATENT", + "type": "LATENT", + "links": [ + 46 + ], + "shape": 3, + "slot_index": 0 + } + ], + "properties": { + "Node name for S&R": "TwoAdvancedSamplersForMask" + }, + "widgets_values": [ + 1107040072933062, + "fixed", + 20, + 1, + 10 + ] + }, + { + "id": 23, + "type": "PreviewBridge", + "pos": [ + 1778, + 1098 + ], + "size": { + "0": 315, + "1": 290 + }, + "flags": {}, + "order": 12, + "mode": 0, + "inputs": [ + { + "name": "images", + "type": "IMAGE", + "link": 37 + } + ], + "outputs": [ + { + "name": "IMAGE", + "type": "IMAGE", + "links": null, + "shape": 3 + }, + { + "name": "MASK", + "type": "MASK", + "links": [ + 44 + ], + "shape": 3, + "slot_index": 1 + } + ], + "properties": { + "Node name for S&R": "PreviewBridge" + }, + "widgets_values": [ + { + "filename": "clipspace-mask-348148.69999999925.png", + "subfolder": "clipspace", + "type": "input", + "image_hash": 492469318636598500, + "forward_filename": "ComfyUI_00001_.png", + "forward_subfolder": "", + "forward_type": "temp" + } + ] + }, + { + "id": 15, + "type": "KSamplerAdvancedProvider", + "pos": [ + 1719, + 592 + ], + "size": { + "0": 355.20001220703125, + "1": 154 + }, + "flags": {}, + "order": 9, + "mode": 0, + "inputs": [ + { + "name": "basic_pipe", + "type": "BASIC_PIPE", + "link": 20 + } + ], + "outputs": [ + { + "name": "KSAMPLER_ADVANCED", + "type": "KSAMPLER_ADVANCED", + "links": [ + 43 + ], + "shape": 3, + "slot_index": 0 + } + ], + "properties": { + "Node name for S&R": "KSamplerAdvancedProvider" + }, + "widgets_values": [ + 8, + "fixed", + "normal" + ] + }, + { + "id": 20, + "type": "VAEDecode", + "pos": [ + 1546, + 972 + ], + "size": { + "0": 210, + "1": 46 + }, + "flags": {}, + "order": 11, + "mode": 0, + "inputs": [ + { + "name": "samples", + "type": "LATENT", + "link": 30 + }, + { + "name": "vae", + "type": "VAE", + "link": 31 + } + ], + "outputs": [ + { + "name": "IMAGE", + "type": "IMAGE", + "links": [ + 37 + ], + "shape": 3, + "slot_index": 0 + } + ], + "properties": 
{ + "Node name for S&R": "VAEDecode" + } + } + ], + "links": [ + [ + 3, + 4, + 1, + 6, + 0, + "CLIP" + ], + [ + 5, + 4, + 1, + 7, + 0, + "CLIP" + ], + [ + 10, + 4, + 0, + 10, + 0, + "MODEL" + ], + [ + 11, + 4, + 1, + 10, + 1, + "CLIP" + ], + [ + 12, + 4, + 2, + 10, + 2, + "VAE" + ], + [ + 13, + 6, + 0, + 10, + 3, + "CONDITIONING" + ], + [ + 14, + 7, + 0, + 10, + 4, + "CONDITIONING" + ], + [ + 15, + 10, + 0, + 11, + 0, + "BASIC_PIPE" + ], + [ + 16, + 4, + 1, + 12, + 0, + "CLIP" + ], + [ + 17, + 12, + 0, + 11, + 4, + "CONDITIONING" + ], + [ + 19, + 10, + 0, + 13, + 0, + "BASIC_PIPE" + ], + [ + 20, + 11, + 0, + 15, + 0, + "BASIC_PIPE" + ], + [ + 28, + 16, + 0, + 19, + 3, + "LATENT" + ], + [ + 30, + 19, + 0, + 20, + 0, + "LATENT" + ], + [ + 31, + 4, + 2, + 20, + 1, + "VAE" + ], + [ + 33, + 10, + 0, + 22, + 0, + "BASIC_PIPE" + ], + [ + 34, + 22, + 0, + 19, + 0, + "MODEL" + ], + [ + 35, + 22, + 3, + 19, + 1, + "CONDITIONING" + ], + [ + 36, + 22, + 4, + 19, + 2, + "CONDITIONING" + ], + [ + 37, + 20, + 0, + 23, + 0, + "IMAGE" + ], + [ + 40, + 22, + 2, + 24, + 1, + "VAE" + ], + [ + 41, + 24, + 0, + 25, + 0, + "IMAGE" + ], + [ + 42, + 13, + 0, + 27, + 1, + "KSAMPLER_ADVANCED" + ], + [ + 43, + 15, + 0, + 27, + 2, + "KSAMPLER_ADVANCED" + ], + [ + 44, + 23, + 1, + 27, + 3, + "MASK" + ], + [ + 45, + 16, + 0, + 27, + 0, + "LATENT" + ], + [ + 46, + 27, + 0, + 24, + 0, + "LATENT" + ] + ], + "groups": [], + "config": {}, + "extra": {}, + "version": 0.4 +} \ No newline at end of file diff --git a/zavodik/nodes/ComfyUI-Impact-Pack/tests/workflows/detailer-pipe-test-sdxl.json b/zavodik/nodes/ComfyUI-Impact-Pack/tests/workflows/detailer-pipe-test-sdxl.json new file mode 100644 index 0000000000000000000000000000000000000000..92645260c160107d6d262e4e7e24d97c87f1abcd --- /dev/null +++ b/zavodik/nodes/ComfyUI-Impact-Pack/tests/workflows/detailer-pipe-test-sdxl.json @@ -0,0 +1,1989 @@ +{ + "last_node_id": 52, + "last_link_id": 150, + "nodes": [ + { + "id": 12, + "type": "CLIPTextEncodeSDXLRefiner", + "pos": [ + 480, + 990 + ], + "size": { + "0": 400, + "1": 200 + }, + "flags": {}, + "order": 9, + "mode": 0, + "inputs": [ + { + "name": "clip", + "type": "CLIP", + "link": 11 + } + ], + "outputs": [ + { + "name": "CONDITIONING", + "type": "CONDITIONING", + "links": [ + 13 + ], + "shape": 3 + } + ], + "properties": { + "Node name for S&R": "CLIPTextEncodeSDXLRefiner" + }, + "widgets_values": [ + 6, + 1024, + 1024, + "ugly, male, western" + ] + }, + { + "id": 14, + "type": "UltralyticsDetectorProvider", + "pos": [ + 963, + 955 + ], + "size": { + "0": 315, + "1": 78 + }, + "flags": {}, + "order": 0, + "mode": 0, + "outputs": [ + { + "name": "BBOX_DETECTOR", + "type": "BBOX_DETECTOR", + "links": [ + 16 + ], + "shape": 3 + }, + { + "name": "SEGM_DETECTOR", + "type": "SEGM_DETECTOR", + "links": null, + "shape": 3 + } + ], + "properties": { + "Node name for S&R": "UltralyticsDetectorProvider" + }, + "widgets_values": [ + "bbox/face_yolov8m.pt" + ] + }, + { + "id": 18, + "type": "PreviewImage", + "pos": [ + 3270, + 810 + ], + "size": { + "0": 210, + "1": 246 + }, + "flags": {}, + "order": 21, + "mode": 0, + "inputs": [ + { + "name": "images", + "type": "IMAGE", + "link": 20 + } + ], + "properties": { + "Node name for S&R": "PreviewImage" + } + }, + { + "id": 15, + "type": "SAMLoader", + "pos": [ + 967, + 1086 + ], + "size": { + "0": 315, + "1": 82 + }, + "flags": {}, + "order": 1, + "mode": 0, + "outputs": [ + { + "name": "SAM_MODEL", + "type": "SAM_MODEL", + "links": [ + 17 + ], + "shape": 3 + } + ], + "properties": { + "Node 
name for S&R": "SAMLoader" + }, + "widgets_values": [ + "sam_vit_b_01ec64.pth", + "CPU" + ] + }, + { + "id": 9, + "type": "CLIPTextEncodeSDXL", + "pos": [ + 640, + -550 + ], + "size": { + "0": 400, + "1": 270 + }, + "flags": {}, + "order": 5, + "mode": 0, + "inputs": [ + { + "name": "clip", + "type": "CLIP", + "link": 6 + } + ], + "outputs": [ + { + "name": "CONDITIONING", + "type": "CONDITIONING", + "links": [ + 9 + ], + "shape": 3, + "slot_index": 0 + } + ], + "properties": { + "Node name for S&R": "CLIPTextEncodeSDXL" + }, + "widgets_values": [ + 1024, + 1024, + 0, + 0, + 1024, + 1024, + "a closeup photograph of cute girl", + "closeup" + ] + }, + { + "id": 7, + "type": "CheckpointLoaderSimple", + "pos": [ + 60, + -580 + ], + "size": { + "0": 315, + "1": 98 + }, + "flags": {}, + "order": 2, + "mode": 0, + "outputs": [ + { + "name": "MODEL", + "type": "MODEL", + "links": [ + 1 + ], + "shape": 3, + "slot_index": 0 + }, + { + "name": "CLIP", + "type": "CLIP", + "links": [ + 2, + 6, + 7 + ], + "shape": 3, + "slot_index": 1 + }, + { + "name": "VAE", + "type": "VAE", + "links": [ + 3 + ], + "shape": 3, + "slot_index": 2 + } + ], + "properties": { + "Node name for S&R": "CheckpointLoaderSimple" + }, + "widgets_values": [ + "SDXL/rundiffusionXL_beta.safetensors" + ] + }, + { + "id": 13, + "type": "LoadImage", + "pos": [ + 257, + 164 + ], + "size": { + "0": 315, + "1": 314 + }, + "flags": {}, + "order": 3, + "mode": 0, + "outputs": [ + { + "name": "IMAGE", + "type": "IMAGE", + "links": [ + 15, + 64, + 112 + ], + "shape": 3, + "slot_index": 0 + }, + { + "name": "MASK", + "type": "MASK", + "links": [], + "shape": 3, + "slot_index": 1 + } + ], + "properties": { + "Node name for S&R": "LoadImage" + }, + "widgets_values": [ + "chunli.png", + "image" + ] + }, + { + "id": 10, + "type": "CLIPTextEncodeSDXL", + "pos": [ + 640, + -230 + ], + "size": { + "0": 400, + "1": 270 + }, + "flags": {}, + "order": 6, + "mode": 0, + "inputs": [ + { + "name": "clip", + "type": "CLIP", + "link": 7 + } + ], + "outputs": [ + { + "name": "CONDITIONING", + "type": "CONDITIONING", + "links": [ + 8 + ], + "shape": 3, + "slot_index": 0 + } + ], + "properties": { + "Node name for S&R": "CLIPTextEncodeSDXL" + }, + "widgets_values": [ + 1024, + 1024, + 0, + 0, + 1024, + 1024, + "ugly, male", + "ugly, male" + ] + }, + { + "id": 17, + "type": "PreviewImage", + "pos": [ + 3270, + 450 + ], + "size": { + "0": 210, + "1": 246 + }, + "flags": {}, + "order": 20, + "mode": 0, + "inputs": [ + { + "name": "images", + "type": "IMAGE", + "link": 19 + } + ], + "properties": { + "Node name for S&R": "PreviewImage" + } + }, + { + "id": 8, + "type": "CheckpointLoaderSimple", + "pos": [ + 120, + 590 + ], + "size": { + "0": 315, + "1": 98 + }, + "flags": {}, + "order": 4, + "mode": 0, + "outputs": [ + { + "name": "MODEL", + "type": "MODEL", + "links": [ + 69 + ], + "shape": 3, + "slot_index": 0 + }, + { + "name": "CLIP", + "type": "CLIP", + "links": [ + 5, + 10, + 11 + ], + "shape": 3, + "slot_index": 1 + }, + { + "name": "VAE", + "type": "VAE", + "links": null, + "shape": 3, + "slot_index": 2 + } + ], + "properties": { + "Node name for S&R": "CheckpointLoaderSimple" + }, + "widgets_values": [ + "SDXL/sd_xl_refiner_1.0_0.9vae.safetensors" + ] + }, + { + "id": 11, + "type": "CLIPTextEncodeSDXLRefiner", + "pos": [ + 483, + 738 + ], + "size": { + "0": 400, + "1": 200 + }, + "flags": {}, + "order": 8, + "mode": 0, + "inputs": [ + { + "name": "clip", + "type": "CLIP", + "link": 10 + } + ], + "outputs": [ + { + "name": "CONDITIONING", + "type": 
"CONDITIONING", + "links": [ + 70 + ], + "shape": 3, + "slot_index": 0 + } + ], + "properties": { + "Node name for S&R": "CLIPTextEncodeSDXLRefiner" + }, + "widgets_values": [ + 6, + 1024, + 1024, + "high quality" + ] + }, + { + "id": 37, + "type": "PreviewImage", + "pos": [ + 2810, + -280 + ], + "size": { + "0": 344.04876708984375, + "1": 580.6563720703125 + }, + "flags": {}, + "order": 7, + "mode": 2, + "inputs": [ + { + "name": "images", + "type": "IMAGE", + "link": 64 + } + ], + "properties": { + "Node name for S&R": "PreviewImage" + } + }, + { + "id": 16, + "type": "PreviewImage", + "pos": [ + 3200, + -280 + ], + "size": { + "0": 336.36944580078125, + "1": 585.6206665039062 + }, + "flags": {}, + "order": 18, + "mode": 2, + "inputs": [ + { + "name": "images", + "type": "IMAGE", + "link": 18 + } + ], + "title": "SDXL Base only", + "properties": { + "Node name for S&R": "PreviewImage" + } + }, + { + "id": 6, + "type": "ToDetailerPipeSDXL", + "pos": [ + 1199, + 379 + ], + "size": { + "0": 400, + "1": 340 + }, + "flags": {}, + "order": 10, + "mode": 0, + "inputs": [ + { + "name": "model", + "type": "MODEL", + "link": 1 + }, + { + "name": "clip", + "type": "CLIP", + "link": 2 + }, + { + "name": "vae", + "type": "VAE", + "link": 3 + }, + { + "name": "positive", + "type": "CONDITIONING", + "link": 9 + }, + { + "name": "negative", + "type": "CONDITIONING", + "link": 8 + }, + { + "name": "refiner_model", + "type": "MODEL", + "link": 69 + }, + { + "name": "refiner_clip", + "type": "CLIP", + "link": 5, + "slot_index": 6 + }, + { + "name": "refiner_positive", + "type": "CONDITIONING", + "link": 70 + }, + { + "name": "refiner_negative", + "type": "CONDITIONING", + "link": 13, + "slot_index": 8 + }, + { + "name": "bbox_detector", + "type": "BBOX_DETECTOR", + "link": 16, + "slot_index": 9 + }, + { + "name": "sam_model_opt", + "type": "SAM_MODEL", + "link": 17, + "slot_index": 10 + }, + { + "name": "segm_detector_opt", + "type": "SEGM_DETECTOR", + "link": null + }, + { + "name": "detailer_hook", + "type": "DETAILER_HOOK", + "link": null + } + ], + "outputs": [ + { + "name": "detailer_pipe", + "type": "DETAILER_PIPE", + "links": [ + 114 + ], + "shape": 3, + "slot_index": 0 + } + ], + "properties": { + "Node name for S&R": "ToDetailerPipeSDXL" + }, + "widgets_values": [ + "", + "Select the LoRA to add to the text" + ] + }, + { + "id": 38, + "type": "PreviewImage", + "pos": [ + 3590, + -280 + ], + "size": { + "0": 336.36944580078125, + "1": 585.6206665039062 + }, + "flags": {}, + "order": 19, + "mode": 0, + "inputs": [ + { + "name": "images", + "type": "IMAGE", + "link": 67 + } + ], + "title": "SDXL Base + Refiner", + "properties": { + "Node name for S&R": "PreviewImage" + } + }, + { + "id": 41, + "type": "BasicPipeToDetailerPipeSDXL", + "pos": [ + 2160, + 1010 + ], + "size": { + "0": 405.5999755859375, + "1": 200 + }, + "flags": {}, + "order": 15, + "mode": 0, + "inputs": [ + { + "name": "base_basic_pipe", + "type": "BASIC_PIPE", + "link": 87 + }, + { + "name": "refiner_basic_pipe", + "type": "BASIC_PIPE", + "link": 88 + }, + { + "name": "bbox_detector", + "type": "BBOX_DETECTOR", + "link": 133 + }, + { + "name": "sam_model_opt", + "type": "SAM_MODEL", + "link": 134, + "slot_index": 3 + }, + { + "name": "segm_detector_opt", + "type": "SEGM_DETECTOR", + "link": 135, + "slot_index": 4 + }, + { + "name": "detailer_hook", + "type": "DETAILER_HOOK", + "link": 136, + "slot_index": 5 + } + ], + "outputs": [ + { + "name": "detailer_pipe", + "type": "DETAILER_PIPE", + "links": [ + 86, + 110 + ], + "shape": 3, + 
"slot_index": 0 + } + ], + "properties": { + "Node name for S&R": "BasicPipeToDetailerPipeSDXL" + }, + "widgets_values": [ + "", + "Select the LoRA to add to the text" + ] + }, + { + "id": 44, + "type": "FaceDetailerPipe", + "pos": [ + 3565, + 427 + ], + "size": { + "0": 456, + "1": 902 + }, + "flags": {}, + "order": 22, + "mode": 0, + "inputs": [ + { + "name": "image", + "type": "IMAGE", + "link": 104, + "slot_index": 0 + }, + { + "name": "detailer_pipe", + "type": "DETAILER_PIPE", + "link": 103 + } + ], + "outputs": [ + { + "name": "image", + "type": "IMAGE", + "links": [], + "shape": 3, + "slot_index": 0 + }, + { + "name": "cropped_refined", + "type": "IMAGE", + "links": [], + "shape": 6, + "slot_index": 1 + }, + { + "name": "cropped_enhanced_alpha", + "type": "IMAGE", + "links": [ + 105 + ], + "shape": 6, + "slot_index": 2 + }, + { + "name": "mask", + "type": "MASK", + "links": null, + "shape": 3 + }, + { + "name": "detailer_pipe", + "type": "DETAILER_PIPE", + "links": null, + "shape": 3 + }, + { + "name": "cnet_images", + "type": "IMAGE", + "links": null, + "shape": 6 + } + ], + "properties": { + "Node name for S&R": "FaceDetailerPipe" + }, + "widgets_values": [ + 1024, + false, + 768, + 104033248204033, + "fixed", + 30, + 8, + "euler", + "normal", + 0.5, + 5, + true, + true, + 0.6, + 30, + 3, + "center-1", + 30, + 0.93, + 0, + 0.7, + "False", + 10, + 0.1 + ] + }, + { + "id": 45, + "type": "PreviewImage", + "pos": [ + 4109.76494140625, + 483.81650390625 + ], + "size": { + "0": 210, + "1": 246 + }, + "flags": {}, + "order": 24, + "mode": 0, + "inputs": [ + { + "name": "images", + "type": "IMAGE", + "link": 105 + } + ], + "properties": { + "Node name for S&R": "PreviewImage" + } + }, + { + "id": 1, + "type": "FaceDetailerPipe", + "pos": [ + 2720, + 430 + ], + "size": { + "0": 456, + "1": 902 + }, + "flags": {}, + "order": 16, + "mode": 0, + "inputs": [ + { + "name": "image", + "type": "IMAGE", + "link": 15, + "slot_index": 0 + }, + { + "name": "detailer_pipe", + "type": "DETAILER_PIPE", + "link": 86 + } + ], + "outputs": [ + { + "name": "image", + "type": "IMAGE", + "links": [ + 18, + 67, + 104, + 106 + ], + "shape": 3, + "slot_index": 0 + }, + { + "name": "cropped_refined", + "type": "IMAGE", + "links": [ + 19 + ], + "shape": 6, + "slot_index": 1 + }, + { + "name": "cropped_enhanced_alpha", + "type": "IMAGE", + "links": [ + 20 + ], + "shape": 6, + "slot_index": 2 + }, + { + "name": "mask", + "type": "MASK", + "links": null, + "shape": 3 + }, + { + "name": "detailer_pipe", + "type": "DETAILER_PIPE", + "links": [ + 103 + ], + "shape": 3, + "slot_index": 4 + }, + { + "name": "cnet_images", + "type": "IMAGE", + "links": null, + "shape": 6 + } + ], + "properties": { + "Node name for S&R": "FaceDetailerPipe" + }, + "widgets_values": [ + 1024, + false, + 768, + 104033248204033, + "fixed", + 30, + 8, + "euler", + "normal", + 0.5, + 5, + true, + true, + 0.6, + 30, + 3, + "center-1", + 30, + 0.93, + 0, + 0.7, + "False", + 10, + 0.1 + ] + }, + { + "id": 43, + "type": "ToBasicPipe", + "pos": [ + 1790, + 1130 + ], + "size": { + "0": 241.79998779296875, + "1": 106 + }, + "flags": {}, + "order": 14, + "mode": 0, + "inputs": [ + { + "name": "model", + "type": "MODEL", + "link": 142 + }, + { + "name": "clip", + "type": "CLIP", + "link": 143 + }, + { + "name": "vae", + "type": "VAE", + "link": 145, + "slot_index": 2 + }, + { + "name": "positive", + "type": "CONDITIONING", + "link": 149 + }, + { + "name": "negative", + "type": "CONDITIONING", + "link": 150 + } + ], + "outputs": [ + { + "name": 
"basic_pipe", + "type": "BASIC_PIPE", + "links": [ + 88, + 108 + ], + "shape": 3, + "slot_index": 0 + } + ], + "properties": { + "Node name for S&R": "ToBasicPipe" + } + }, + { + "id": 49, + "type": "ImpactSimpleDetectorSEGSPipe", + "pos": [ + 2236.375298828125, + 1520.8711416015626 + ], + "size": { + "0": 315, + "1": 246 + }, + "flags": {}, + "order": 17, + "mode": 0, + "inputs": [ + { + "name": "detailer_pipe", + "type": "DETAILER_PIPE", + "link": 110, + "slot_index": 0 + }, + { + "name": "image", + "type": "IMAGE", + "link": 112, + "slot_index": 1 + } + ], + "outputs": [ + { + "name": "SEGS", + "type": "SEGS", + "links": [ + 111 + ], + "shape": 3, + "slot_index": 0 + } + ], + "properties": { + "Node name for S&R": "ImpactSimpleDetectorSEGSPipe" + }, + "widgets_values": [ + 0.5, + 0, + 3, + 10, + 0.5, + 0, + 0, + 0.7 + ] + }, + { + "id": 47, + "type": "DetailerForEachPipe", + "pos": [ + 2725, + 1448 + ], + "size": { + "0": 456.5638732910156, + "1": 559.1150512695312 + }, + "flags": {}, + "order": 23, + "mode": 0, + "inputs": [ + { + "name": "image", + "type": "IMAGE", + "link": 106 + }, + { + "name": "segs", + "type": "SEGS", + "link": 111, + "slot_index": 1 + }, + { + "name": "basic_pipe", + "type": "BASIC_PIPE", + "link": 107, + "slot_index": 2 + }, + { + "name": "detailer_hook", + "type": "DETAILER_HOOK", + "link": null + }, + { + "name": "refiner_basic_pipe_opt", + "type": "BASIC_PIPE", + "link": 108 + } + ], + "outputs": [ + { + "name": "IMAGE", + "type": "IMAGE", + "links": [ + 113 + ], + "shape": 3, + "slot_index": 0 + }, + { + "name": "segs", + "type": "SEGS", + "links": null, + "shape": 3 + }, + { + "name": "basic_pipe", + "type": "BASIC_PIPE", + "links": null, + "shape": 3 + }, + { + "name": "cnet_images", + "type": "IMAGE", + "links": null, + "shape": 6 + } + ], + "properties": { + "Node name for S&R": "DetailerForEachPipe" + }, + "widgets_values": [ + 256, + true, + 768, + 450265819682234, + "fixed", + 20, + 8, + "euler", + "normal", + 0.5, + 5, + true, + true, + "", + 0.2 + ] + }, + { + "id": 50, + "type": "PreviewImage", + "pos": [ + 3448.7228955078117, + 1463.962194335937 + ], + "size": { + "0": 210, + "1": 246 + }, + "flags": {}, + "order": 25, + "mode": 0, + "inputs": [ + { + "name": "images", + "type": "IMAGE", + "link": 113 + } + ], + "properties": { + "Node name for S&R": "PreviewImage" + } + }, + { + "id": 40, + "type": "ToDetailerPipeSDXL", + "pos": [ + 2226, + 539 + ], + "size": { + "0": 400, + "1": 340 + }, + "flags": {}, + "order": 13, + "mode": 0, + "inputs": [ + { + "name": "model", + "type": "MODEL", + "link": 125 + }, + { + "name": "clip", + "type": "CLIP", + "link": 116, + "slot_index": 1 + }, + { + "name": "vae", + "type": "VAE", + "link": 117 + }, + { + "name": "positive", + "type": "CONDITIONING", + "link": 120, + "slot_index": 3 + }, + { + "name": "negative", + "type": "CONDITIONING", + "link": 121 + }, + { + "name": "refiner_model", + "type": "MODEL", + "link": 124, + "slot_index": 5 + }, + { + "name": "refiner_clip", + "type": "CLIP", + "link": 126 + }, + { + "name": "refiner_positive", + "type": "CONDITIONING", + "link": 127, + "slot_index": 7 + }, + { + "name": "refiner_negative", + "type": "CONDITIONING", + "link": 128 + }, + { + "name": "bbox_detector", + "type": "BBOX_DETECTOR", + "link": 129 + }, + { + "name": "sam_model_opt", + "type": "SAM_MODEL", + "link": 130, + "slot_index": 10 + }, + { + "name": "segm_detector_opt", + "type": "SEGM_DETECTOR", + "link": 131 + }, + { + "name": "detailer_hook", + "type": "DETAILER_HOOK", + "link": 132, + 
"slot_index": 12 + } + ], + "outputs": [ + { + "name": "detailer_pipe", + "type": "DETAILER_PIPE", + "links": [], + "shape": 3, + "slot_index": 0 + } + ], + "properties": { + "Node name for S&R": "ToDetailerPipeSDXL" + }, + "widgets_values": [ + "", + "SDXL/person/IU_leejieun_SDXL.safetensors" + ] + }, + { + "id": 42, + "type": "ToBasicPipe", + "pos": [ + 1899, + 906 + ], + "size": { + "0": 241.79998779296875, + "1": 106 + }, + "flags": {}, + "order": 12, + "mode": 0, + "inputs": [ + { + "name": "model", + "type": "MODEL", + "link": 137 + }, + { + "name": "clip", + "type": "CLIP", + "link": 138, + "slot_index": 1 + }, + { + "name": "vae", + "type": "VAE", + "link": 139, + "slot_index": 2 + }, + { + "name": "positive", + "type": "CONDITIONING", + "link": 147, + "slot_index": 3 + }, + { + "name": "negative", + "type": "CONDITIONING", + "link": 148, + "slot_index": 4 + } + ], + "outputs": [ + { + "name": "basic_pipe", + "type": "BASIC_PIPE", + "links": [ + 87, + 107 + ], + "shape": 3, + "slot_index": 0 + } + ], + "properties": { + "Node name for S&R": "ToBasicPipe" + } + }, + { + "id": 51, + "type": "FromDetailerPipeSDXL", + "pos": [ + 1650, + 520 + ], + "size": { + "0": 393, + "1": 286 + }, + "flags": {}, + "order": 11, + "mode": 0, + "inputs": [ + { + "name": "detailer_pipe", + "type": "DETAILER_PIPE", + "link": 114 + } + ], + "outputs": [ + { + "name": "detailer_pipe", + "type": "DETAILER_PIPE", + "links": null, + "shape": 3 + }, + { + "name": "model", + "type": "MODEL", + "links": [ + 125, + 137 + ], + "shape": 3, + "slot_index": 1 + }, + { + "name": "clip", + "type": "CLIP", + "links": [ + 116, + 138, + 143 + ], + "shape": 3, + "slot_index": 2 + }, + { + "name": "vae", + "type": "VAE", + "links": [ + 117, + 139, + 145 + ], + "shape": 3, + "slot_index": 3 + }, + { + "name": "positive", + "type": "CONDITIONING", + "links": [ + 120, + 147 + ], + "shape": 3, + "slot_index": 4 + }, + { + "name": "negative", + "type": "CONDITIONING", + "links": [ + 121, + 148 + ], + "shape": 3, + "slot_index": 5 + }, + { + "name": "bbox_detector", + "type": "BBOX_DETECTOR", + "links": [ + 129, + 133 + ], + "shape": 3, + "slot_index": 6 + }, + { + "name": "sam_model_opt", + "type": "SAM_MODEL", + "links": [ + 130, + 134 + ], + "shape": 3, + "slot_index": 7 + }, + { + "name": "segm_detector_opt", + "type": "SEGM_DETECTOR", + "links": [ + 131, + 135 + ], + "shape": 3, + "slot_index": 8 + }, + { + "name": "detailer_hook", + "type": "DETAILER_HOOK", + "links": [ + 132, + 136 + ], + "shape": 3, + "slot_index": 9 + }, + { + "name": "refiner_model", + "type": "MODEL", + "links": [ + 124, + 142 + ], + "shape": 3, + "slot_index": 10 + }, + { + "name": "refiner_clip", + "type": "CLIP", + "links": [ + 126 + ], + "shape": 3, + "slot_index": 11 + }, + { + "name": "refiner_positive", + "type": "CONDITIONING", + "links": [ + 127, + 149 + ], + "shape": 3, + "slot_index": 12 + }, + { + "name": "refiner_negative", + "type": "CONDITIONING", + "links": [ + 128, + 150 + ], + "shape": 3, + "slot_index": 13 + } + ], + "properties": { + "Node name for S&R": "FromDetailerPipeSDXL" + } + } + ], + "links": [ + [ + 1, + 7, + 0, + 6, + 0, + "MODEL" + ], + [ + 2, + 7, + 1, + 6, + 1, + "CLIP" + ], + [ + 3, + 7, + 2, + 6, + 2, + "VAE" + ], + [ + 5, + 8, + 1, + 6, + 6, + "CLIP" + ], + [ + 6, + 7, + 1, + 9, + 0, + "CLIP" + ], + [ + 7, + 7, + 1, + 10, + 0, + "CLIP" + ], + [ + 8, + 10, + 0, + 6, + 4, + "CONDITIONING" + ], + [ + 9, + 9, + 0, + 6, + 3, + "CONDITIONING" + ], + [ + 10, + 8, + 1, + 11, + 0, + "CLIP" + ], + [ + 11, + 8, + 1, + 12, + 0, 
+ "CLIP" + ], + [ + 13, + 12, + 0, + 6, + 8, + "CONDITIONING" + ], + [ + 15, + 13, + 0, + 1, + 0, + "IMAGE" + ], + [ + 16, + 14, + 0, + 6, + 9, + "BBOX_DETECTOR" + ], + [ + 17, + 15, + 0, + 6, + 10, + "SAM_MODEL" + ], + [ + 18, + 1, + 0, + 16, + 0, + "IMAGE" + ], + [ + 19, + 1, + 1, + 17, + 0, + "IMAGE" + ], + [ + 20, + 1, + 2, + 18, + 0, + "IMAGE" + ], + [ + 64, + 13, + 0, + 37, + 0, + "IMAGE" + ], + [ + 67, + 1, + 0, + 38, + 0, + "IMAGE" + ], + [ + 69, + 8, + 0, + 6, + 5, + "MODEL" + ], + [ + 70, + 11, + 0, + 6, + 7, + "CONDITIONING" + ], + [ + 86, + 41, + 0, + 1, + 1, + "DETAILER_PIPE" + ], + [ + 87, + 42, + 0, + 41, + 0, + "BASIC_PIPE" + ], + [ + 88, + 43, + 0, + 41, + 1, + "BASIC_PIPE" + ], + [ + 103, + 1, + 4, + 44, + 1, + "DETAILER_PIPE" + ], + [ + 104, + 1, + 0, + 44, + 0, + "IMAGE" + ], + [ + 105, + 44, + 2, + 45, + 0, + "IMAGE" + ], + [ + 106, + 1, + 0, + 47, + 0, + "IMAGE" + ], + [ + 107, + 42, + 0, + 47, + 2, + "BASIC_PIPE" + ], + [ + 108, + 43, + 0, + 47, + 4, + "BASIC_PIPE" + ], + [ + 110, + 41, + 0, + 49, + 0, + "DETAILER_PIPE" + ], + [ + 111, + 49, + 0, + 47, + 1, + "SEGS" + ], + [ + 112, + 13, + 0, + 49, + 1, + "IMAGE" + ], + [ + 113, + 47, + 0, + 50, + 0, + "IMAGE" + ], + [ + 114, + 6, + 0, + 51, + 0, + "DETAILER_PIPE" + ], + [ + 116, + 51, + 2, + 40, + 1, + "CLIP" + ], + [ + 117, + 51, + 3, + 40, + 2, + "VAE" + ], + [ + 120, + 51, + 4, + 40, + 3, + "CONDITIONING" + ], + [ + 121, + 51, + 5, + 40, + 4, + "CONDITIONING" + ], + [ + 124, + 51, + 10, + 40, + 5, + "MODEL" + ], + [ + 125, + 51, + 1, + 40, + 0, + "MODEL" + ], + [ + 126, + 51, + 11, + 40, + 6, + "CLIP" + ], + [ + 127, + 51, + 12, + 40, + 7, + "CONDITIONING" + ], + [ + 128, + 51, + 13, + 40, + 8, + "CONDITIONING" + ], + [ + 129, + 51, + 6, + 40, + 9, + "BBOX_DETECTOR" + ], + [ + 130, + 51, + 7, + 40, + 10, + "SAM_MODEL" + ], + [ + 131, + 51, + 8, + 40, + 11, + "SEGM_DETECTOR" + ], + [ + 132, + 51, + 9, + 40, + 12, + "DETAILER_HOOK" + ], + [ + 133, + 51, + 6, + 41, + 2, + "BBOX_DETECTOR" + ], + [ + 134, + 51, + 7, + 41, + 3, + "SAM_MODEL" + ], + [ + 135, + 51, + 8, + 41, + 4, + "SEGM_DETECTOR" + ], + [ + 136, + 51, + 9, + 41, + 5, + "DETAILER_HOOK" + ], + [ + 137, + 51, + 1, + 42, + 0, + "MODEL" + ], + [ + 138, + 51, + 2, + 42, + 1, + "CLIP" + ], + [ + 139, + 51, + 3, + 42, + 2, + "VAE" + ], + [ + 142, + 51, + 10, + 43, + 0, + "MODEL" + ], + [ + 143, + 51, + 2, + 43, + 1, + "CLIP" + ], + [ + 145, + 51, + 3, + 43, + 2, + "VAE" + ], + [ + 147, + 51, + 4, + 42, + 3, + "CONDITIONING" + ], + [ + 148, + 51, + 5, + 42, + 4, + "CONDITIONING" + ], + [ + 149, + 51, + 12, + 43, + 3, + "CONDITIONING" + ], + [ + 150, + 51, + 13, + 43, + 4, + "CONDITIONING" + ] + ], + "groups": [], + "config": {}, + "extra": {}, + "version": 0.4 +} \ No newline at end of file diff --git a/zavodik/nodes/ComfyUI-Impact-Pack/tests/workflows/detailer-pipe-test.json b/zavodik/nodes/ComfyUI-Impact-Pack/tests/workflows/detailer-pipe-test.json new file mode 100644 index 0000000000000000000000000000000000000000..d912152164a45d9f81ed888512ee188f2cc5e655 --- /dev/null +++ b/zavodik/nodes/ComfyUI-Impact-Pack/tests/workflows/detailer-pipe-test.json @@ -0,0 +1,3489 @@ +{ + "last_node_id": 87, + "last_link_id": 214, + "nodes": [ + { + "id": 7, + "type": "CLIPTextEncode", + "pos": [ + 413, + 389 + ], + "size": { + "0": 425.27801513671875, + "1": 180.6060791015625 + }, + "flags": {}, + "order": 7, + "mode": 0, + "inputs": [ + { + "name": "clip", + "type": "CLIP", + "link": 5, + "label": "clip" + } + ], + "outputs": [ + { + "name": "CONDITIONING", + "type": 
"CONDITIONING", + "links": [ + 6 + ], + "slot_index": 0, + "label": "CONDITIONING" + } + ], + "properties": { + "Node name for S&R": "CLIPTextEncode" + }, + "widgets_values": [ + "text, watermark, worst quality:1.4, low quality:1.4" + ] + }, + { + "id": 6, + "type": "CLIPTextEncode", + "pos": [ + 415, + 186 + ], + "size": { + "0": 422.84503173828125, + "1": 164.31304931640625 + }, + "flags": {}, + "order": 6, + "mode": 0, + "inputs": [ + { + "name": "clip", + "type": "CLIP", + "link": 3, + "label": "clip" + } + ], + "outputs": [ + { + "name": "CONDITIONING", + "type": "CONDITIONING", + "links": [ + 4 + ], + "slot_index": 0, + "label": "CONDITIONING" + } + ], + "properties": { + "Node name for S&R": "CLIPTextEncode" + }, + "widgets_values": [ + "photorealistic:1.4, best quality:1.4, 2 girls on table " + ] + }, + { + "id": 5, + "type": "EmptyLatentImage", + "pos": [ + 473, + 609 + ], + "size": { + "0": 315, + "1": 106 + }, + "flags": {}, + "order": 0, + "mode": 0, + "outputs": [ + { + "name": "LATENT", + "type": "LATENT", + "links": [ + 2 + ], + "slot_index": 0, + "label": "LATENT" + } + ], + "properties": { + "Node name for S&R": "EmptyLatentImage" + }, + "widgets_values": [ + 1024, + 768, + 1 + ] + }, + { + "id": 8, + "type": "VAEDecode", + "pos": [ + 1209, + 188 + ], + "size": { + "0": 210, + "1": 46 + }, + "flags": {}, + "order": 12, + "mode": 0, + "inputs": [ + { + "name": "samples", + "type": "LATENT", + "link": 7, + "label": "samples" + }, + { + "name": "vae", + "type": "VAE", + "link": 8, + "label": "vae" + } + ], + "outputs": [ + { + "name": "IMAGE", + "type": "IMAGE", + "links": [ + 10 + ], + "slot_index": 0, + "label": "IMAGE" + } + ], + "properties": { + "Node name for S&R": "VAEDecode" + } + }, + { + "id": 30, + "type": "PreviewImage", + "pos": [ + 2532, + -7 + ], + "size": { + "0": 575.2411499023438, + "1": 561.0116577148438 + }, + "flags": {}, + "order": 16, + "mode": 0, + "inputs": [ + { + "name": "images", + "type": "IMAGE", + "link": 179, + "label": "images" + } + ], + "properties": { + "Node name for S&R": "PreviewImage" + } + }, + { + "id": 24, + "type": "SAMLoader", + "pos": [ + 861, + 1300 + ], + "size": { + "0": 315, + "1": 82 + }, + "flags": {}, + "order": 1, + "mode": 0, + "outputs": [ + { + "name": "SAM_MODEL", + "type": "SAM_MODEL", + "links": [ + 19, + 33 + ], + "shape": 3, + "slot_index": 0, + "label": "SAM_MODEL" + } + ], + "properties": { + "Node name for S&R": "SAMLoader" + }, + "widgets_values": [ + "sam_vit_b_01ec64.pth", + "AUTO" + ] + }, + { + "id": 32, + "type": "BasicPipeToDetailerPipe", + "pos": [ + 1396, + 1143 + ], + "size": { + "0": 400, + "1": 200 + }, + "flags": {}, + "order": 9, + "mode": 0, + "inputs": [ + { + "name": "basic_pipe", + "type": "BASIC_PIPE", + "link": 34, + "label": "basic_pipe" + }, + { + "name": "bbox_detector", + "type": "BBOX_DETECTOR", + "link": 202, + "slot_index": 1, + "label": "bbox_detector" + }, + { + "name": "sam_model_opt", + "type": "SAM_MODEL", + "link": 33, + "slot_index": 2, + "label": "sam_model_opt" + }, + { + "name": "segm_detector_opt", + "type": "SEGM_DETECTOR", + "link": 213, + "label": "segm_detector_opt" + }, + { + "name": "detailer_hook", + "type": "DETAILER_HOOK", + "link": null, + "label": "detailer_hook" + } + ], + "outputs": [ + { + "name": "detailer_pipe", + "type": "DETAILER_PIPE", + "links": [ + 36 + ], + "shape": 3, + "slot_index": 0, + "label": "detailer_pipe" + } + ], + "properties": { + "Node name for S&R": "BasicPipeToDetailerPipe" + }, + "widgets_values": [ + "photorealistic:1.4, best 
quality:1.4, detailed eyes, \n__face_loras__ [faint smile|surprise|laugh]", + "Select the LoRA to add to the text" + ] + }, + { + "id": 36, + "type": "MaskToImage", + "pos": [ + 2650, + 1230 + ], + "size": { + "0": 210, + "1": 26 + }, + "flags": {}, + "order": 20, + "mode": 0, + "inputs": [ + { + "name": "mask", + "type": "MASK", + "link": 182, + "label": "mask" + } + ], + "outputs": [ + { + "name": "IMAGE", + "type": "IMAGE", + "links": [ + 59 + ], + "shape": 3, + "slot_index": 0, + "label": "IMAGE" + } + ], + "properties": { + "Node name for S&R": "MaskToImage" + } + }, + { + "id": 52, + "type": "BboxDetectorSEGS", + "pos": [ + 4948, + 677 + ], + "size": { + "0": 315, + "1": 150 + }, + "flags": {}, + "order": 33, + "mode": 0, + "inputs": [ + { + "name": "bbox_detector", + "type": "BBOX_DETECTOR", + "link": 85, + "label": "bbox_detector" + }, + { + "name": "image", + "type": "IMAGE", + "link": 188, + "label": "image" + } + ], + "outputs": [ + { + "name": "SEGS", + "type": "SEGS", + "links": [ + 87, + 160 + ], + "shape": 3, + "slot_index": 0, + "label": "SEGS" + } + ], + "properties": { + "Node name for S&R": "BboxDetectorSEGS" + }, + "widgets_values": [ + 0.5, + 10, + 3, + 10 + ] + }, + { + "id": 46, + "type": "DetailerPipeToBasicPipe", + "pos": [ + 4753, + 1188 + ], + "size": { + "0": 304.79998779296875, + "1": 26 + }, + "flags": {}, + "order": 31, + "mode": 0, + "inputs": [ + { + "name": "detailer_pipe", + "type": "DETAILER_PIPE", + "link": 77, + "label": "detailer_pipe" + } + ], + "outputs": [ + { + "name": "basic_pipe", + "type": "BASIC_PIPE", + "links": [ + 155, + 196 + ], + "shape": 3, + "slot_index": 0, + "label": "basic_pipe" + } + ], + "properties": { + "Node name for S&R": "DetailerPipeToBasicPipe" + } + }, + { + "id": 60, + "type": "PreviewImage", + "pos": [ + 6270, + 2420 + ], + "size": { + "0": 600, + "1": 670 + }, + "flags": {}, + "order": 39, + "mode": 0, + "inputs": [ + { + "name": "images", + "type": "IMAGE", + "link": 166, + "label": "images" + } + ], + "properties": { + "Node name for S&R": "PreviewImage" + } + }, + { + "id": 57, + "type": "PreviewImage", + "pos": [ + 5997, + 1424 + ], + "size": { + "0": 840, + "1": 640 + }, + "flags": {}, + "order": 46, + "mode": 0, + "inputs": [ + { + "name": "images", + "type": "IMAGE", + "link": 144, + "label": "images" + } + ], + "properties": { + "Node name for S&R": "PreviewImage" + } + }, + { + "id": 54, + "type": "PreviewImage", + "pos": [ + 6486, + 705 + ], + "size": { + "0": 740, + "1": 580 + }, + "flags": {}, + "order": 51, + "mode": 0, + "inputs": [ + { + "name": "images", + "type": "IMAGE", + "link": 197, + "label": "images" + } + ], + "properties": { + "Node name for S&R": "PreviewImage" + } + }, + { + "id": 64, + "type": "PreviewImage", + "pos": [ + 6800, + -300 + ], + "size": { + "0": 570, + "1": 590 + }, + "flags": {}, + "order": 47, + "mode": 0, + "inputs": [ + { + "name": "images", + "type": "IMAGE", + "link": 156, + "label": "images" + } + ], + "properties": { + "Node name for S&R": "PreviewImage" + } + }, + { + "id": 42, + "type": "PreviewImage", + "pos": [ + 4070, + 636 + ], + "size": { + "0": 210, + "1": 246 + }, + "flags": {}, + "order": 26, + "mode": 0, + "inputs": [ + { + "name": "images", + "type": "IMAGE", + "link": 187, + "label": "images" + } + ], + "properties": { + "Node name for S&R": "PreviewImage" + } + }, + { + "id": 43, + "type": "MaskToImage", + "pos": [ + 4081, + 949 + ], + "size": { + "0": 176.39999389648438, + "1": 26 + }, + "flags": {}, + "order": 28, + "mode": 0, + "inputs": [ + { + "name": 
"mask", + "type": "MASK", + "link": 190, + "label": "mask" + } + ], + "outputs": [ + { + "name": "IMAGE", + "type": "IMAGE", + "links": [ + 75 + ], + "shape": 3, + "slot_index": 0, + "label": "IMAGE" + } + ], + "properties": { + "Node name for S&R": "MaskToImage" + } + }, + { + "id": 44, + "type": "PreviewImage", + "pos": [ + 4072, + 1029 + ], + "size": { + "0": 210, + "1": 246 + }, + "flags": {}, + "order": 30, + "mode": 0, + "inputs": [ + { + "name": "images", + "type": "IMAGE", + "link": 75, + "label": "images" + } + ], + "properties": { + "Node name for S&R": "PreviewImage" + } + }, + { + "id": 37, + "type": "PreviewImage", + "pos": [ + 2890, + 1250 + ], + "size": { + "0": 210, + "1": 246 + }, + "flags": {}, + "order": 22, + "mode": 0, + "inputs": [ + { + "name": "images", + "type": "IMAGE", + "link": 59, + "label": "images" + } + ], + "properties": { + "Node name for S&R": "PreviewImage" + } + }, + { + "id": 22, + "type": "BasicPipeToDetailerPipe", + "pos": [ + 1396, + 866 + ], + "size": { + "0": 400, + "1": 200 + }, + "flags": {}, + "order": 8, + "mode": 0, + "inputs": [ + { + "name": "basic_pipe", + "type": "BASIC_PIPE", + "link": 17, + "label": "basic_pipe" + }, + { + "name": "bbox_detector", + "type": "BBOX_DETECTOR", + "link": 201, + "slot_index": 1, + "label": "bbox_detector" + }, + { + "name": "sam_model_opt", + "type": "SAM_MODEL", + "link": 19, + "slot_index": 2, + "label": "sam_model_opt" + }, + { + "name": "segm_detector_opt", + "type": "SEGM_DETECTOR", + "link": 212, + "label": "segm_detector_opt" + }, + { + "name": "detailer_hook", + "type": "DETAILER_HOOK", + "link": null, + "label": "detailer_hook" + } + ], + "outputs": [ + { + "name": "detailer_pipe", + "type": "DETAILER_PIPE", + "links": [], + "shape": 3, + "slot_index": 0, + "label": "detailer_pipe" + } + ], + "properties": { + "Node name for S&R": "BasicPipeToDetailerPipe" + }, + "widgets_values": [ + "photorealistic:1.4, best quality:1.4, detailed eyes, \n[|||] [faint smile|surprise|laugh]", + "Select the LoRA to add to the text" + ] + }, + { + "id": 75, + "type": "PreviewImage", + "pos": [ + 2600, + 1330 + ], + "size": { + "0": 210, + "1": 246 + }, + "flags": {}, + "order": 19, + "mode": 0, + "inputs": [ + { + "name": "images", + "type": "IMAGE", + "link": 181, + "label": "images" + } + ], + "properties": { + "Node name for S&R": "PreviewImage" + } + }, + { + "id": 10, + "type": "PreviewBridge", + "pos": [ + 1462, + 175 + ], + "size": { + "0": 315, + "1": 290 + }, + "flags": {}, + "order": 13, + "mode": 0, + "inputs": [ + { + "name": "images", + "type": "IMAGE", + "link": 10, + "label": "images" + } + ], + "outputs": [ + { + "name": "IMAGE", + "type": "IMAGE", + "links": [ + 169, + 183 + ], + "shape": 3, + "slot_index": 0, + "label": "IMAGE" + }, + { + "name": "MASK", + "type": "MASK", + "links": null, + "shape": 3, + "label": "MASK" + } + ], + "properties": { + "Node name for S&R": "PreviewBridge" + }, + "widgets_values": [ + "#placeholder" + ] + }, + { + "id": 41, + "type": "PreviewImage", + "pos": [ + 4301, + 119 + ], + "size": { + "0": 492.20916748046875, + "1": 448.6293029785156 + }, + "flags": {}, + "order": 25, + "mode": 0, + "inputs": [ + { + "name": "images", + "type": "IMAGE", + "link": 186, + "label": "images" + } + ], + "properties": { + "Node name for S&R": "PreviewImage" + } + }, + { + "id": 78, + "type": "PreviewImage", + "pos": [ + 4075, + 1364 + ], + "size": { + "0": 210, + "1": 246 + }, + "flags": {}, + "order": 27, + "mode": 0, + "inputs": [ + { + "name": "images", + "type": "IMAGE", + "link": 
189, + "label": "images" + } + ], + "properties": { + "Node name for S&R": "PreviewImage" + } + }, + { + "id": 3, + "type": "KSampler", + "pos": [ + 863, + 183 + ], + "size": { + "0": 315, + "1": 474 + }, + "flags": {}, + "order": 10, + "mode": 0, + "inputs": [ + { + "name": "model", + "type": "MODEL", + "link": 1, + "label": "model" + }, + { + "name": "positive", + "type": "CONDITIONING", + "link": 4, + "label": "positive" + }, + { + "name": "negative", + "type": "CONDITIONING", + "link": 6, + "label": "negative" + }, + { + "name": "latent_image", + "type": "LATENT", + "link": 2, + "label": "latent_image" + } + ], + "outputs": [ + { + "name": "LATENT", + "type": "LATENT", + "links": [ + 7 + ], + "slot_index": 0, + "label": "LATENT" + } + ], + "properties": { + "Node name for S&R": "KSampler" + }, + "widgets_values": [ + 885412539640489, + "fixed", + 15, + 8, + "euler", + "normal", + 1 + ] + }, + { + "id": 45, + "type": "EditDetailerPipe", + "pos": [ + 4338, + 950 + ], + "size": { + "0": 284.0971374511719, + "1": 316.5133361816406 + }, + "flags": {}, + "order": 29, + "mode": 0, + "inputs": [ + { + "name": "detailer_pipe", + "type": "DETAILER_PIPE", + "link": 191, + "label": "detailer_pipe" + }, + { + "name": "model", + "type": "MODEL", + "link": null, + "label": "model" + }, + { + "name": "clip", + "type": "CLIP", + "link": null, + "label": "clip" + }, + { + "name": "vae", + "type": "VAE", + "link": null, + "label": "vae" + }, + { + "name": "positive", + "type": "CONDITIONING", + "link": null, + "label": "positive" + }, + { + "name": "negative", + "type": "CONDITIONING", + "link": null, + "label": "negative" + }, + { + "name": "bbox_detector", + "type": "BBOX_DETECTOR", + "link": null, + "label": "bbox_detector" + }, + { + "name": "sam_model", + "type": "SAM_MODEL", + "link": null, + "label": "sam_model" + }, + { + "name": "segm_detector_opt", + "type": "SEGM_DETECTOR", + "link": null, + "label": "segm_detector_opt" + }, + { + "name": "detailer_hook", + "type": "DETAILER_HOOK", + "link": null, + "label": "detailer_hook" + } + ], + "outputs": [ + { + "name": "detailer_pipe", + "type": "DETAILER_PIPE", + "links": [ + 77, + 82 + ], + "shape": 3, + "slot_index": 0, + "label": "detailer_pipe" + } + ], + "properties": { + "Node name for S&R": "EditDetailerPipe" + }, + "widgets_values": [ + "", + "Select the LoRA to add to the text" + ] + }, + { + "id": 65, + "type": "PreviewImage", + "pos": [ + 6430, + -300 + ], + "size": { + "0": 330, + "1": 250 + }, + "flags": {}, + "order": 48, + "mode": 0, + "inputs": [ + { + "name": "images", + "type": "IMAGE", + "link": 157, + "label": "images" + } + ], + "properties": { + "Node name for S&R": "PreviewImage" + } + }, + { + "id": 53, + "type": "MaskToSEGS", + "pos": [ + 5558, + 989 + ], + "size": { + "0": 315, + "1": 130 + }, + "flags": {}, + "order": 38, + "mode": 0, + "inputs": [ + { + "name": "mask", + "type": "MASK", + "link": 88, + "label": "mask" + } + ], + "outputs": [ + { + "name": "SEGS", + "type": "SEGS", + "links": [ + 138, + 154, + 195 + ], + "shape": 3, + "slot_index": 0, + "label": "SEGS" + } + ], + "properties": { + "Node name for S&R": "MaskToSEGS" + }, + "widgets_values": [ + false, + 3, + false, + 10 + ] + }, + { + "id": 81, + "type": "DetailerForEachPipe", + "pos": [ + 6092, + 708 + ], + "size": { + "0": 329.5368957519531, + "1": 598 + }, + "flags": {}, + "order": 45, + "mode": 0, + "inputs": [ + { + "name": "image", + "type": "IMAGE", + "link": 194, + "label": "image" + }, + { + "name": "segs", + "type": "SEGS", + "link": 195, + "label": 
"segs" + }, + { + "name": "basic_pipe", + "type": "BASIC_PIPE", + "link": 196, + "label": "basic_pipe" + }, + { + "name": "detailer_hook", + "type": "DETAILER_HOOK", + "link": null, + "label": "detailer_hook" + } + ], + "outputs": [ + { + "name": "IMAGE", + "type": "IMAGE", + "links": [ + 197 + ], + "shape": 3, + "slot_index": 0, + "label": "IMAGE" + } + ], + "properties": { + "Node name for S&R": "DetailerForEachPipe" + }, + "widgets_values": [ + 256, + true, + 768, + 44457634171318, + "fixed", + 20, + 8, + "euler", + "normal", + 0.5, + 5, + true, + false, + "" + ] + }, + { + "id": 72, + "type": "DetailerForEachDebugPipe", + "pos": [ + 5938, + -58 + ], + "size": { + "0": 330, + "1": 618 + }, + "flags": {}, + "order": 44, + "mode": 0, + "inputs": [ + { + "name": "image", + "type": "IMAGE", + "link": 153, + "label": "image" + }, + { + "name": "segs", + "type": "SEGS", + "link": 154, + "label": "segs" + }, + { + "name": "basic_pipe", + "type": "BASIC_PIPE", + "link": 155, + "label": "basic_pipe" + }, + { + "name": "detailer_hook", + "type": "DETAILER_HOOK", + "link": null, + "label": "detailer_hook" + } + ], + "outputs": [ + { + "name": "image", + "type": "IMAGE", + "links": [ + 156 + ], + "shape": 3, + "slot_index": 0, + "label": "image" + }, + { + "name": "cropped", + "type": "IMAGE", + "links": [ + 157 + ], + "shape": 6, + "slot_index": 1, + "label": "cropped" + }, + { + "name": "cropped_refined", + "type": "IMAGE", + "links": [ + 158 + ], + "shape": 6, + "slot_index": 2, + "label": "cropped_refined" + }, + { + "name": "cropped_refined_alpha", + "type": "IMAGE", + "links": [ + 200 + ], + "shape": 6, + "slot_index": 3, + "label": "cropped_refined_alpha" + } + ], + "properties": { + "Node name for S&R": "DetailerForEachDebugPipe" + }, + "widgets_values": [ + 256, + true, + 768, + 0, + "fixed", + 20, + 8, + "euler", + "normal", + 0.5, + 5, + true, + false, + "" + ] + }, + { + "id": 66, + "type": "PreviewImage", + "pos": [ + 6430, + 30 + ], + "size": { + "0": 330, + "1": 260 + }, + "flags": {}, + "order": 49, + "mode": 0, + "inputs": [ + { + "name": "images", + "type": "IMAGE", + "link": 158, + "label": "images" + } + ], + "properties": { + "Node name for S&R": "PreviewImage" + } + }, + { + "id": 82, + "type": "PreviewImage", + "pos": [ + 6435, + 355 + ], + "size": { + "0": 319.2451171875, + "1": 285.4361572265625 + }, + "flags": {}, + "order": 50, + "mode": 0, + "inputs": [ + { + "name": "images", + "type": "IMAGE", + "link": 200, + "label": "images" + } + ], + "properties": { + "Node name for S&R": "PreviewImage" + } + }, + { + "id": 83, + "type": "UltralyticsDetectorProvider", + "pos": [ + 860, + 1160 + ], + "size": { + "0": 315, + "1": 78 + }, + "flags": {}, + "order": 2, + "mode": 0, + "outputs": [ + { + "name": "BBOX_DETECTOR", + "type": "BBOX_DETECTOR", + "links": [ + 201, + 202 + ], + "shape": 3, + "slot_index": 0, + "label": "BBOX_DETECTOR" + }, + { + "name": "SEGM_DETECTOR", + "type": "SEGM_DETECTOR", + "links": null, + "shape": 3, + "slot_index": 1, + "label": "SEGM_DETECTOR" + } + ], + "properties": { + "Node name for S&R": "UltralyticsDetectorProvider" + }, + "widgets_values": [ + "bbox/face_yolov8m.pt" + ] + }, + { + "id": 69, + "type": "DetailerForEach", + "pos": [ + 5610, + 1425 + ], + "size": { + "0": 315, + "1": 678 + }, + "flags": {}, + "order": 43, + "mode": 0, + "inputs": [ + { + "name": "image", + "type": "IMAGE", + "link": 137, + "label": "image" + }, + { + "name": "segs", + "type": "SEGS", + "link": 138, + "label": "segs" + }, + { + "name": "model", + "type": "MODEL", 
+ "link": 139, + "label": "model" + }, + { + "name": "clip", + "type": "CLIP", + "link": 140, + "label": "clip" + }, + { + "name": "vae", + "type": "VAE", + "link": 141, + "label": "vae" + }, + { + "name": "positive", + "type": "CONDITIONING", + "link": 142, + "label": "positive" + }, + { + "name": "negative", + "type": "CONDITIONING", + "link": 143, + "label": "negative" + }, + { + "name": "detailer_hook", + "type": "DETAILER_HOOK", + "link": null, + "label": "detailer_hook" + } + ], + "outputs": [ + { + "name": "IMAGE", + "type": "IMAGE", + "links": [ + 144 + ], + "shape": 3, + "slot_index": 0, + "label": "IMAGE" + } + ], + "properties": { + "Node name for S&R": "DetailerForEach" + }, + "widgets_values": [ + 256, + true, + 768, + 0, + "fixed", + 20, + 8, + "euler", + "normal", + 0.5, + 5, + true, + false, + "" + ] + }, + { + "id": 50, + "type": "FromDetailerPipe", + "pos": [ + 4730, + 1460 + ], + "size": { + "0": 342.5999755859375, + "1": 186 + }, + "flags": {}, + "order": 32, + "mode": 0, + "inputs": [ + { + "name": "detailer_pipe", + "type": "DETAILER_PIPE", + "link": 82, + "label": "detailer_pipe" + } + ], + "outputs": [ + { + "name": "model", + "type": "MODEL", + "links": [ + 139, + 161 + ], + "shape": 3, + "slot_index": 0, + "label": "model" + }, + { + "name": "clip", + "type": "CLIP", + "links": [ + 140, + 162 + ], + "shape": 3, + "slot_index": 1, + "label": "clip" + }, + { + "name": "vae", + "type": "VAE", + "links": [ + 141, + 163 + ], + "shape": 3, + "slot_index": 2, + "label": "vae" + }, + { + "name": "positive", + "type": "CONDITIONING", + "links": [ + 142, + 164 + ], + "shape": 3, + "slot_index": 3, + "label": "positive" + }, + { + "name": "negative", + "type": "CONDITIONING", + "links": [ + 143, + 165 + ], + "shape": 3, + "slot_index": 4, + "label": "negative" + }, + { + "name": "bbox_detector", + "type": "BBOX_DETECTOR", + "links": [ + 85 + ], + "shape": 3, + "slot_index": 5, + "label": "bbox_detector" + }, + { + "name": "sam_model_opt", + "type": "SAM_MODEL", + "links": [ + 83 + ], + "shape": 3, + "slot_index": 6, + "label": "sam_model_opt" + }, + { + "name": "segm_detector_opt", + "type": "SEGM_DETECTOR", + "links": [ + 204 + ], + "shape": 3, + "slot_index": 7, + "label": "segm_detector_opt" + }, + { + "name": "detailer_hook", + "type": "DETAILER_HOOK", + "links": null, + "shape": 3, + "label": "detailer_hook" + } + ], + "properties": { + "Node name for S&R": "FromDetailerPipe" + } + }, + { + "id": 51, + "type": "SAMDetectorCombined", + "pos": [ + 5125, + 894 + ], + "size": { + "0": 315, + "1": 218 + }, + "flags": {}, + "order": 35, + "mode": 0, + "inputs": [ + { + "name": "sam_model", + "type": "SAM_MODEL", + "link": 83, + "label": "sam_model" + }, + { + "name": "segs", + "type": "SEGS", + "link": 87, + "label": "segs" + }, + { + "name": "image", + "type": "IMAGE", + "link": 205, + "label": "image" + } + ], + "outputs": [ + { + "name": "MASK", + "type": "MASK", + "links": [ + 88 + ], + "shape": 3, + "slot_index": 0, + "label": "MASK" + } + ], + "properties": { + "Node name for S&R": "SAMDetectorCombined" + }, + "widgets_values": [ + "center-1", + 0, + 0.93, + 0, + 0.7, + "False" + ] + }, + { + "id": 85, + "type": "SEGSToImageList", + "pos": [ + 5569.134812187498, + 1289.240372597656 + ], + "size": { + "0": 304.79998779296875, + "1": 46 + }, + "flags": {}, + "order": 37, + "mode": 0, + "inputs": [ + { + "name": "segs", + "type": "SEGS", + "link": 207, + "label": "segs" + }, + { + "name": "fallback_image_opt", + "type": "IMAGE", + "link": 208, + "label": 
"fallback_image_opt" + } + ], + "outputs": [ + { + "name": "IMAGE", + "type": "IMAGE", + "links": [ + 209 + ], + "shape": 6, + "slot_index": 0, + "label": "IMAGE" + } + ], + "properties": { + "Node name for S&R": "SEGSToImageList" + } + }, + { + "id": 86, + "type": "PreviewImage", + "pos": [ + 6910, + 1420 + ], + "size": { + "0": 409.85064697265625, + "1": 614.9011840820312 + }, + "flags": {}, + "order": 42, + "mode": 0, + "inputs": [ + { + "name": "images", + "type": "IMAGE", + "link": 209, + "label": "images" + } + ], + "properties": { + "Node name for S&R": "PreviewImage" + } + }, + { + "id": 39, + "type": "ToDetailerPipe", + "pos": [ + 3167, + 631 + ], + "size": { + "0": 400, + "1": 260 + }, + "flags": {}, + "order": 23, + "mode": 0, + "inputs": [ + { + "name": "model", + "type": "MODEL", + "link": 61, + "label": "model" + }, + { + "name": "clip", + "type": "CLIP", + "link": 62, + "label": "clip" + }, + { + "name": "vae", + "type": "VAE", + "link": 65, + "label": "vae" + }, + { + "name": "positive", + "type": "CONDITIONING", + "link": 66, + "label": "positive" + }, + { + "name": "negative", + "type": "CONDITIONING", + "link": 67, + "label": "negative" + }, + { + "name": "bbox_detector", + "type": "BBOX_DETECTOR", + "link": 68, + "label": "bbox_detector" + }, + { + "name": "sam_model_opt", + "type": "SAM_MODEL", + "link": 69, + "label": "sam_model_opt" + }, + { + "name": "segm_detector_opt", + "type": "SEGM_DETECTOR", + "link": 203, + "label": "segm_detector_opt" + }, + { + "name": "detailer_hook", + "type": "DETAILER_HOOK", + "link": null, + "label": "detailer_hook" + } + ], + "outputs": [ + { + "name": "detailer_pipe", + "type": "DETAILER_PIPE", + "links": [ + 210 + ], + "shape": 3, + "slot_index": 0, + "label": "detailer_pipe" + } + ], + "properties": { + "Node name for S&R": "ToDetailerPipe" + }, + "widgets_values": [ + "", + "Select the LoRA to add to the text" + ] + }, + { + "id": 76, + "type": "FaceDetailerPipe", + "pos": [ + 3648, + 641 + ], + "size": { + "0": 347.608154296875, + "1": 1060.470947265625 + }, + "flags": {}, + "order": 24, + "mode": 0, + "inputs": [ + { + "name": "image", + "type": "IMAGE", + "link": 184, + "label": "image" + }, + { + "name": "detailer_pipe", + "type": "DETAILER_PIPE", + "link": 210, + "label": "detailer_pipe" + } + ], + "outputs": [ + { + "name": "image", + "type": "IMAGE", + "links": [ + 186, + 188 + ], + "shape": 3, + "slot_index": 0, + "label": "image" + }, + { + "name": "cropped_refined", + "type": "IMAGE", + "links": [ + 187 + ], + "shape": 6, + "slot_index": 1, + "label": "cropped_refined" + }, + { + "name": "cropped_enhanced_alpha", + "type": "IMAGE", + "links": [ + 189 + ], + "shape": 6, + "slot_index": 2, + "label": "cropped_enhanced_alpha" + }, + { + "name": "mask", + "type": "MASK", + "links": [ + 190 + ], + "shape": 3, + "slot_index": 3, + "label": "mask" + }, + { + "name": "detailer_pipe", + "type": "DETAILER_PIPE", + "links": [ + 191 + ], + "shape": 3, + "slot_index": 4, + "label": "detailer_pipe" + } + ], + "properties": { + "Node name for S&R": "FaceDetailerPipe" + }, + "widgets_values": [ + 256, + true, + 768, + 284739423125169, + "fixed", + 20, + 8, + "euler", + "normal", + 0.5, + 5, + true, + false, + 0.5, + 10, + 3, + "center-1", + 0, + 0.93, + 0, + 0.7, + "False", + 10 + ] + }, + { + "id": 49, + "type": "Reroute", + "pos": [ + 4967, + 568 + ], + "size": [ + 75, + 26 + ], + "flags": {}, + "order": 17, + "mode": 0, + "inputs": [ + { + "name": "", + "type": "*", + "link": 211, + "label": "" + } + ], + "outputs": [ + { + "name": 
"", + "type": "IMAGE", + "links": [ + 137, + 153, + 159, + 194 + ], + "slot_index": 0, + "label": "" + } + ], + "properties": { + "showOutputText": false, + "horizontal": false + } + }, + { + "id": 27, + "type": "PreviewImage", + "pos": [ + 2590, + 920 + ], + "size": { + "0": 210, + "1": 246 + }, + "flags": {}, + "order": 18, + "mode": 0, + "inputs": [ + { + "name": "images", + "type": "IMAGE", + "link": 180, + "label": "images" + } + ], + "properties": { + "Node name for S&R": "PreviewImage" + } + }, + { + "id": 74, + "type": "FaceDetailer", + "pos": [ + 2050, + 580 + ], + "size": { + "0": 372.5969543457031, + "1": 1103.0477294921875 + }, + "flags": {}, + "order": 14, + "mode": 0, + "inputs": [ + { + "name": "image", + "type": "IMAGE", + "link": 169, + "label": "image" + }, + { + "name": "model", + "type": "MODEL", + "link": 170, + "label": "model" + }, + { + "name": "clip", + "type": "CLIP", + "link": 171, + "label": "clip" + }, + { + "name": "vae", + "type": "VAE", + "link": 172, + "label": "vae" + }, + { + "name": "positive", + "type": "CONDITIONING", + "link": 175, + "label": "positive" + }, + { + "name": "negative", + "type": "CONDITIONING", + "link": 176, + "slot_index": 5, + "label": "negative" + }, + { + "name": "bbox_detector", + "type": "BBOX_DETECTOR", + "link": 177, + "label": "bbox_detector" + }, + { + "name": "sam_model_opt", + "type": "SAM_MODEL", + "link": 178, + "label": "sam_model_opt" + }, + { + "name": "segm_detector_opt", + "type": "SEGM_DETECTOR", + "link": 214, + "label": "segm_detector_opt" + }, + { + "name": "detailer_hook", + "type": "DETAILER_HOOK", + "link": null, + "label": "detailer_hook" + } + ], + "outputs": [ + { + "name": "image", + "type": "IMAGE", + "links": [ + 179, + 211 + ], + "shape": 3, + "slot_index": 0, + "label": "image" + }, + { + "name": "cropped_refined", + "type": "IMAGE", + "links": [ + 180 + ], + "shape": 6, + "slot_index": 1, + "label": "cropped_refined" + }, + { + "name": "cropped_enhanced_alpha", + "type": "IMAGE", + "links": [ + 181 + ], + "shape": 6, + "slot_index": 2, + "label": "cropped_enhanced_alpha" + }, + { + "name": "mask", + "type": "MASK", + "links": [ + 182 + ], + "shape": 3, + "slot_index": 3, + "label": "mask" + }, + { + "name": "detailer_pipe", + "type": "DETAILER_PIPE", + "links": [ + 193 + ], + "shape": 3, + "slot_index": 4, + "label": "detailer_pipe" + } + ], + "properties": { + "Node name for S&R": "FaceDetailer" + }, + "widgets_values": [ + 256, + true, + 768, + 872368928997833, + "fixed", + 20, + 8, + "euler", + "normal", + 0.5, + 5, + true, + false, + 0.5, + 10, + 3, + "center-1", + 0, + 0.93, + 0, + 0.7, + "False", + 10, + "" + ] + }, + { + "id": 38, + "type": "FromDetailerPipe", + "pos": [ + 2740, + 630 + ], + "size": { + "0": 342.5999755859375, + "1": 186 + }, + "flags": {}, + "order": 21, + "mode": 0, + "inputs": [ + { + "name": "detailer_pipe", + "type": "DETAILER_PIPE", + "link": 193, + "label": "detailer_pipe" + } + ], + "outputs": [ + { + "name": "model", + "type": "MODEL", + "links": [ + 61 + ], + "shape": 3, + "slot_index": 0, + "label": "model" + }, + { + "name": "clip", + "type": "CLIP", + "links": [ + 62 + ], + "shape": 3, + "slot_index": 1, + "label": "clip" + }, + { + "name": "vae", + "type": "VAE", + "links": [ + 65 + ], + "shape": 3, + "slot_index": 2, + "label": "vae" + }, + { + "name": "positive", + "type": "CONDITIONING", + "links": [ + 66 + ], + "shape": 3, + "slot_index": 3, + "label": "positive" + }, + { + "name": "negative", + "type": "CONDITIONING", + "links": [ + 67 + ], + "shape": 3, + 
"slot_index": 4, + "label": "negative" + }, + { + "name": "bbox_detector", + "type": "BBOX_DETECTOR", + "links": [ + 68 + ], + "shape": 3, + "slot_index": 5, + "label": "bbox_detector" + }, + { + "name": "sam_model_opt", + "type": "SAM_MODEL", + "links": [ + 69 + ], + "shape": 3, + "slot_index": 6, + "label": "sam_model_opt" + }, + { + "name": "segm_detector_opt", + "type": "SEGM_DETECTOR", + "links": [ + 203 + ], + "shape": 3, + "slot_index": 7, + "label": "segm_detector_opt" + }, + { + "name": "detailer_hook", + "type": "DETAILER_HOOK", + "links": null, + "shape": 3, + "label": "detailer_hook" + } + ], + "properties": { + "Node name for S&R": "FromDetailerPipe" + } + }, + { + "id": 87, + "type": "UltralyticsDetectorProvider", + "pos": [ + 862, + 1445 + ], + "size": { + "0": 315, + "1": 78 + }, + "flags": {}, + "order": 3, + "mode": 0, + "outputs": [ + { + "name": "BBOX_DETECTOR", + "type": "BBOX_DETECTOR", + "links": [], + "shape": 3, + "slot_index": 0, + "label": "BBOX_DETECTOR" + }, + { + "name": "SEGM_DETECTOR", + "type": "SEGM_DETECTOR", + "links": [ + 212, + 213 + ], + "shape": 3, + "slot_index": 1, + "label": "SEGM_DETECTOR" + } + ], + "properties": { + "Node name for S&R": "UltralyticsDetectorProvider" + }, + "widgets_values": [ + "segm/person_yolov8m-seg.pt" + ] + }, + { + "id": 77, + "type": "Reroute", + "pos": [ + 3500, + 170 + ], + "size": [ + 75, + 26 + ], + "flags": {}, + "order": 15, + "mode": 0, + "inputs": [ + { + "name": "", + "type": "*", + "link": 183, + "label": "" + } + ], + "outputs": [ + { + "name": "", + "type": "IMAGE", + "links": [ + 184, + 205, + 206, + 208 + ], + "slot_index": 0, + "label": "" + } + ], + "properties": { + "showOutputText": false, + "horizontal": false + } + }, + { + "id": 84, + "type": "SegmDetectorSEGS", + "pos": [ + 5130, + 1240 + ], + "size": { + "0": 315, + "1": 150 + }, + "flags": {}, + "order": 34, + "mode": 0, + "inputs": [ + { + "name": "segm_detector", + "type": "SEGM_DETECTOR", + "link": 204, + "label": "segm_detector" + }, + { + "name": "image", + "type": "IMAGE", + "link": 206, + "label": "image" + } + ], + "outputs": [ + { + "name": "SEGS", + "type": "SEGS", + "links": [ + 207 + ], + "shape": 3, + "slot_index": 0, + "label": "SEGS" + } + ], + "properties": { + "Node name for S&R": "SegmDetectorSEGS" + }, + "widgets_values": [ + 0.5, + 10, + 3, + 1 + ] + }, + { + "id": 34, + "type": "FromDetailerPipe", + "pos": [ + 1737, + -34 + ], + "size": { + "0": 342.5999755859375, + "1": 186 + }, + "flags": {}, + "order": 11, + "mode": 0, + "inputs": [ + { + "name": "detailer_pipe", + "type": "DETAILER_PIPE", + "link": 36, + "label": "detailer_pipe" + } + ], + "outputs": [ + { + "name": "model", + "type": "MODEL", + "links": [ + 170 + ], + "shape": 3, + "slot_index": 0, + "label": "model" + }, + { + "name": "clip", + "type": "CLIP", + "links": [ + 171 + ], + "shape": 3, + "slot_index": 1, + "label": "clip" + }, + { + "name": "vae", + "type": "VAE", + "links": [ + 172 + ], + "shape": 3, + "slot_index": 2, + "label": "vae" + }, + { + "name": "positive", + "type": "CONDITIONING", + "links": [ + 175 + ], + "shape": 3, + "slot_index": 3, + "label": "positive" + }, + { + "name": "negative", + "type": "CONDITIONING", + "links": [ + 176 + ], + "shape": 3, + "slot_index": 4, + "label": "negative" + }, + { + "name": "bbox_detector", + "type": "BBOX_DETECTOR", + "links": [ + 177 + ], + "shape": 3, + "slot_index": 5, + "label": "bbox_detector" + }, + { + "name": "sam_model_opt", + "type": "SAM_MODEL", + "links": [ + 178 + ], + "shape": 3, + "slot_index": 
6, + "label": "sam_model_opt" + }, + { + "name": "segm_detector_opt", + "type": "SEGM_DETECTOR", + "links": [ + 214 + ], + "shape": 3, + "slot_index": 7, + "label": "segm_detector_opt" + }, + { + "name": "detailer_hook", + "type": "DETAILER_HOOK", + "links": null, + "shape": 3, + "label": "detailer_hook" + } + ], + "properties": { + "Node name for S&R": "FromDetailerPipe" + } + }, + { + "id": 73, + "type": "DetailerForEachDebug", + "pos": [ + 5603, + 2282 + ], + "size": { + "0": 315, + "1": 678 + }, + "flags": {}, + "order": 36, + "mode": 0, + "inputs": [ + { + "name": "image", + "type": "IMAGE", + "link": 159, + "label": "image" + }, + { + "name": "segs", + "type": "SEGS", + "link": 160, + "label": "segs" + }, + { + "name": "model", + "type": "MODEL", + "link": 161, + "label": "model" + }, + { + "name": "clip", + "type": "CLIP", + "link": 162, + "label": "clip" + }, + { + "name": "vae", + "type": "VAE", + "link": 163, + "label": "vae" + }, + { + "name": "positive", + "type": "CONDITIONING", + "link": 164, + "label": "positive" + }, + { + "name": "negative", + "type": "CONDITIONING", + "link": 165, + "label": "negative" + }, + { + "name": "detailer_hook", + "type": "DETAILER_HOOK", + "link": null, + "label": "detailer_hook" + } + ], + "outputs": [ + { + "name": "image", + "type": "IMAGE", + "links": [ + 166 + ], + "shape": 3, + "slot_index": 0, + "label": "image" + }, + { + "name": "cropped", + "type": "IMAGE", + "links": [ + 167 + ], + "shape": 6, + "slot_index": 1, + "label": "cropped" + }, + { + "name": "cropped_refined", + "type": "IMAGE", + "links": [ + 168 + ], + "shape": 6, + "slot_index": 2, + "label": "cropped_refined" + }, + { + "name": "cropped_refined_alpha", + "type": "IMAGE", + "links": null, + "shape": 6, + "label": "cropped_refined_alpha" + } + ], + "properties": { + "Node name for S&R": "DetailerForEachDebug" + }, + "widgets_values": [ + 256, + true, + 768, + 225176759887640, + "fixed", + 20, + 8, + "euler", + "normal", + 0.5, + 5, + true, + false, + "" + ] + }, + { + "id": 61, + "type": "PreviewImage", + "pos": [ + 6000, + 2450 + ], + "size": { + "0": 210, + "1": 246 + }, + "flags": {}, + "order": 40, + "mode": 0, + "inputs": [ + { + "name": "images", + "type": "IMAGE", + "link": 167, + "label": "images" + } + ], + "properties": { + "Node name for S&R": "PreviewImage" + } + }, + { + "id": 62, + "type": "PreviewImage", + "pos": [ + 5990, + 2780 + ], + "size": { + "0": 210, + "1": 246 + }, + "flags": {}, + "order": 41, + "mode": 0, + "inputs": [ + { + "name": "images", + "type": "IMAGE", + "link": 168, + "label": "images" + } + ], + "properties": { + "Node name for S&R": "PreviewImage" + } + }, + { + "id": 4, + "type": "CheckpointLoaderSimple", + "pos": [ + 26, + 474 + ], + "size": { + "0": 315, + "1": 98 + }, + "flags": {}, + "order": 4, + "mode": 0, + "outputs": [ + { + "name": "MODEL", + "type": "MODEL", + "links": [ + 1 + ], + "slot_index": 0, + "label": "MODEL" + }, + { + "name": "CLIP", + "type": "CLIP", + "links": [ + 3, + 5 + ], + "slot_index": 1, + "label": "CLIP" + }, + { + "name": "VAE", + "type": "VAE", + "links": [ + 8 + ], + "slot_index": 2, + "label": "VAE" + } + ], + "properties": { + "Node name for S&R": "CheckpointLoaderSimple" + }, + "widgets_values": [ + "SD1.5/V07_v07.safetensors" + ] + }, + { + "id": 19, + "type": "## make-basic_pipe [2c8c61]", + "pos": [ + 502, + 860 + ], + "size": { + "0": 400, + "1": 200 + }, + "flags": {}, + "order": 5, + "mode": 0, + "inputs": [ + { + "name": "vae_opt", + "type": "VAE", + "link": null, + "label": "vae_opt" + } + 
], + "outputs": [ + { + "name": "BASIC_PIPE", + "type": "BASIC_PIPE", + "links": [ + 17, + 34 + ], + "shape": 3, + "slot_index": 0, + "label": "BASIC_PIPE" + } + ], + "title": "## make-basic_pipe", + "properties": { + "Node name for S&R": "## make-basic_pipe [2c8c61]" + }, + "widgets_values": [ + "SD1.5/V07_v07.safetensors", + "", + "text, watermark, worst quality:1.4, low quality:1.4" + ] + } + ], + "links": [ + [ + 1, + 4, + 0, + 3, + 0, + "MODEL" + ], + [ + 2, + 5, + 0, + 3, + 3, + "LATENT" + ], + [ + 3, + 4, + 1, + 6, + 0, + "CLIP" + ], + [ + 4, + 6, + 0, + 3, + 1, + "CONDITIONING" + ], + [ + 5, + 4, + 1, + 7, + 0, + "CLIP" + ], + [ + 6, + 7, + 0, + 3, + 2, + "CONDITIONING" + ], + [ + 7, + 3, + 0, + 8, + 0, + "LATENT" + ], + [ + 8, + 4, + 2, + 8, + 1, + "VAE" + ], + [ + 10, + 8, + 0, + 10, + 0, + "IMAGE" + ], + [ + 17, + 19, + 0, + 22, + 0, + "BASIC_PIPE" + ], + [ + 19, + 24, + 0, + 22, + 2, + "SAM_MODEL" + ], + [ + 33, + 24, + 0, + 32, + 2, + "SAM_MODEL" + ], + [ + 34, + 19, + 0, + 32, + 0, + "BASIC_PIPE" + ], + [ + 36, + 32, + 0, + 34, + 0, + "DETAILER_PIPE" + ], + [ + 59, + 36, + 0, + 37, + 0, + "IMAGE" + ], + [ + 61, + 38, + 0, + 39, + 0, + "MODEL" + ], + [ + 62, + 38, + 1, + 39, + 1, + "CLIP" + ], + [ + 65, + 38, + 2, + 39, + 2, + "VAE" + ], + [ + 66, + 38, + 3, + 39, + 3, + "CONDITIONING" + ], + [ + 67, + 38, + 4, + 39, + 4, + "CONDITIONING" + ], + [ + 68, + 38, + 5, + 39, + 5, + "BBOX_DETECTOR" + ], + [ + 69, + 38, + 6, + 39, + 6, + "SAM_MODEL" + ], + [ + 75, + 43, + 0, + 44, + 0, + "IMAGE" + ], + [ + 77, + 45, + 0, + 46, + 0, + "DETAILER_PIPE" + ], + [ + 82, + 45, + 0, + 50, + 0, + "DETAILER_PIPE" + ], + [ + 83, + 50, + 6, + 51, + 0, + "SAM_MODEL" + ], + [ + 85, + 50, + 5, + 52, + 0, + "BBOX_DETECTOR" + ], + [ + 87, + 52, + 0, + 51, + 1, + "SEGS" + ], + [ + 88, + 51, + 0, + 53, + 0, + "MASK" + ], + [ + 137, + 49, + 0, + 69, + 0, + "IMAGE" + ], + [ + 138, + 53, + 0, + 69, + 1, + "SEGS" + ], + [ + 139, + 50, + 0, + 69, + 2, + "MODEL" + ], + [ + 140, + 50, + 1, + 69, + 3, + "CLIP" + ], + [ + 141, + 50, + 2, + 69, + 4, + "VAE" + ], + [ + 142, + 50, + 3, + 69, + 5, + "CONDITIONING" + ], + [ + 143, + 50, + 4, + 69, + 6, + "CONDITIONING" + ], + [ + 144, + 69, + 0, + 57, + 0, + "IMAGE" + ], + [ + 153, + 49, + 0, + 72, + 0, + "IMAGE" + ], + [ + 154, + 53, + 0, + 72, + 1, + "SEGS" + ], + [ + 155, + 46, + 0, + 72, + 2, + "BASIC_PIPE" + ], + [ + 156, + 72, + 0, + 64, + 0, + "IMAGE" + ], + [ + 157, + 72, + 1, + 65, + 0, + "IMAGE" + ], + [ + 158, + 72, + 2, + 66, + 0, + "IMAGE" + ], + [ + 159, + 49, + 0, + 73, + 0, + "IMAGE" + ], + [ + 160, + 52, + 0, + 73, + 1, + "SEGS" + ], + [ + 161, + 50, + 0, + 73, + 2, + "MODEL" + ], + [ + 162, + 50, + 1, + 73, + 3, + "CLIP" + ], + [ + 163, + 50, + 2, + 73, + 4, + "VAE" + ], + [ + 164, + 50, + 3, + 73, + 5, + "CONDITIONING" + ], + [ + 165, + 50, + 4, + 73, + 6, + "CONDITIONING" + ], + [ + 166, + 73, + 0, + 60, + 0, + "IMAGE" + ], + [ + 167, + 73, + 1, + 61, + 0, + "IMAGE" + ], + [ + 168, + 73, + 2, + 62, + 0, + "IMAGE" + ], + [ + 169, + 10, + 0, + 74, + 0, + "IMAGE" + ], + [ + 170, + 34, + 0, + 74, + 1, + "MODEL" + ], + [ + 171, + 34, + 1, + 74, + 2, + "CLIP" + ], + [ + 172, + 34, + 2, + 74, + 3, + "VAE" + ], + [ + 175, + 34, + 3, + 74, + 4, + "CONDITIONING" + ], + [ + 176, + 34, + 4, + 74, + 5, + "CONDITIONING" + ], + [ + 177, + 34, + 5, + 74, + 6, + "BBOX_DETECTOR" + ], + [ + 178, + 34, + 6, + 74, + 7, + "SAM_MODEL" + ], + [ + 179, + 74, + 0, + 30, + 0, + "IMAGE" + ], + [ + 180, + 74, + 1, + 27, + 0, + "IMAGE" + ], + [ + 181, + 74, + 2, + 75, + 0, + 
"IMAGE" + ], + [ + 182, + 74, + 3, + 36, + 0, + "MASK" + ], + [ + 183, + 10, + 0, + 77, + 0, + "*" + ], + [ + 184, + 77, + 0, + 76, + 0, + "IMAGE" + ], + [ + 186, + 76, + 0, + 41, + 0, + "IMAGE" + ], + [ + 187, + 76, + 1, + 42, + 0, + "IMAGE" + ], + [ + 188, + 76, + 0, + 52, + 1, + "IMAGE" + ], + [ + 189, + 76, + 2, + 78, + 0, + "IMAGE" + ], + [ + 190, + 76, + 3, + 43, + 0, + "MASK" + ], + [ + 191, + 76, + 4, + 45, + 0, + "DETAILER_PIPE" + ], + [ + 193, + 74, + 4, + 38, + 0, + "DETAILER_PIPE" + ], + [ + 194, + 49, + 0, + 81, + 0, + "IMAGE" + ], + [ + 195, + 53, + 0, + 81, + 1, + "SEGS" + ], + [ + 196, + 46, + 0, + 81, + 2, + "BASIC_PIPE" + ], + [ + 197, + 81, + 0, + 54, + 0, + "IMAGE" + ], + [ + 200, + 72, + 3, + 82, + 0, + "IMAGE" + ], + [ + 201, + 83, + 0, + 22, + 1, + "BBOX_DETECTOR" + ], + [ + 202, + 83, + 0, + 32, + 1, + "BBOX_DETECTOR" + ], + [ + 203, + 38, + 7, + 39, + 7, + "SEGM_DETECTOR" + ], + [ + 204, + 50, + 7, + 84, + 0, + "SEGM_DETECTOR" + ], + [ + 205, + 77, + 0, + 51, + 2, + "IMAGE" + ], + [ + 206, + 77, + 0, + 84, + 1, + "IMAGE" + ], + [ + 207, + 84, + 0, + 85, + 0, + "SEGS" + ], + [ + 208, + 77, + 0, + 85, + 1, + "IMAGE" + ], + [ + 209, + 85, + 0, + 86, + 0, + "IMAGE" + ], + [ + 210, + 39, + 0, + 76, + 1, + "DETAILER_PIPE" + ], + [ + 211, + 74, + 0, + 49, + 0, + "*" + ], + [ + 212, + 87, + 1, + 22, + 3, + "SEGM_DETECTOR" + ], + [ + 213, + 87, + 1, + 32, + 3, + "SEGM_DETECTOR" + ], + [ + 214, + 34, + 7, + 74, + 8, + "SEGM_DETECTOR" + ] + ], + "groups": [], + "config": {}, + "extra": {}, + "version": 0.4 +} \ No newline at end of file diff --git a/zavodik/nodes/ComfyUI-Impact-Pack/tests/workflows/impactwildcardprocessor_separate_tests.json b/zavodik/nodes/ComfyUI-Impact-Pack/tests/workflows/impactwildcardprocessor_separate_tests.json new file mode 100644 index 0000000000000000000000000000000000000000..1e3d326eecbecad61ffc64416a79d5b2b2654113 --- /dev/null +++ b/zavodik/nodes/ComfyUI-Impact-Pack/tests/workflows/impactwildcardprocessor_separate_tests.json @@ -0,0 +1 @@ +{"last_node_id":87,"last_link_id":21,"nodes":[{"id":5,"type":"ShowText|pysssss","pos":[-12595.3798828125,-4455.8984375],"size":[315,76],"flags":{},"order":47,"mode":0,"inputs":[{"name":"text","type":"STRING","link":2,"widget":{"name":"text"}}],"outputs":[{"name":"STRING","type":"STRING","links":null,"shape":6}],"properties":{"Node name for S&R":"ShowText|pysssss"},"widgets_values":["","0 1 3 2"]},{"id":6,"type":"ImpactWildcardProcessor","pos":[-13061.4169921875,-5009.56689453125],"size":[400,222],"flags":{},"order":0,"mode":0,"inputs":[],"outputs":[{"name":"STRING","type":"STRING","links":[3],"slot_index":0}],"properties":{"Node name for S&R":"ImpactWildcardProcessor"},"widgets_values":["{1-3$$0|1|2|3}","0 2","populate",501646603955617,"randomize","Select the Wildcard to add to the text"]},{"id":9,"type":"ShowText|pysssss","pos":[-12614.6201171875,-5330.05126953125],"size":[315,76],"flags":{},"order":37,"mode":0,"inputs":[{"name":"text","type":"STRING","link":4,"widget":{"name":"text"}}],"outputs":[{"name":"STRING","type":"STRING","links":null,"shape":6}],"properties":{"Node name for S&R":"ShowText|pysssss"},"widgets_values":["","3"]},{"id":11,"type":"ShowText|pysssss","pos":[-12604.265625,-4732.607421875],"size":[315,76],"flags":{},"order":46,"mode":0,"inputs":[{"name":"text","type":"STRING","link":5,"widget":{"name":"text"}}],"outputs":[{"name":"STRING","type":"STRING","links":null,"shape":6}],"properties":{"Node name for S&R":"ShowText|pysssss"},"widgets_values":["","1 3 
2"]},{"id":7,"type":"ShowText|pysssss","pos":[-12602.5078125,-5013.27099609375],"size":[315,76],"flags":{},"order":36,"mode":0,"inputs":[{"name":"text","type":"STRING","link":3,"widget":{"name":"text"}}],"outputs":[{"name":"STRING","type":"STRING","links":null,"shape":6}],"properties":{"Node name for S&R":"ShowText|pysssss"},"widgets_values":["","0 2"]},{"id":8,"type":"ImpactWildcardProcessor","pos":[-13065.423828125,-5326.34716796875],"size":[400,222],"flags":{},"order":1,"mode":0,"inputs":[],"outputs":[{"name":"STRING","type":"STRING","links":[4],"slot_index":0}],"properties":{"Node name for S&R":"ImpactWildcardProcessor"},"widgets_values":["{-3$$0|1|2|3}","3","populate",248114349904436,"randomize","Select the Wildcard to add to the text"]},{"id":23,"type":"ShowText|pysssss","pos":[-11348.609375,-4470.376953125],"size":[315,76],"flags":{},"order":41,"mode":0,"inputs":[{"name":"text","type":"STRING","link":6,"widget":{"name":"text"}}],"outputs":[{"name":"STRING","type":"STRING","links":null,"shape":6}],"properties":{"Node name for S&R":"ShowText|pysssss"},"widgets_values":["","2"]},{"id":25,"type":"ShowText|pysssss","pos":[-11367.849609375,-5344.52978515625],"size":[315,76],"flags":{},"order":38,"mode":0,"inputs":[{"name":"text","type":"STRING","link":7,"widget":{"name":"text"}}],"outputs":[{"name":"STRING","type":"STRING","links":null,"shape":6}],"properties":{"Node name for S&R":"ShowText|pysssss"},"widgets_values":["","2, 0"]},{"id":26,"type":"ShowText|pysssss","pos":[-11357.4951171875,-4747.0859375],"size":[315,76],"flags":{},"order":40,"mode":0,"inputs":[{"name":"text","type":"STRING","link":8,"widget":{"name":"text"}}],"outputs":[{"name":"STRING","type":"STRING","links":null,"shape":6}],"properties":{"Node name for S&R":"ShowText|pysssss"},"widgets_values":["","3, 0, 1, 2"]},{"id":27,"type":"ShowText|pysssss","pos":[-11355.7373046875,-5027.74951171875],"size":[315,76],"flags":{},"order":39,"mode":0,"inputs":[{"name":"text","type":"STRING","link":9,"widget":{"name":"text"}}],"outputs":[{"name":"STRING","type":"STRING","links":null,"shape":6}],"properties":{"Node name for S&R":"ShowText|pysssss"},"widgets_values":["","0, 1, 2"]},{"id":29,"type":"ImpactWildcardProcessor","pos":[-11818.6533203125,-5340.82568359375],"size":[400,222],"flags":{},"order":2,"mode":0,"inputs":[],"outputs":[{"name":"STRING","type":"STRING","links":[7],"slot_index":0}],"properties":{"Node name for S&R":"ImpactWildcardProcessor"},"widgets_values":["{-3$$, $$0|1|2|3}","2, 0","populate",459223692509123,"randomize","Select the Wildcard to add to the text"]},{"id":24,"type":"ImpactWildcardProcessor","pos":[-11814.646484375,-5024.04541015625],"size":[400,222],"flags":{},"order":3,"mode":0,"inputs":[],"outputs":[{"name":"STRING","type":"STRING","links":[9],"slot_index":0}],"properties":{"Node name for S&R":"ImpactWildcardProcessor"},"widgets_values":["{1-3$$, $$0|1|2|3}","0, 1, 2","populate",937484056347845,"randomize","Select the Wildcard to add to the text"]},{"id":28,"type":"ImpactWildcardProcessor","pos":[-11819.107421875,-4747.43505859375],"size":[400,222],"flags":{},"order":4,"mode":0,"inputs":[],"outputs":[{"name":"STRING","type":"STRING","links":[8],"slot_index":0}],"properties":{"Node name for S&R":"ImpactWildcardProcessor"},"widgets_values":["{1-9999999$$, $$0|1|2|3}","3, 0, 1, 2","populate",486886596391539,"randomize","Select the Wildcard to add to the 
text"]},{"id":22,"type":"ImpactWildcardProcessor","pos":[-11818.48046875,-4471.24951171875],"size":[400,222],"flags":{},"order":5,"mode":0,"inputs":[],"outputs":[{"name":"STRING","type":"STRING","links":[6],"slot_index":0}],"properties":{"Node name for S&R":"ImpactWildcardProcessor"},"widgets_values":["{1-$$, $$0|1|2|3}","2","populate",125928663019713,"randomize","Select the Wildcard to add to the text"]},{"id":48,"type":"ShowText|pysssss","pos":[-12539.638671875,-2811.31591796875],"size":[315,76],"flags":{},"order":45,"mode":0,"inputs":[{"name":"text","type":"STRING","link":10,"widget":{"name":"text"}}],"outputs":[{"name":"STRING","type":"STRING","links":null,"shape":6}],"properties":{"Node name for S&R":"ShowText|pysssss"},"widgets_values":["","0 2, 0, 1 0, 1, 2 __test_wildcard/empty_item/*__ __test_wildcard/empty_item/*__"]},{"id":50,"type":"ShowText|pysssss","pos":[-12558.87890625,-3685.46875],"size":[315,76],"flags":{},"order":42,"mode":0,"inputs":[{"name":"text","type":"STRING","link":11,"widget":{"name":"text"}}],"outputs":[{"name":"STRING","type":"STRING","links":null,"shape":6}],"properties":{"Node name for S&R":"ShowText|pysssss"},"widgets_values":["","0 __test_wildcard/empty_item/*__ 0, 1, 2"]},{"id":51,"type":"ShowText|pysssss","pos":[-12548.5244140625,-3088.02490234375],"size":[315,76],"flags":{},"order":44,"mode":0,"inputs":[{"name":"text","type":"STRING","link":12,"widget":{"name":"text"}}],"outputs":[{"name":"STRING","type":"STRING","links":null,"shape":6}],"properties":{"Node name for S&R":"ShowText|pysssss"},"widgets_values":["","0 0 __test_wildcard/empty_item/*__ 1 0 0 1 0 __test_wildcard/empty_item/*__"]},{"id":52,"type":"ShowText|pysssss","pos":[-12546.7666015625,-3368.6884765625],"size":[315,76],"flags":{},"order":43,"mode":0,"inputs":[{"name":"text","type":"STRING","link":13,"widget":{"name":"text"}}],"outputs":[{"name":"STRING","type":"STRING","links":null,"shape":6}],"properties":{"Node name for S&R":"ShowText|pysssss"},"widgets_values":["","__test_wildcard/empty_item/*__ __test_wildcard/empty_item/*__ 0"]},{"id":55,"type":"ShowText|pysssss","pos":[-11292.8681640625,-2825.79443359375],"size":[315,76],"flags":{},"order":48,"mode":0,"inputs":[{"name":"text","type":"STRING","link":14,"widget":{"name":"text"}}],"outputs":[{"name":"STRING","type":"STRING","links":null,"shape":6}],"properties":{"Node name for S&R":"ShowText|pysssss"},"widgets_values":["","1 2 0separator0separator0separator0, 1, 2"]},{"id":56,"type":"ShowText|pysssss","pos":[-11312.1083984375,-3699.947265625],"size":[315,76],"flags":{},"order":51,"mode":0,"inputs":[{"name":"text","type":"STRING","link":15,"widget":{"name":"text"}}],"outputs":[{"name":"STRING","type":"STRING","links":null,"shape":6}],"properties":{"Node name for S&R":"ShowText|pysssss"},"widgets_values":["","0, 1, 2separator__test_wildcard/empty_item/*__separator0"]},{"id":57,"type":"ShowText|pysssss","pos":[-11301.75390625,-3102.50341796875],"size":[315,76],"flags":{},"order":49,"mode":0,"inputs":[{"name":"text","type":"STRING","link":16,"widget":{"name":"text"}}],"outputs":[{"name":"STRING","type":"STRING","links":null,"shape":6}],"properties":{"Node name for S&R":"ShowText|pysssss"},"widgets_values":["","2, 1separator0, 1, 2separator__test_wildcard/empty_item/*__separator__test_wildcard/empty_item/*__separator0 1 2separator0, 
1separator0separator0"]},{"id":58,"type":"ShowText|pysssss","pos":[-11299.99609375,-3383.1669921875],"size":[315,76],"flags":{},"order":50,"mode":0,"inputs":[{"name":"text","type":"STRING","link":17,"widget":{"name":"text"}}],"outputs":[{"name":"STRING","type":"STRING","links":null,"shape":6}],"properties":{"Node name for S&R":"ShowText|pysssss"},"widgets_values":["","__test_wildcard/empty_item/*__separator0"]},{"id":60,"type":"MarkdownNote","pos":[-11900.6484375,-3872.76025390625],"size":[536.965087890625,103.656005859375],"flags":{},"order":6,"mode":0,"inputs":[],"outputs":[],"properties":{},"widgets_values":["# separator: range$$sep$$"],"color":"#432","bgcolor":"#653"},{"id":59,"type":"MarkdownNote","pos":[-13181.8271484375,-3886.89794921875],"size":[536.965087890625,103.656005859375],"flags":{},"order":7,"mode":0,"inputs":[],"outputs":[],"properties":{},"widgets_values":["# separator: range$$"],"color":"#432","bgcolor":"#653"},{"id":35,"type":"MarkdownNote","pos":[-13237.568359375,-5531.48046875],"size":[536.965087890625,103.656005859375],"flags":{},"order":8,"mode":0,"inputs":[],"outputs":[],"properties":{},"widgets_values":["# separator: range$$"],"color":"#432","bgcolor":"#653"},{"id":36,"type":"MarkdownNote","pos":[-11956.3896484375,-5517.3427734375],"size":[536.965087890625,103.656005859375],"flags":{},"order":9,"mode":0,"inputs":[],"outputs":[],"properties":{},"widgets_values":["# separator: range$$sep$$"],"color":"#432","bgcolor":"#653"},{"id":54,"type":"ImpactWildcardProcessor","pos":[-13009.6826171875,-3681.7646484375],"size":[400,222],"flags":{},"order":10,"mode":0,"inputs":[],"outputs":[{"name":"STRING","type":"STRING","links":[11],"slot_index":0}],"properties":{"Node name for S&R":"ImpactWildcardProcessor"},"widgets_values":["{-3$$__test_wildcard/single_text_only/*__|__test_wildcard/empty_item/*__|__test_wildcard/text_only/*__|__test_wildcard/raw_wildcard/*__|__test_wildcard/raw_wildcard/*__|__test_wildcard/reference_wildcard/*__|__test_wildcard/reference_single_text_only_wildcard/*__|__test_wildcard/reference_empty_item/*__}","0 __test_wildcard/empty_item/*__ 0, 1, 2","populate",198720804736378,"randomize","Select the Wildcard to add to the text"]},{"id":49,"type":"ImpactWildcardProcessor","pos":[-13005.67578125,-3364.984375],"size":[400,222],"flags":{},"order":11,"mode":0,"inputs":[],"outputs":[{"name":"STRING","type":"STRING","links":[13],"slot_index":0}],"properties":{"Node name for S&R":"ImpactWildcardProcessor"},"widgets_values":["{1-3$$__test_wildcard/single_text_only/*__|__test_wildcard/empty_item/*__|__test_wildcard/text_only/*__|__test_wildcard/raw_wildcard/*__|__test_wildcard/raw_wildcard/*__|__test_wildcard/reference_wildcard/*__|__test_wildcard/reference_single_text_only_wildcard/*__|__test_wildcard/reference_empty_item/*__}","__test_wildcard/empty_item/*__ __test_wildcard/empty_item/*__ 0","populate",608971834515154,"randomize","Select the Wildcard to add to the text"]},{"id":53,"type":"ImpactWildcardProcessor","pos":[-13010.13671875,-3088.3740234375],"size":[400,222],"flags":{},"order":12,"mode":0,"inputs":[],"outputs":[{"name":"STRING","type":"STRING","links":[12],"slot_index":0}],"properties":{"Node name for 
S&R":"ImpactWildcardProcessor"},"widgets_values":["{1-9999999$$__test_wildcard/single_text_only/*__|__test_wildcard/empty_item/*__|__test_wildcard/text_only/*__|__test_wildcard/raw_wildcard/*__|__test_wildcard/raw_wildcard/*__|__test_wildcard/reference_wildcard/*__|__test_wildcard/reference_single_text_only_wildcard/*__|__test_wildcard/reference_empty_item/*__}","0 0 __test_wildcard/empty_item/*__ 1 0 0 1 0 __test_wildcard/empty_item/*__","populate",849105155936244,"randomize","Select the Wildcard to add to the text"]},{"id":47,"type":"ImpactWildcardProcessor","pos":[-13009.509765625,-2812.1884765625],"size":[400,222],"flags":{},"order":13,"mode":0,"inputs":[],"outputs":[{"name":"STRING","type":"STRING","links":[10],"slot_index":0}],"properties":{"Node name for S&R":"ImpactWildcardProcessor"},"widgets_values":["{1-$$__test_wildcard/single_text_only/*__|__test_wildcard/empty_item/*__|__test_wildcard/text_only/*__|__test_wildcard/raw_wildcard/*__|__test_wildcard/raw_wildcard/*__|__test_wildcard/reference_wildcard/*__|__test_wildcard/reference_single_text_only_wildcard/*__|__test_wildcard/reference_empty_item/*__}","0 2, 0, 1 0, 1, 2 __test_wildcard/empty_item/*__ __test_wildcard/empty_item/*__","populate",1084307274687290,"randomize","Select the Wildcard to add to the text"]},{"id":43,"type":"MarkdownNote","pos":[-13416.291015625,-5316.21484375],"size":[284.1663513183594,190.96800231933594],"flags":{},"order":14,"mode":0,"inputs":[],"outputs":[],"properties":{},"widgets_values":["# range: -n$$\nexpected \nseparator = \" \" \nprobability: len(result) \n 1: 33%, 2: 33%, 3: 33%\n"],"color":"#432","bgcolor":"#653"},{"id":37,"type":"MarkdownNote","pos":[-12123.849609375,-5326.48681640625],"size":[284.1663513183594,190.96800231933594],"flags":{},"order":15,"mode":0,"inputs":[],"outputs":[],"properties":{},"widgets_values":["# range: -n$$\nexpected \nseparator = sep \nprobability: len(result) \n 1: 33%, 2: 33%, 3: 33%\n"],"color":"#432","bgcolor":"#653"},{"id":44,"type":"MarkdownNote","pos":[-13406.4375,-4992.236328125],"size":[284.1663513183594,190.96800231933594],"flags":{},"order":16,"mode":0,"inputs":[],"outputs":[],"properties":{},"widgets_values":["# range: n1-n2$$\nexpected \nseparator = \" \" \nprobability: len(result) \n 1: 33%, 2: 33%, 3: 33%\n"],"color":"#432","bgcolor":"#653"},{"id":68,"type":"MarkdownNote","pos":[-13360.5498046875,-3671.63232421875],"size":[284.1663513183594,190.96800231933594],"flags":{},"order":17,"mode":0,"inputs":[],"outputs":[],"properties":{},"widgets_values":["# range: -n$$\nexpected \nseparator = \" \" \nprobability: len(result) \n 1: 33%, 2: 33%, 3: 33%\n"],"color":"#432","bgcolor":"#653"},{"id":69,"type":"MarkdownNote","pos":[-12068.1083984375,-3681.904296875],"size":[284.1663513183594,190.96800231933594],"flags":{},"order":18,"mode":0,"inputs":[],"outputs":[],"properties":{},"widgets_values":["# range: -n$$\nexpected \nseparator = sep \nprobability: len(result) \n 1: 33%, 2: 33%, 3: 33%\n"],"color":"#432","bgcolor":"#653"},{"id":66,"type":"MarkdownNote","pos":[-13350.6962890625,-3347.65380859375],"size":[284.1663513183594,190.96800231933594],"flags":{},"order":19,"mode":0,"inputs":[],"outputs":[],"properties":{},"widgets_values":["# range: n1-n2$$\nexpected \nseparator = \" \" \nprobability: len(result) \n 1: 33%, 2: 33%, 3: 
33%\n"],"color":"#432","bgcolor":"#653"},{"id":38,"type":"MarkdownNote","pos":[-12113.99609375,-5002.50830078125],"size":[284.1663513183594,190.96800231933594],"flags":{},"order":20,"mode":0,"inputs":[],"outputs":[],"properties":{},"widgets_values":["# range: n1-n2$$\nexpected \nseparator = sep \nprobability: len(result) \n 1: 33%, 2: 33%, 3: 33%\n"],"color":"#432","bgcolor":"#653"},{"id":42,"type":"MarkdownNote","pos":[-13408.37109375,-4732.05224609375],"size":[284.8597717285156,218.01190185546875],"flags":{},"order":21,"mode":0,"inputs":[],"outputs":[],"properties":{},"widgets_values":["# range: n1-(itemcount <<<< n2)$$\n## expected \nseparator = \" \" \nprobability: len(result) \n 1: 25%, 2: 25%, 3: 25%. 4: 25% \n\n"],"color":"#432","bgcolor":"#653"},{"id":65,"type":"MarkdownNote","pos":[-13352.6298828125,-3087.4697265625],"size":[284.8597717285156,218.01190185546875],"flags":{},"order":22,"mode":0,"inputs":[],"outputs":[],"properties":{},"widgets_values":["# range: n1-(itemcount <<<< n2)$$\n## expected \nseparator = \" \" \nprobability: len(result) \n 1: 25%, 2: 25%, 3: 25%. 4: 25% \n\n"],"color":"#432","bgcolor":"#653"},{"id":67,"type":"MarkdownNote","pos":[-13354.5634765625,-2812.72412109375],"size":[281.3926086425781,206.91697692871094],"flags":{},"order":23,"mode":0,"inputs":[],"outputs":[],"properties":{},"widgets_values":["# range: n1-$$\n## expected \nseparator = \" \" \nprobability: len(result) \n 1: 25%, 2: 25%, 3: 25%. 4: 25% \n\n"],"color":"#432","bgcolor":"#653"},{"id":72,"type":"MarkdownNote","pos":[-12062.1220703125,-2822.9970703125],"size":[281.3926086425781,206.91697692871094],"flags":{},"order":24,"mode":0,"inputs":[],"outputs":[],"properties":{},"widgets_values":["# range: n1-$$\n## expected \nseparator = sep \nprobability: len(result) \n 1: 25%, 2: 25%, 3: 25%. 4: 25% \n\n"],"color":"#432","bgcolor":"#653"},{"id":71,"type":"MarkdownNote","pos":[-12060.1884765625,-3097.74169921875],"size":[284.8597717285156,218.01190185546875],"flags":{},"order":25,"mode":0,"inputs":[],"outputs":[],"properties":{},"widgets_values":["# range: n1-(itemcount <<<< n2)$$\n## expected \nseparator = sep \nprobability: len(result) \n 1: 25%, 2: 25%, 3: 25%. 4: 25% \n\n"],"color":"#432","bgcolor":"#653"},{"id":70,"type":"MarkdownNote","pos":[-12058.2548828125,-3357.92578125],"size":[284.1663513183594,190.96800231933594],"flags":{},"order":26,"mode":0,"inputs":[],"outputs":[],"properties":{},"widgets_values":["# range: n1-n2$$\nexpected \nseparator = sep \nprobability: len(result) \n 1: 33%, 2: 33%, 3: 33%\n"],"color":"#432","bgcolor":"#653"},{"id":41,"type":"MarkdownNote","pos":[-12117.86328125,-4467.57958984375],"size":[281.3926086425781,206.91697692871094],"flags":{},"order":27,"mode":0,"inputs":[],"outputs":[],"properties":{},"widgets_values":["# range: n1-$$\n## expected \nseparator = sep \nprobability: len(result) \n 1: 25%, 2: 25%, 3: 25%. 4: 25% \n\n"],"color":"#432","bgcolor":"#653"},{"id":40,"type":"MarkdownNote","pos":[-12115.9296875,-4742.32421875],"size":[284.8597717285156,218.01190185546875],"flags":{},"order":28,"mode":0,"inputs":[],"outputs":[],"properties":{},"widgets_values":["# range: n1-(itemcount <<<< n2)$$\n## expected \nseparator = sep \nprobability: len(result) \n 1: 25%, 2: 25%, 3: 25%. 
4: 25% \n\n"],"color":"#432","bgcolor":"#653"},{"id":10,"type":"ImpactWildcardProcessor","pos":[-13065.8779296875,-4732.95654296875],"size":[400,222],"flags":{},"order":29,"mode":0,"inputs":[],"outputs":[{"name":"STRING","type":"STRING","links":[5],"slot_index":0}],"properties":{"Node name for S&R":"ImpactWildcardProcessor"},"widgets_values":["{1-9999999$$0|1|2|3}","1 3 2","populate",924132932593852,"randomize","Select the Wildcard to add to the text"]},{"id":45,"type":"MarkdownNote","pos":[-13410.3046875,-4457.306640625],"size":[281.3926086425781,206.91697692871094],"flags":{},"order":30,"mode":0,"inputs":[],"outputs":[],"properties":{},"widgets_values":["# range: n1-$$\n## expected \nseparator = \" \" \nprobability: len(result) \n 1: 25%, 2: 25%, 3: 25%. 4: 25% \n\n"],"color":"#432","bgcolor":"#653"},{"id":4,"type":"ImpactWildcardProcessor","pos":[-13065.2509765625,-4456.77099609375],"size":[400,222],"flags":{},"order":31,"mode":0,"inputs":[],"outputs":[{"name":"STRING","type":"STRING","links":[2],"slot_index":0}],"properties":{"Node name for S&R":"ImpactWildcardProcessor"},"widgets_values":["{1-$$0|1|2|3}","0 1 3 2","populate",164209619769734,"randomize","Select the Wildcard to add to the text"]},{"id":64,"type":"ImpactWildcardProcessor","pos":[-11762.7392578125,-2826.6669921875],"size":[400,222],"flags":{},"order":32,"mode":0,"inputs":[],"outputs":[{"name":"STRING","type":"STRING","links":[14],"slot_index":0}],"properties":{"Node name for S&R":"ImpactWildcardProcessor"},"widgets_values":["{1-$$separator$$__test_wildcard/single_text_only/*__|__test_wildcard/empty_item/*__|__test_wildcard/text_only/*__|__test_wildcard/raw_wildcard/*__|__test_wildcard/raw_wildcard/*__|__test_wildcard/reference_wildcard/*__|__test_wildcard/reference_single_text_only_wildcard/*__|__test_wildcard/reference_empty_item/*__}","1 2 0separator0separator0separator0, 1, 2","populate",1004675672912818,"randomize","Select the Wildcard to add to the text"]},{"id":63,"type":"ImpactWildcardProcessor","pos":[-11763.3662109375,-3102.8525390625],"size":[400,222],"flags":{},"order":33,"mode":0,"inputs":[],"outputs":[{"name":"STRING","type":"STRING","links":[16],"slot_index":0}],"properties":{"Node name for S&R":"ImpactWildcardProcessor"},"widgets_values":["{1-9999999$$separator$$__test_wildcard/single_text_only/*__|__test_wildcard/empty_item/*__|__test_wildcard/text_only/*__|__test_wildcard/raw_wildcard/*__|__test_wildcard/raw_wildcard/*__|__test_wildcard/reference_wildcard/*__|__test_wildcard/reference_single_text_only_wildcard/*__|__test_wildcard/reference_empty_item/*__}","2, 1separator0, 1, 2separator__test_wildcard/empty_item/*__separator__test_wildcard/empty_item/*__separator0 1 2separator0, 1separator0separator0","populate",991625019002598,"randomize","Select the Wildcard to add to the text"]},{"id":62,"type":"ImpactWildcardProcessor","pos":[-11758.9052734375,-3379.462890625],"size":[400,222],"flags":{},"order":34,"mode":0,"inputs":[],"outputs":[{"name":"STRING","type":"STRING","links":[17],"slot_index":0}],"properties":{"Node name for S&R":"ImpactWildcardProcessor"},"widgets_values":["{1-3$$separator$$__test_wildcard/single_text_only/*__|__test_wildcard/empty_item/*__|__test_wildcard/text_only/*__|__test_wildcard/raw_wildcard/*__|__test_wildcard/raw_wildcard/*__|__test_wildcard/reference_wildcard/*__|__test_wildcard/reference_single_text_only_wildcard/*__|__test_wildcard/reference_empty_item/*__}","__test_wildcard/empty_item/*__separator0","populate",315424983318226,"randomize","Select the Wildcard to add to the 
text"]},{"id":61,"type":"ImpactWildcardProcessor","pos":[-11762.912109375,-3696.2431640625],"size":[400,222],"flags":{},"order":35,"mode":0,"inputs":[],"outputs":[{"name":"STRING","type":"STRING","links":[15],"slot_index":0}],"properties":{"Node name for S&R":"ImpactWildcardProcessor"},"widgets_values":["{-3$$separator$$__test_wildcard/single_text_only/*__|__test_wildcard/empty_item/*__|__test_wildcard/text_only/*__|__test_wildcard/raw_wildcard/*__|__test_wildcard/raw_wildcard/*__|__test_wildcard/reference_wildcard/*__|__test_wildcard/reference_single_text_only_wildcard/*__|__test_wildcard/reference_empty_item/*__}","0, 1, 2separator__test_wildcard/empty_item/*__separator0","populate",980974218142586,"randomize","Select the Wildcard to add to the text"]}],"links":[[2,4,0,5,0,"STRING"],[3,6,0,7,0,"STRING"],[4,8,0,9,0,"STRING"],[5,10,0,11,0,"STRING"],[6,22,0,23,0,"STRING"],[7,29,0,25,0,"STRING"],[8,28,0,26,0,"STRING"],[9,24,0,27,0,"STRING"],[10,47,0,48,0,"STRING"],[11,54,0,50,0,"STRING"],[12,53,0,51,0,"STRING"],[13,49,0,52,0,"STRING"],[14,64,0,55,0,"STRING"],[15,61,0,56,0,"STRING"],[16,63,0,57,0,"STRING"],[17,62,0,58,0,"STRING"]],"groups":[],"config":{},"extra":{"ds":{"scale":0.8991465919831495,"offset":[14117.986929566414,3548.5409155177076]},"node_versions":{"ComfyUI-Custom-Scripts":"bc8922deff73f59311c05cef27b9d4caaf43e87b","ComfyUI-Impact-Pack":"ebcb6f91abf4c8de1ab3636260959177615566ad"}},"version":0.4} \ No newline at end of file diff --git a/zavodik/nodes/ComfyUI-Impact-Pack/tests/workflows/impactwildcardprocessor_yaml_tests.json b/zavodik/nodes/ComfyUI-Impact-Pack/tests/workflows/impactwildcardprocessor_yaml_tests.json new file mode 100644 index 0000000000000000000000000000000000000000..c4452bb0fb69b1c8ba05cac50bdc04d121d0b7d6 --- /dev/null +++ b/zavodik/nodes/ComfyUI-Impact-Pack/tests/workflows/impactwildcardprocessor_yaml_tests.json @@ -0,0 +1 @@ +{"last_node_id":161,"last_link_id":43,"nodes":[{"id":9,"type":"ShowText|pysssss","pos":[-12730.7314453125,-6579.96533203125],"size":[315,76],"flags":{},"order":85,"mode":0,"inputs":[{"name":"text","type":"STRING","link":4,"widget":{"name":"text"}}],"outputs":[{"name":"STRING","type":"STRING","links":null,"shape":6}],"properties":{"Node name for S&R":"ShowText|pysssss"},"widgets_values":["","0"]},{"id":25,"type":"ShowText|pysssss","pos":[-11483.9609375,-6594.44384765625],"size":[315,76],"flags":{},"order":86,"mode":0,"inputs":[{"name":"text","type":"STRING","link":7,"widget":{"name":"text"}}],"outputs":[{"name":"STRING","type":"STRING","links":null,"shape":6}],"properties":{"Node name for S&R":"ShowText|pysssss"},"widgets_values":["","0"]},{"id":48,"type":"ShowText|pysssss","pos":[-12539.638671875,-2811.31591796875],"size":[315,76],"flags":{},"order":71,"mode":0,"inputs":[{"name":"text","type":"STRING","link":10,"widget":{"name":"text"}}],"outputs":[{"name":"STRING","type":"STRING","links":null,"shape":6}],"properties":{"Node name for S&R":"ShowText|pysssss"},"widgets_values":["","0 0"]},{"id":50,"type":"ShowText|pysssss","pos":[-12558.87890625,-3685.46875],"size":[315,76],"flags":{},"order":74,"mode":0,"inputs":[{"name":"text","type":"STRING","link":11,"widget":{"name":"text"}}],"outputs":[{"name":"STRING","type":"STRING","links":null,"shape":6}],"properties":{"Node name for S&R":"ShowText|pysssss"},"widgets_values":["","0 __test_wildcard/empty_item/*__ 
0"]},{"id":51,"type":"ShowText|pysssss","pos":[-12548.5244140625,-3088.02490234375],"size":[315,76],"flags":{},"order":72,"mode":0,"inputs":[{"name":"text","type":"STRING","link":12,"widget":{"name":"text"}}],"outputs":[{"name":"STRING","type":"STRING","links":null,"shape":6}],"properties":{"Node name for S&R":"ShowText|pysssss"},"widgets_values":["","0 0"]},{"id":52,"type":"ShowText|pysssss","pos":[-12546.7666015625,-3368.6884765625],"size":[315,76],"flags":{},"order":73,"mode":0,"inputs":[{"name":"text","type":"STRING","link":13,"widget":{"name":"text"}}],"outputs":[{"name":"STRING","type":"STRING","links":null,"shape":6}],"properties":{"Node name for S&R":"ShowText|pysssss"},"widgets_values":["","0 __test_wildcard/empty_item/*__"]},{"id":56,"type":"ShowText|pysssss","pos":[-11312.1083984375,-3699.947265625],"size":[315,76],"flags":{},"order":75,"mode":0,"inputs":[{"name":"text","type":"STRING","link":15,"widget":{"name":"text"}}],"outputs":[{"name":"STRING","type":"STRING","links":null,"shape":6}],"properties":{"Node name for S&R":"ShowText|pysssss"},"widgets_values":["","0, 1, 2separator0"]},{"id":57,"type":"ShowText|pysssss","pos":[-11301.75390625,-3102.50341796875],"size":[315,76],"flags":{},"order":77,"mode":0,"inputs":[{"name":"text","type":"STRING","link":16,"widget":{"name":"text"}}],"outputs":[{"name":"STRING","type":"STRING","links":null,"shape":6}],"properties":{"Node name for S&R":"ShowText|pysssss"},"widgets_values":["","bseparator0separator0 2"]},{"id":58,"type":"ShowText|pysssss","pos":[-11299.99609375,-3383.1669921875],"size":[315,76],"flags":{},"order":76,"mode":0,"inputs":[{"name":"text","type":"STRING","link":17,"widget":{"name":"text"}}],"outputs":[{"name":"STRING","type":"STRING","links":null,"shape":6}],"properties":{"Node name for S&R":"ShowText|pysssss"},"widgets_values":["","0"]},{"id":60,"type":"MarkdownNote","pos":[-11900.6484375,-3872.76025390625],"size":[536.965087890625,103.656005859375],"flags":{},"order":0,"mode":0,"inputs":[],"outputs":[],"properties":{},"widgets_values":["# separator: range$$sep$$"],"color":"#432","bgcolor":"#653"},{"id":36,"type":"MarkdownNote","pos":[-12072.5009765625,-6767.2568359375],"size":[536.965087890625,103.656005859375],"flags":{},"order":1,"mode":0,"inputs":[],"outputs":[],"properties":{},"widgets_values":["# separator: range$$sep$$"],"color":"#432","bgcolor":"#653"},{"id":59,"type":"MarkdownNote","pos":[-13181.8271484375,-3886.89794921875],"size":[536.965087890625,103.656005859375],"flags":{},"order":2,"mode":0,"inputs":[],"outputs":[],"properties":{},"widgets_values":["# separator: range$$"],"color":"#432","bgcolor":"#653"},{"id":91,"type":"ShowText|pysssss","pos":[-12719.66796875,-6245.978515625],"size":[315,76],"flags":{},"order":84,"mode":0,"inputs":[{"name":"text","type":"STRING","link":22,"widget":{"name":"text"}}],"outputs":[{"name":"STRING","type":"STRING","links":null,"shape":6}],"properties":{"Node name for S&R":"ShowText|pysssss"},"widgets_values":[""]},{"id":92,"type":"ShowText|pysssss","pos":[-11472.8974609375,-6260.45703125],"size":[315,76],"flags":{},"order":87,"mode":0,"inputs":[{"name":"text","type":"STRING","link":23,"widget":{"name":"text"}}],"outputs":[{"name":"STRING","type":"STRING","links":null,"shape":6}],"properties":{"Node name for 
S&R":"ShowText|pysssss"},"widgets_values":[""]},{"id":97,"type":"ShowText|pysssss","pos":[-12766.0146484375,-5921.5537109375],"size":[315,76],"flags":{},"order":83,"mode":0,"inputs":[{"name":"text","type":"STRING","link":24,"widget":{"name":"text"}}],"outputs":[{"name":"STRING","type":"STRING","links":null,"shape":6}],"properties":{"Node name for S&R":"ShowText|pysssss"},"widgets_values":["","0, 1, 2"]},{"id":98,"type":"ShowText|pysssss","pos":[-11519.244140625,-5936.0322265625],"size":[315,76],"flags":{},"order":88,"mode":0,"inputs":[{"name":"text","type":"STRING","link":25,"widget":{"name":"text"}}],"outputs":[{"name":"STRING","type":"STRING","links":null,"shape":6}],"properties":{"Node name for S&R":"ShowText|pysssss"},"widgets_values":["","0separator0, 1, 2separator0, 1"]},{"id":103,"type":"ShowText|pysssss","pos":[-12766.0166015625,-5608.7158203125],"size":[315,76],"flags":{},"order":82,"mode":0,"inputs":[{"name":"text","type":"STRING","link":26,"widget":{"name":"text"}}],"outputs":[{"name":"STRING","type":"STRING","links":null,"shape":6}],"properties":{"Node name for S&R":"ShowText|pysssss"},"widgets_values":["","2, 1 0 1 0 2"]},{"id":104,"type":"ShowText|pysssss","pos":[-11519.24609375,-5623.1943359375],"size":[315,76],"flags":{},"order":89,"mode":0,"inputs":[{"name":"text","type":"STRING","link":27,"widget":{"name":"text"}}],"outputs":[{"name":"STRING","type":"STRING","links":null,"shape":6}],"properties":{"Node name for S&R":"ShowText|pysssss"},"widgets_values":["","0, 1separator1 0 2separator0 2"]},{"id":35,"type":"MarkdownNote","pos":[-13353.6796875,-6781.39453125],"size":[536.965087890625,103.656005859375],"flags":{},"order":3,"mode":0,"inputs":[],"outputs":[],"properties":{},"widgets_values":["# separator: range$$"],"color":"#432","bgcolor":"#653"},{"id":109,"type":"ShowText|pysssss","pos":[-12747.9560546875,-5278.75244140625],"size":[315,76],"flags":{},"order":81,"mode":0,"inputs":[{"name":"text","type":"STRING","link":28,"widget":{"name":"text"}}],"outputs":[{"name":"STRING","type":"STRING","links":null,"shape":6}],"properties":{"Node name for S&R":"ShowText|pysssss"},"widgets_values":["","0, 1, 2"]},{"id":110,"type":"ShowText|pysssss","pos":[-11501.185546875,-5293.23095703125],"size":[315,76],"flags":{},"order":90,"mode":0,"inputs":[{"name":"text","type":"STRING","link":29,"widget":{"name":"text"}}],"outputs":[{"name":"STRING","type":"STRING","links":null,"shape":6}],"properties":{"Node name for S&R":"ShowText|pysssss"},"widgets_values":["","0, 1, 2"]},{"id":115,"type":"ShowText|pysssss","pos":[-12716.0927734375,-4941.001953125],"size":[315,76],"flags":{},"order":80,"mode":0,"inputs":[{"name":"text","type":"STRING","link":30,"widget":{"name":"text"}}],"outputs":[{"name":"STRING","type":"STRING","links":null,"shape":6}],"properties":{"Node name for S&R":"ShowText|pysssss"},"widgets_values":["","0"]},{"id":116,"type":"ShowText|pysssss","pos":[-11469.322265625,-4955.48046875],"size":[315,76],"flags":{},"order":91,"mode":0,"inputs":[{"name":"text","type":"STRING","link":31,"widget":{"name":"text"}}],"outputs":[{"name":"STRING","type":"STRING","links":null,"shape":6}],"properties":{"Node name for S&R":"ShowText|pysssss"},"widgets_values":["","0"]},{"id":121,"type":"ShowText|pysssss","pos":[-12728.8369140625,-4574.576171875],"size":[315,76],"flags":{},"order":79,"mode":0,"inputs":[{"name":"text","type":"STRING","link":32,"widget":{"name":"text"}}],"outputs":[{"name":"STRING","type":"STRING","links":null,"shape":6}],"properties":{"Node name for 
S&R":"ShowText|pysssss"},"widgets_values":["","__test_wildcard/empty_item/*__"]},{"id":122,"type":"ShowText|pysssss","pos":[-11482.06640625,-4589.0546875],"size":[315,76],"flags":{},"order":92,"mode":0,"inputs":[{"name":"text","type":"STRING","link":33,"widget":{"name":"text"}}],"outputs":[{"name":"STRING","type":"STRING","links":null,"shape":6}],"properties":{"Node name for S&R":"ShowText|pysssss"},"widgets_values":["","__test_wildcard/empty_item/*__"]},{"id":126,"type":"MarkdownNote","pos":[-12238.06640625,-4571.01171875],"size":[284.1663513183594,190.96800231933594],"flags":{},"order":4,"mode":0,"inputs":[],"outputs":[],"properties":{},"widgets_values":["# source: $$\\_\\_test\\_\\_wildcard/reference_empty_item/*\\_\\_\nexpected \nseparator = sep \nprobability: len(result) \n \"\": 100%, no throw error\n"],"color":"#432","bgcolor":"#653"},{"id":96,"type":"MarkdownNote","pos":[-12228.8974609375,-6242.4140625],"size":[284.1663513183594,190.96800231933594],"flags":{},"order":5,"mode":0,"inputs":[],"outputs":[],"properties":{},"widgets_values":["# source: $$\\_\\_test\\_\\_wildcard/empty_text/*\\_\\_\nexpected \nseparator = sep \nprobability: len(result) \n \"\": 100%, no throw error\n"],"color":"#432","bgcolor":"#653"},{"id":37,"type":"MarkdownNote","pos":[-12239.9609375,-6576.40087890625],"size":[284.1663513183594,190.96800231933594],"flags":{},"order":6,"mode":0,"inputs":[],"outputs":[],"properties":{},"widgets_values":["# source: $$\\_\\_test\\_\\_wildcard/single_text_only/*\\_\\_\nexpected \nseparator = sep \nprobability: len(result) \n 1: 100%, no throw error\n"],"color":"#432","bgcolor":"#653"},{"id":43,"type":"MarkdownNote","pos":[-13532.40234375,-6566.12890625],"size":[284.1663513183594,190.96800231933594],"flags":{},"order":7,"mode":0,"inputs":[],"outputs":[],"properties":{},"widgets_values":["# source: $$\\_\\_test\\_wildcard/single_text_only/*\\_\\_\nexpected \nseparator = \" \" \nprobability: len(result) \n 1: 100%, no throw error\n"],"color":"#432","bgcolor":"#653"},{"id":125,"type":"MarkdownNote","pos":[-13530.5078125,-4560.73974609375],"size":[284.1663513183594,190.96800231933594],"flags":{},"order":8,"mode":0,"inputs":[],"outputs":[],"properties":{},"widgets_values":["# source: $$\\_\\_test\\_wildcard/reference_empty_item/*\\_\\_\nexpected \nseparator = \" \" \nprobability: len(result) \n \"\": 100%, no throw error\n"],"color":"#432","bgcolor":"#653"},{"id":47,"type":"ImpactWildcardProcessor","pos":[-13009.509765625,-2812.1884765625],"size":[400,222],"flags":{},"order":9,"mode":0,"inputs":[],"outputs":[{"name":"STRING","type":"STRING","links":[10],"slot_index":0}],"properties":{"Node name for S&R":"ImpactWildcardProcessor"},"widgets_values":["{1-$$__test_wildcard/include_all/*__}","0 0","populate",898121972509601,"randomize","Select the Wildcard to add to the text"]},{"id":53,"type":"ImpactWildcardProcessor","pos":[-13010.13671875,-3088.3740234375],"size":[400,222],"flags":{},"order":10,"mode":0,"inputs":[],"outputs":[{"name":"STRING","type":"STRING","links":[12],"slot_index":0}],"properties":{"Node name for S&R":"ImpactWildcardProcessor"},"widgets_values":["{1-9999999$$__test_wildcard/include_all/*__}","0 0","populate",494528958312924,"randomize","Select the Wildcard to add to the text"]},{"id":49,"type":"ImpactWildcardProcessor","pos":[-13005.67578125,-3364.984375],"size":[400,222],"flags":{},"order":11,"mode":0,"inputs":[],"outputs":[{"name":"STRING","type":"STRING","links":[13],"slot_index":0}],"properties":{"Node name for 
S&R":"ImpactWildcardProcessor"},"widgets_values":["{1-3$$__test_wildcard/include_all/*__}","0 __test_wildcard/empty_item/*__","populate",575439885343933,"randomize","Select the Wildcard to add to the text"]},{"id":54,"type":"ImpactWildcardProcessor","pos":[-13009.6826171875,-3681.7646484375],"size":[400,222],"flags":{},"order":12,"mode":0,"inputs":[],"outputs":[{"name":"STRING","type":"STRING","links":[11],"slot_index":0}],"properties":{"Node name for S&R":"ImpactWildcardProcessor"},"widgets_values":["{-3$$__test_wildcard/include_all/*__}","0 __test_wildcard/empty_item/*__ 0","populate",451409998785804,"randomize","Select the Wildcard to add to the text"]},{"id":61,"type":"ImpactWildcardProcessor","pos":[-11762.912109375,-3696.2431640625],"size":[400,222],"flags":{},"order":13,"mode":0,"inputs":[],"outputs":[{"name":"STRING","type":"STRING","links":[15],"slot_index":0}],"properties":{"Node name for S&R":"ImpactWildcardProcessor"},"widgets_values":["{-3$$separator$$__test_wildcard/include_all/*__}","0, 1, 2separator0","populate",866104734173401,"randomize","Select the Wildcard to add to the text"]},{"id":62,"type":"ImpactWildcardProcessor","pos":[-11758.9052734375,-3379.462890625],"size":[400,222],"flags":{},"order":14,"mode":0,"inputs":[],"outputs":[{"name":"STRING","type":"STRING","links":[17],"slot_index":0}],"properties":{"Node name for S&R":"ImpactWildcardProcessor"},"widgets_values":["{1-3$$separator$$__test_wildcard/include_all/*__}","0","populate",906296272550597,"randomize","Select the Wildcard to add to the text"]},{"id":63,"type":"ImpactWildcardProcessor","pos":[-11763.3662109375,-3102.8525390625],"size":[400,222],"flags":{},"order":15,"mode":0,"inputs":[],"outputs":[{"name":"STRING","type":"STRING","links":[16],"slot_index":0}],"properties":{"Node name for S&R":"ImpactWildcardProcessor"},"widgets_values":["{1-9999999$$separator$$__test_wildcard/include_all/*__}","bseparator0separator0 2","populate",619593260395734,"randomize","Select the Wildcard to add to the text"]},{"id":64,"type":"ImpactWildcardProcessor","pos":[-11762.7392578125,-2826.6669921875],"size":[400,222],"flags":{},"order":16,"mode":0,"inputs":[],"outputs":[{"name":"STRING","type":"STRING","links":[14],"slot_index":0}],"properties":{"Node name for S&R":"ImpactWildcardProcessor"},"widgets_values":["{1-$$separator$$__test_wildcard/include_all/*__}","0separator0separatorbseparator0separator__test_wildcard/empty_item/*__separator1, 0","populate",1093508569142815,"randomize","Select the Wildcard to add to the text"]},{"id":55,"type":"ShowText|pysssss","pos":[-11292.8681640625,-2825.79443359375],"size":[330.10296630859375,263.9482116699219],"flags":{},"order":78,"mode":0,"inputs":[{"name":"text","type":"STRING","link":14,"widget":{"name":"text"}}],"outputs":[{"name":"STRING","type":"STRING","links":null,"shape":6}],"properties":{"Node name for S&R":"ShowText|pysssss"},"widgets_values":["","0separator0separatorbseparator0separator__test_wildcard/empty_item/*__separator1, 0"]},{"id":72,"type":"MarkdownNote","pos":[-12062.1220703125,-2822.9970703125],"size":[281.3926086425781,206.91697692871094],"flags":{},"order":17,"mode":0,"inputs":[],"outputs":[],"properties":{},"widgets_values":["# range: n1-$$\n## expected \nseparator = sep \nprobability: len(result) \n all pattern\n\n## actual\nnot supported \nprobability: len(result) \n 1: 100%, 2: 0%, 3: 0%. 
4: 0% "],"color":"#432","bgcolor":"#653"},{"id":67,"type":"MarkdownNote","pos":[-13354.5634765625,-2812.72412109375],"size":[281.3926086425781,206.91697692871094],"flags":{},"order":18,"mode":0,"inputs":[],"outputs":[],"properties":{},"widgets_values":["# range: n1-$$\n## expected \nseparator = \" \" \nprobability: len(result) \n all pattern\n \n## actual\nnot supported \nprobability: len(result) \n 1: 100%, 2: 0%, 3: 0%. 4: 0% "],"color":"#432","bgcolor":"#653"},{"id":65,"type":"MarkdownNote","pos":[-13352.6298828125,-3087.4697265625],"size":[284.8597717285156,218.01190185546875],"flags":{},"order":19,"mode":0,"inputs":[],"outputs":[],"properties":{},"widgets_values":["# range: n1-(itemcount <<<< n2)$$\n## expected \nseparator = \" \" \nprobability: len(result) \n all pattern\n \n## actual\nprobability: len(result) \n 1: 0%, 2: 0%, 3: 0%... max count: 99% \n"],"color":"#432","bgcolor":"#653"},{"id":71,"type":"MarkdownNote","pos":[-12064.3828125,-3106.1318359375],"size":[284.8597717285156,218.01190185546875],"flags":{},"order":20,"mode":0,"inputs":[],"outputs":[],"properties":{},"widgets_values":["# range: n1-(itemcount <<<< n2)$$\n## expected \nseparator = sep \nprobability: len(result) \n all pattern\n\n## actual\nprobability: len(result) \n 1: 0%, 2: 0%, 3: 0%... max count: 99% \n"],"color":"#432","bgcolor":"#653"},{"id":70,"type":"MarkdownNote","pos":[-12058.2548828125,-3357.92578125],"size":[284.1663513183594,190.96800231933594],"flags":{},"order":21,"mode":0,"inputs":[],"outputs":[],"properties":{},"widgets_values":["# range: n1-n2$$\nexpected \nseparator = sep \nprobability: len(result) \n 1: 33%, 2: 33%, 3: 33%\n"],"color":"#432","bgcolor":"#653"},{"id":66,"type":"MarkdownNote","pos":[-13350.6962890625,-3347.65380859375],"size":[284.1663513183594,190.96800231933594],"flags":{},"order":22,"mode":0,"inputs":[],"outputs":[],"properties":{},"widgets_values":["# range: n1-n2$$\nexpected \nseparator = \" \" \nprobability: len(result) \n 1: 33%, 2: 33%, 3: 33%\n"],"color":"#432","bgcolor":"#653"},{"id":68,"type":"MarkdownNote","pos":[-13360.5498046875,-3671.63232421875],"size":[284.1663513183594,190.96800231933594],"flags":{},"order":23,"mode":0,"inputs":[],"outputs":[],"properties":{},"widgets_values":["# range: -n$$\nexpected \nseparator = \" \" \nprobability: len(result) \n 1: 33%, 2: 33%, 3: 33%\n"],"color":"#432","bgcolor":"#653"},{"id":69,"type":"MarkdownNote","pos":[-12068.1083984375,-3681.904296875],"size":[284.1663513183594,190.96800231933594],"flags":{},"order":24,"mode":0,"inputs":[],"outputs":[],"properties":{},"widgets_values":["# range: -n$$\nexpected \nseparator = sep \nprobability: len(result) \n 1: 33%, 2: 33%, 3: 33%\n"],"color":"#432","bgcolor":"#653"},{"id":123,"type":"ImpactWildcardProcessor","pos":[-13179.640625,-4570.8720703125],"size":[400,222],"flags":{},"order":25,"mode":0,"inputs":[],"outputs":[{"name":"STRING","type":"STRING","links":[32],"slot_index":0}],"properties":{"Node name for S&R":"ImpactWildcardProcessor"},"widgets_values":["{1-3$$__test_wildcard/reference_empty_item/*__}","__test_wildcard/empty_item/*__","populate",54896724395668,"randomize","Select the Wildcard to add to the text"]},{"id":117,"type":"ImpactWildcardProcessor","pos":[-13166.896484375,-4937.2978515625],"size":[400,222],"flags":{},"order":26,"mode":0,"inputs":[],"outputs":[{"name":"STRING","type":"STRING","links":[30],"slot_index":0}],"properties":{"Node name for 
S&R":"ImpactWildcardProcessor"},"widgets_values":["{1-3$$__test_wildcard/reference_single_text_only_wildcard/*__}","0","populate",391688626288181,"randomize","Select the Wildcard to add to the text"]},{"id":111,"type":"ImpactWildcardProcessor","pos":[-13198.759765625,-5275.04833984375],"size":[400,222],"flags":{},"order":27,"mode":0,"inputs":[],"outputs":[{"name":"STRING","type":"STRING","links":[28],"slot_index":0}],"properties":{"Node name for S&R":"ImpactWildcardProcessor"},"widgets_values":["{1-3$$__test_wildcard/reference_wildcard/*__}","0, 1, 2","populate",296622801038825,"randomize","Select the Wildcard to add to the text"]},{"id":105,"type":"ImpactWildcardProcessor","pos":[-13216.8203125,-5605.01171875],"size":[400,222],"flags":{},"order":28,"mode":0,"inputs":[],"outputs":[{"name":"STRING","type":"STRING","links":[26],"slot_index":0}],"properties":{"Node name for S&R":"ImpactWildcardProcessor"},"widgets_values":["{1-3$$__test_wildcard/raw_wildcard/*__}","2, 1 0 1 0 2","populate",533570818504916,"randomize","Select the Wildcard to add to the text"]},{"id":99,"type":"ImpactWildcardProcessor","pos":[-13216.818359375,-5917.849609375],"size":[400,222],"flags":{},"order":29,"mode":0,"inputs":[],"outputs":[{"name":"STRING","type":"STRING","links":[24],"slot_index":0}],"properties":{"Node name for S&R":"ImpactWildcardProcessor"},"widgets_values":["{1-3$$__test_wildcard/text_only/*__}","0, 1, 2","populate",115645098449181,"randomize","Select the Wildcard to add to the text"]},{"id":93,"type":"ImpactWildcardProcessor","pos":[-13170.4716796875,-6242.2744140625],"size":[400,222],"flags":{},"order":30,"mode":0,"inputs":[],"outputs":[{"name":"STRING","type":"STRING","links":[22],"slot_index":0}],"properties":{"Node name for S&R":"ImpactWildcardProcessor"},"widgets_values":["{1-3$$__test_wildcard/empty_text/*__}","","populate",232076456423613,"randomize","Select the Wildcard to add to the text"]},{"id":8,"type":"ImpactWildcardProcessor","pos":[-13181.53515625,-6576.26123046875],"size":[400,222],"flags":{},"order":31,"mode":0,"inputs":[],"outputs":[{"name":"STRING","type":"STRING","links":[4],"slot_index":0}],"properties":{"Node name for S&R":"ImpactWildcardProcessor"},"widgets_values":["{1-3$$__test_wildcard/single_text_only/*__}","0","populate",296246500565067,"randomize","Select the Wildcard to add to the text"]},{"id":29,"type":"ImpactWildcardProcessor","pos":[-11934.7646484375,-6590.73974609375],"size":[400,222],"flags":{},"order":32,"mode":0,"inputs":[],"outputs":[{"name":"STRING","type":"STRING","links":[7],"slot_index":0}],"properties":{"Node name for S&R":"ImpactWildcardProcessor"},"widgets_values":["{1-3$$separator$$__test_wildcard/single_text_only/*__}","0","populate",760939738881648,"randomize","Select the Wildcard to add to the text"]},{"id":94,"type":"ImpactWildcardProcessor","pos":[-11923.701171875,-6256.7529296875],"size":[400,222],"flags":{},"order":33,"mode":0,"inputs":[],"outputs":[{"name":"STRING","type":"STRING","links":[23],"slot_index":0}],"properties":{"Node name for S&R":"ImpactWildcardProcessor"},"widgets_values":["{1-3$$separator$$__test_wildcard/empty_text/*__}","","populate",536551955956330,"randomize","Select the Wildcard to add to the text"]},{"id":100,"type":"ImpactWildcardProcessor","pos":[-11970.0478515625,-5932.328125],"size":[400,222],"flags":{},"order":34,"mode":0,"inputs":[],"outputs":[{"name":"STRING","type":"STRING","links":[25],"slot_index":0}],"properties":{"Node name for 
S&R":"ImpactWildcardProcessor"},"widgets_values":["{1-3$$separator$$__test_wildcard/text_only/*__}","0separator0, 1, 2separator0, 1","populate",907373058546822,"randomize","Select the Wildcard to add to the text"]},{"id":106,"type":"ImpactWildcardProcessor","pos":[-11970.0498046875,-5619.490234375],"size":[400,222],"flags":{},"order":35,"mode":0,"inputs":[],"outputs":[{"name":"STRING","type":"STRING","links":[27],"slot_index":0}],"properties":{"Node name for S&R":"ImpactWildcardProcessor"},"widgets_values":["{1-3$$separator$$__test_wildcard/raw_wildcard/*__}","0, 1separator1 0 2separator0 2","populate",901647112171264,"randomize","Select the Wildcard to add to the text"]},{"id":112,"type":"ImpactWildcardProcessor","pos":[-11951.9892578125,-5289.52685546875],"size":[400,222],"flags":{},"order":36,"mode":0,"inputs":[],"outputs":[{"name":"STRING","type":"STRING","links":[29],"slot_index":0}],"properties":{"Node name for S&R":"ImpactWildcardProcessor"},"widgets_values":["{1-3$$separator$$__test_wildcard/reference_wildcard/*__}","0, 1, 2","populate",871387361943030,"randomize","Select the Wildcard to add to the text"]},{"id":118,"type":"ImpactWildcardProcessor","pos":[-11920.1259765625,-4951.7763671875],"size":[400,222],"flags":{},"order":37,"mode":0,"inputs":[],"outputs":[{"name":"STRING","type":"STRING","links":[31],"slot_index":0}],"properties":{"Node name for S&R":"ImpactWildcardProcessor"},"widgets_values":["{1-3$$separator$$__test_wildcard/reference_single_text_only_wildcard/*__}","0","populate",341019232391714,"randomize","Select the Wildcard to add to the text"]},{"id":124,"type":"ImpactWildcardProcessor","pos":[-11932.8701171875,-4585.3505859375],"size":[400,222],"flags":{},"order":38,"mode":0,"inputs":[],"outputs":[{"name":"STRING","type":"STRING","links":[33],"slot_index":0}],"properties":{"Node name for S&R":"ImpactWildcardProcessor"},"widgets_values":["{1-3$$separator$$__test_wildcard/reference_empty_item/*__}","__test_wildcard/empty_item/*__","populate",370036648464827,"randomize","Select the Wildcard to add to the text"]},{"id":128,"type":"ShowText|pysssss","pos":[-10112.162109375,-6622.95751953125],"size":[315,76],"flags":{},"order":93,"mode":0,"inputs":[{"name":"text","type":"STRING","link":34,"widget":{"name":"text"}}],"outputs":[{"name":"STRING","type":"STRING","links":null,"shape":6}],"properties":{"Node name for S&R":"ShowText|pysssss"},"widgets_values":["","0"]},{"id":129,"type":"ShowText|pysssss","pos":[-8865.3916015625,-6637.43603515625],"size":[315,76],"flags":{},"order":94,"mode":0,"inputs":[{"name":"text","type":"STRING","link":35,"widget":{"name":"text"}}],"outputs":[{"name":"STRING","type":"STRING","links":null,"shape":6}],"properties":{"Node name for S&R":"ShowText|pysssss"},"widgets_values":["","0separator1"]},{"id":130,"type":"MarkdownNote","pos":[-9453.931640625,-6810.2490234375],"size":[536.965087890625,103.656005859375],"flags":{},"order":39,"mode":0,"inputs":[],"outputs":[],"properties":{},"widgets_values":["# separator: range$$sep$$"],"color":"#432","bgcolor":"#653"},{"id":131,"type":"MarkdownNote","pos":[-10735.1103515625,-6824.38671875],"size":[536.965087890625,103.656005859375],"flags":{},"order":40,"mode":0,"inputs":[],"outputs":[],"properties":{},"widgets_values":["# separator: 
range$$"],"color":"#432","bgcolor":"#653"},{"id":134,"type":"ImpactWildcardProcessor","pos":[-10562.9658203125,-6619.25341796875],"size":[400,222],"flags":{},"order":41,"mode":0,"inputs":[],"outputs":[{"name":"STRING","type":"STRING","links":[34],"slot_index":0}],"properties":{"Node name for S&R":"ImpactWildcardProcessor"},"widgets_values":["{1-3$$__test_wildcard/numberonly/type_int/*__}","0","populate",377802811662475,"randomize","Select the Wildcard to add to the text"]},{"id":135,"type":"ImpactWildcardProcessor","pos":[-9316.1953125,-6633.73193359375],"size":[400,222],"flags":{},"order":42,"mode":0,"inputs":[],"outputs":[{"name":"STRING","type":"STRING","links":[35],"slot_index":0}],"properties":{"Node name for S&R":"ImpactWildcardProcessor"},"widgets_values":["{1-3$$separator$$__test_wildcard/numberonly/type_int/*__}","0separator1","populate",753049590556052,"randomize","Select the Wildcard to add to the text"]},{"id":136,"type":"ShowText|pysssss","pos":[-10099.29296875,-6271.14697265625],"size":[315,76],"flags":{},"order":96,"mode":0,"inputs":[{"name":"text","type":"STRING","link":36,"widget":{"name":"text"}}],"outputs":[{"name":"STRING","type":"STRING","links":null,"shape":6}],"properties":{"Node name for S&R":"ShowText|pysssss"},"widgets_values":["","0"]},{"id":137,"type":"ShowText|pysssss","pos":[-8852.521484375,-6285.62548828125],"size":[315,76],"flags":{},"order":95,"mode":0,"inputs":[{"name":"text","type":"STRING","link":37,"widget":{"name":"text"}}],"outputs":[{"name":"STRING","type":"STRING","links":null,"shape":6}],"properties":{"Node name for S&R":"ShowText|pysssss"},"widgets_values":["","0"]},{"id":138,"type":"MarkdownNote","pos":[-10900.9638671875,-6257.310546875],"size":[284.1663513183594,190.96800231933594],"flags":{},"order":43,"mode":0,"inputs":[],"outputs":[],"properties":{},"widgets_values":["# source: $$\\_\\_test\\_wildcard/numberonly/type_int2/*\\_\\_\nexpected \nseparator = \" \" \nprobability: len(result) \n 1: 100%, no throw error\n"],"color":"#432","bgcolor":"#653"},{"id":139,"type":"MarkdownNote","pos":[-9608.5224609375,-6267.58251953125],"size":[284.1663513183594,190.96800231933594],"flags":{},"order":44,"mode":0,"inputs":[],"outputs":[],"properties":{},"widgets_values":["# source: $$\\_\\_test\\_wildcard/numberonly/type_int2/*\\_\\_\nexpected \nseparator = sep \nprobability: len(result) \n 1: 100%, no throw error\n"],"color":"#432","bgcolor":"#653"},{"id":141,"type":"ImpactWildcardProcessor","pos":[-9303.326171875,-6281.92138671875],"size":[400,222],"flags":{},"order":45,"mode":0,"inputs":[],"outputs":[{"name":"STRING","type":"STRING","links":[37],"slot_index":0}],"properties":{"Node name for S&R":"ImpactWildcardProcessor"},"widgets_values":["{1-3$$separator$$__test_wildcard/numberonly/type_int2/*__}","0","populate",344234339391823,"randomize","Select the Wildcard to add to the text"]},{"id":140,"type":"ImpactWildcardProcessor","pos":[-10550.0966796875,-6267.44287109375],"size":[400,222],"flags":{},"order":46,"mode":0,"inputs":[],"outputs":[{"name":"STRING","type":"STRING","links":[36],"slot_index":0}],"properties":{"Node name for S&R":"ImpactWildcardProcessor"},"widgets_values":["{1-3$$__test_wildcard/numberonly/type_int2/*__}","0","populate",949173770490,"randomize","Select the Wildcard to add to the 
text"]},{"id":142,"type":"ShowText|pysssss","pos":[-10094.38671875,-5643.3115234375],"size":[315,76],"flags":{},"order":100,"mode":0,"inputs":[{"name":"text","type":"STRING","link":38,"widget":{"name":"text"}}],"outputs":[{"name":"STRING","type":"STRING","links":null,"shape":6}],"properties":{"Node name for S&R":"ShowText|pysssss"},"widgets_values":["","0 1"]},{"id":143,"type":"ShowText|pysssss","pos":[-8847.6171875,-5657.7900390625],"size":[315,76],"flags":{},"order":97,"mode":0,"inputs":[{"name":"text","type":"STRING","link":39,"widget":{"name":"text"}}],"outputs":[{"name":"STRING","type":"STRING","links":null,"shape":6}],"properties":{"Node name for S&R":"ShowText|pysssss"},"widgets_values":["","1separator0"]},{"id":148,"type":"ShowText|pysssss","pos":[-10081.517578125,-5291.5009765625],"size":[315,76],"flags":{},"order":99,"mode":0,"inputs":[{"name":"text","type":"STRING","link":40,"widget":{"name":"text"}}],"outputs":[{"name":"STRING","type":"STRING","links":null,"shape":6}],"properties":{"Node name for S&R":"ShowText|pysssss"},"widgets_values":["","1"]},{"id":149,"type":"ShowText|pysssss","pos":[-8834.7470703125,-5305.9794921875],"size":[315,76],"flags":{},"order":98,"mode":0,"inputs":[{"name":"text","type":"STRING","link":41,"widget":{"name":"text"}}],"outputs":[{"name":"STRING","type":"STRING","links":null,"shape":6}],"properties":{"Node name for S&R":"ShowText|pysssss"},"widgets_values":["","1"]},{"id":147,"type":"ImpactWildcardProcessor","pos":[-9298.4208984375,-5654.0859375],"size":[400,222],"flags":{},"order":47,"mode":0,"inputs":[],"outputs":[{"name":"STRING","type":"STRING","links":[39],"slot_index":0}],"properties":{"Node name for S&R":"ImpactWildcardProcessor"},"widgets_values":["{1-3$$separator$$__test_wildcard/numberonly/type_str/*__}","1separator0","populate",508734479462831,"randomize","Select the Wildcard to add to the text"]},{"id":151,"type":"MarkdownNote","pos":[-9590.7470703125,-5287.9365234375],"size":[284.1663513183594,190.96800231933594],"flags":{},"order":48,"mode":0,"inputs":[],"outputs":[],"properties":{},"widgets_values":["# source: $$\\_\\_test\\_wildcard/numberonly/type_str2/*\\_\\_\nexpected \nseparator = sep \nprobability: len(result) \n 1: 100%, no throw error\n"],"color":"#432","bgcolor":"#653"},{"id":152,"type":"ImpactWildcardProcessor","pos":[-9285.5517578125,-5302.275390625],"size":[400,222],"flags":{},"order":49,"mode":0,"inputs":[],"outputs":[{"name":"STRING","type":"STRING","links":[41],"slot_index":0}],"properties":{"Node name for S&R":"ImpactWildcardProcessor"},"widgets_values":["{1-3$$separator$$__test_wildcard/numberonly/type_str2/*__}","1","populate",1067712563048875,"randomize","Select the Wildcard to add to the text"]},{"id":153,"type":"ImpactWildcardProcessor","pos":[-10532.3212890625,-5287.796875],"size":[400,222],"flags":{},"order":50,"mode":0,"inputs":[],"outputs":[{"name":"STRING","type":"STRING","links":[40],"slot_index":0}],"properties":{"Node name for S&R":"ImpactWildcardProcessor"},"widgets_values":["{1-3$$__test_wildcard/numberonly/type_str2/*__}","1","populate",902341396140779,"randomize","Select the Wildcard to add to the text"]},{"id":150,"type":"MarkdownNote","pos":[-10883.1884765625,-5277.66455078125],"size":[284.1663513183594,190.96800231933594],"flags":{},"order":51,"mode":0,"inputs":[],"outputs":[],"properties":{},"widgets_values":["# source: $$\\_\\_test\\_wildcard/numberonly/type_str2/*\\_\\_\nexpected \nseparator = \" \" \nprobability: len(result) \n 1: 100%, no throw 
error\n"],"color":"#432","bgcolor":"#653"},{"id":145,"type":"MarkdownNote","pos":[-9603.6162109375,-5639.7470703125],"size":[284.1663513183594,190.96800231933594],"flags":{},"order":52,"mode":0,"inputs":[],"outputs":[],"properties":{},"widgets_values":["# source: $$\\_\\_test\\_wildcard/numberonly/type_str/*\\_\\_\nexpected \nseparator = sep \nprobability: len(result) \n 1: 50%, no throw error \n 2: 50%, no throw error \n"],"color":"#432","bgcolor":"#653"},{"id":144,"type":"MarkdownNote","pos":[-10896.0576171875,-5629.47509765625],"size":[284.1663513183594,190.96800231933594],"flags":{},"order":53,"mode":0,"inputs":[],"outputs":[],"properties":{},"widgets_values":["# source: $$\\_\\_test\\_wildcard/numberonly/type_str/*\\_\\_\nexpected \nseparator = \" \" \nprobability: len(result) \n 1: 50%, no throw error \n 2: 50%, no throw error \n"],"color":"#432","bgcolor":"#653"},{"id":133,"type":"MarkdownNote","pos":[-10913.8330078125,-6609.12109375],"size":[284.1663513183594,190.96800231933594],"flags":{},"order":54,"mode":0,"inputs":[],"outputs":[],"properties":{},"widgets_values":["# source: $$\\_\\_test\\_wildcard/numberonly/type_int/*\\_\\_\nexpected \nseparator = \" \" \nprobability: len(result) \n 1: 50%, no throw error \n 2: 50%, no throw error \n"],"color":"#432","bgcolor":"#653"},{"id":132,"type":"MarkdownNote","pos":[-9621.3916015625,-6619.39306640625],"size":[284.1663513183594,190.96800231933594],"flags":{},"order":55,"mode":0,"inputs":[],"outputs":[],"properties":{},"widgets_values":["# source: $$\\_\\_test\\_wildcard/numberonly/type_int/*\\_\\_\nexpected \nseparator = sep \nprobability: len(result) \n 1: 50%, no throw error \n 2: 50%, no throw error \n"],"color":"#432","bgcolor":"#653"},{"id":146,"type":"ImpactWildcardProcessor","pos":[-10545.1904296875,-5639.607421875],"size":[400,222],"flags":{},"order":56,"mode":0,"inputs":[],"outputs":[{"name":"STRING","type":"STRING","links":[38],"slot_index":0}],"properties":{"Node name for S&R":"ImpactWildcardProcessor"},"widgets_values":["{1-3$$__test_wildcard/numberonly/type_str/*__}","0 1","populate",217160399221458,"randomize","Select the Wildcard to add to the text"]},{"id":154,"type":"ShowText|pysssss","pos":[-10087.2021484375,-5960.27001953125],"size":[315,76],"flags":{},"order":101,"mode":0,"inputs":[{"name":"text","type":"STRING","link":42,"widget":{"name":"text"}}],"outputs":[{"name":"STRING","type":"STRING","links":null,"shape":6}],"properties":{"Node name for S&R":"ShowText|pysssss"},"widgets_values":["","1"]},{"id":155,"type":"ShowText|pysssss","pos":[-8840.431640625,-5974.74853515625],"size":[315,76],"flags":{},"order":102,"mode":0,"inputs":[{"name":"text","type":"STRING","link":43,"widget":{"name":"text"}}],"outputs":[{"name":"STRING","type":"STRING","links":null,"shape":6}],"properties":{"Node name for S&R":"ShowText|pysssss"},"widgets_values":["","1"]},{"id":156,"type":"MarkdownNote","pos":[-10888.873046875,-5946.43359375],"size":[284.1663513183594,190.96800231933594],"flags":{},"order":57,"mode":0,"inputs":[],"outputs":[],"properties":{},"widgets_values":["# source: $$\\_\\_test\\_wildcard/numberonly/type_int3/*\\_\\_\nexpected \nseparator = \" \" \nprobability: len(result) \n 1: 100%, no throw error\n"],"color":"#432","bgcolor":"#653"},{"id":159,"type":"ImpactWildcardProcessor","pos":[-10538.005859375,-5956.56591796875],"size":[400,222],"flags":{},"order":58,"mode":0,"inputs":[],"outputs":[{"name":"STRING","type":"STRING","links":[42],"slot_index":0}],"properties":{"Node name for 
S&R":"ImpactWildcardProcessor"},"widgets_values":["{1-3$$__test_wildcard/numberonly/type_int3/*__}","1","populate",939551364174087,"randomize","Select the Wildcard to add to the text"]},{"id":157,"type":"MarkdownNote","pos":[-9596.4326171875,-5956.70556640625],"size":[284.1663513183594,190.96800231933594],"flags":{},"order":59,"mode":0,"inputs":[],"outputs":[],"properties":{},"widgets_values":["# source: $$\\_\\_test\\_wildcard/numberonly/type_int3/*\\_\\_\nexpected \nseparator = sep \nprobability: len(result) \n 1: 100%, no throw error\n"],"color":"#432","bgcolor":"#653"},{"id":158,"type":"ImpactWildcardProcessor","pos":[-9291.236328125,-5971.04443359375],"size":[400,222],"flags":{},"order":60,"mode":0,"inputs":[],"outputs":[{"name":"STRING","type":"STRING","links":[43],"slot_index":0}],"properties":{"Node name for S&R":"ImpactWildcardProcessor"},"widgets_values":["{1-3$$separator$$__test_wildcard/numberonly/type_int3/*__}","1","populate",801077747724021,"randomize","Select the Wildcard to add to the text"]},{"id":95,"type":"MarkdownNote","pos":[-13515.75390625,-6241.0751953125],"size":[284.1663513183594,190.96800231933594],"flags":{},"order":61,"mode":0,"inputs":[],"outputs":[],"properties":{},"widgets_values":["# source: $$\\_\\_test\\_wildcard/empty_text/*\\_\\_\nexpected \nseparator = \" \" \nprobability: len(result) \n \"\": 100%, no throw error\n"],"color":"#432","bgcolor":"#653"},{"id":101,"type":"MarkdownNote","pos":[-13543.1142578125,-5911.0673828125],"size":[284.1663513183594,190.96800231933594],"flags":{},"order":62,"mode":0,"inputs":[],"outputs":[],"properties":{},"widgets_values":["# source: $$\\_\\_test\\_wildcard/text_only/*\\_\\_\nexpected \nseparator = \" \" \nprobability: len(result) \n 1: 33%, 2: 33%, 3: 33%\n"],"color":"#432","bgcolor":"#653"},{"id":102,"type":"MarkdownNote","pos":[-12275.244140625,-5917.9892578125],"size":[284.1663513183594,190.96800231933594],"flags":{},"order":63,"mode":0,"inputs":[],"outputs":[],"properties":{},"widgets_values":["# source: $$\\_\\_test\\_\\_wildcard/text_only/*\\_\\_\nexpected \nseparator = sep \nprobability: len(result) \n 1: 33%, 2: 33%, 3: 33%\n"],"color":"#432","bgcolor":"#653"},{"id":108,"type":"MarkdownNote","pos":[-12275.24609375,-5605.1513671875],"size":[284.1663513183594,190.96800231933594],"flags":{},"order":64,"mode":0,"inputs":[],"outputs":[],"properties":{},"widgets_values":["# source: $$\\_\\_test\\_\\_wildcard/raw_wildcard/*\\_\\_\nexpected \nseparator = sep \nprobability: len(result) \n 1: 33%, 2: 33%, 3: 33%\n"],"color":"#432","bgcolor":"#653"},{"id":107,"type":"MarkdownNote","pos":[-13567.6875,-5594.87939453125],"size":[284.1663513183594,190.96800231933594],"flags":{},"order":65,"mode":0,"inputs":[],"outputs":[],"properties":{},"widgets_values":["# source: $$\\_\\_test\\_wildcard/raw_wildcard/*\\_\\_\nexpected \nseparator = \" \" \nprobability: len(result) \n 1: 33%, 2: 33%, 3: 33%\n"],"color":"#432","bgcolor":"#653"},{"id":113,"type":"MarkdownNote","pos":[-13549.626953125,-5264.916015625],"size":[284.1663513183594,190.96800231933594],"flags":{},"order":66,"mode":0,"inputs":[],"outputs":[],"properties":{},"widgets_values":["# source: $$\\_\\_test\\_wildcard/reference_wildcard/*\\_\\_\nexpected \nseparator = \" \" \nprobability: len(result) \n 1: 100%"],"color":"#432","bgcolor":"#653"},{"id":114,"type":"MarkdownNote","pos":[-12257.185546875,-5275.18798828125],"size":[284.1663513183594,190.96800231933594],"flags":{},"order":67,"mode":0,"inputs":[],"outputs":[],"properties":{},"widgets_values":["# source: 
$$\\_\\_test\\_\\_wildcard/reference_wildcard/*\\_\\_\nexpected \nseparator = sep \nprobability: len(result) \n 1: 100%\n"],"color":"#432","bgcolor":"#653"},{"id":120,"type":"MarkdownNote","pos":[-12225.322265625,-4937.4375],"size":[284.1663513183594,190.96800231933594],"flags":{},"order":68,"mode":0,"inputs":[],"outputs":[],"properties":{},"widgets_values":["# source: $$\\_\\_test\\_\\_wildcard/reference_single_text_only_wildcard/*\\_\\_\nexpected \nseparator = sep \nprobability: len(result) \n 1: 100%\n"],"color":"#432","bgcolor":"#653"},{"id":119,"type":"MarkdownNote","pos":[-13517.763671875,-4927.16552734375],"size":[284.1663513183594,190.96800231933594],"flags":{},"order":69,"mode":0,"inputs":[],"outputs":[],"properties":{},"widgets_values":["# source: $$\\_\\_test\\_wildcard/reference_single_text_only_wildcard/*\\_\\_\nexpected \nseparator = \" \" \nprobability: len(result) \n 1: 100%"],"color":"#432","bgcolor":"#653"},{"id":161,"type":"MarkdownNote","pos":[-14064.0478515625,-5289.62060546875],"size":[459.7430725097656,304.9413146972656],"flags":{},"order":70,"mode":0,"inputs":[],"outputs":[],"properties":{},"widgets_values":["# Notice\n\n```\ntest_wildcard:\n text_only:\n - a\n - b\n - c\n\n # {1-3$$__test_wildcard/text_only/*__} return a|b|c only\n reference_wildcard:\n - __test_wildcard/text_only/*__\n\n # __test_wildcard/text_only/*__ return a|a b|a b c|...\n reference_wildcard_mutiple:\n - {1-3$$__test_wildcard/text_only/*__}\n\n```\n"],"color":"#432","bgcolor":"#653"}],"links":[[4,8,0,9,0,"STRING"],[7,29,0,25,0,"STRING"],[10,47,0,48,0,"STRING"],[11,54,0,50,0,"STRING"],[12,53,0,51,0,"STRING"],[13,49,0,52,0,"STRING"],[14,64,0,55,0,"STRING"],[15,61,0,56,0,"STRING"],[16,63,0,57,0,"STRING"],[17,62,0,58,0,"STRING"],[22,93,0,91,0,"STRING"],[23,94,0,92,0,"STRING"],[24,99,0,97,0,"STRING"],[25,100,0,98,0,"STRING"],[26,105,0,103,0,"STRING"],[27,106,0,104,0,"STRING"],[28,111,0,109,0,"STRING"],[29,112,0,110,0,"STRING"],[30,117,0,115,0,"STRING"],[31,118,0,116,0,"STRING"],[32,123,0,121,0,"STRING"],[33,124,0,122,0,"STRING"],[34,134,0,128,0,"STRING"],[35,135,0,129,0,"STRING"],[36,140,0,136,0,"STRING"],[37,141,0,137,0,"STRING"],[38,146,0,142,0,"STRING"],[39,147,0,143,0,"STRING"],[40,153,0,148,0,"STRING"],[41,152,0,149,0,"STRING"],[42,159,0,154,0,"STRING"],[43,158,0,155,0,"STRING"]],"groups":[],"config":{},"extra":{"ds":{"scale":1.196764113929575,"offset":[14049.181343985432,5500.03266852812]},"node_versions":{"ComfyUI-Custom-Scripts":"bc8922deff73f59311c05cef27b9d4caaf43e87b","ComfyUI-Impact-Pack":"ebcb6f91abf4c8de1ab3636260959177615566ad"}},"version":0.4} \ No newline at end of file diff --git a/zavodik/nodes/ComfyUI-Impact-Pack/tests/workflows/loop-test.json b/zavodik/nodes/ComfyUI-Impact-Pack/tests/workflows/loop-test.json new file mode 100644 index 0000000000000000000000000000000000000000..273f9ad4f94fbb544fd196331db9dd206c0ef691 --- /dev/null +++ b/zavodik/nodes/ComfyUI-Impact-Pack/tests/workflows/loop-test.json @@ -0,0 +1,1114 @@ +{ + "last_node_id": 43, + "last_link_id": 49, + "nodes": [ + { + "id": 7, + "type": "CLIPTextEncode", + "pos": [ + 413, + 389 + ], + "size": { + "0": 425.27801513671875, + "1": 180.6060791015625 + }, + "flags": {}, + "order": 8, + "mode": 0, + "inputs": [ + { + "name": "clip", + "type": "CLIP", + "link": 5 + } + ], + "outputs": [ + { + "name": "CONDITIONING", + "type": "CONDITIONING", + "links": [ + 6 + ], + "slot_index": 0 + } + ], + "properties": { + "Node name for S&R": "CLIPTextEncode" + }, + "widgets_values": [ + "text, watermark" + ] + }, + { + "id": 6, + 
"type": "CLIPTextEncode", + "pos": [ + 415, + 186 + ], + "size": { + "0": 422.84503173828125, + "1": 164.31304931640625 + }, + "flags": {}, + "order": 7, + "mode": 0, + "inputs": [ + { + "name": "clip", + "type": "CLIP", + "link": 3 + } + ], + "outputs": [ + { + "name": "CONDITIONING", + "type": "CONDITIONING", + "links": [ + 4 + ], + "slot_index": 0 + } + ], + "properties": { + "Node name for S&R": "CLIPTextEncode" + }, + "widgets_values": [ + "beautiful scenery nature glass bottle landscape, , purple galaxy bottle," + ] + }, + { + "id": 9, + "type": "SaveImage", + "pos": [ + 1451, + 189 + ], + "size": { + "0": 210, + "1": 270 + }, + "flags": {}, + "order": 11, + "mode": 0, + "inputs": [ + { + "name": "images", + "type": "IMAGE", + "link": 9 + } + ], + "properties": {}, + "widgets_values": [ + "ComfyUI" + ] + }, + { + "id": 4, + "type": "CheckpointLoaderSimple", + "pos": [ + 26, + 474 + ], + "size": { + "0": 315, + "1": 98 + }, + "flags": {}, + "order": 0, + "mode": 0, + "outputs": [ + { + "name": "MODEL", + "type": "MODEL", + "links": [ + 1 + ], + "slot_index": 0 + }, + { + "name": "CLIP", + "type": "CLIP", + "links": [ + 3, + 5 + ], + "slot_index": 1 + }, + { + "name": "VAE", + "type": "VAE", + "links": [ + 8 + ], + "slot_index": 2 + } + ], + "properties": { + "Node name for S&R": "CheckpointLoaderSimple" + }, + "widgets_values": [ + "V07_v07.safetensors" + ] + }, + { + "id": 8, + "type": "VAEDecode", + "pos": [ + 1209, + 188 + ], + "size": { + "0": 210, + "1": 46 + }, + "flags": {}, + "order": 10, + "mode": 0, + "inputs": [ + { + "name": "samples", + "type": "LATENT", + "link": 7 + }, + { + "name": "vae", + "type": "VAE", + "link": 8 + } + ], + "outputs": [ + { + "name": "IMAGE", + "type": "IMAGE", + "links": [ + 9, + 12 + ], + "slot_index": 0 + } + ], + "properties": { + "Node name for S&R": "VAEDecode" + } + }, + { + "id": 19, + "type": "ImpactMinMax", + "pos": [ + 2480, + 1160 + ], + "size": { + "0": 210, + "1": 78 + }, + "flags": {}, + "order": 16, + "mode": 0, + "inputs": [ + { + "name": "a", + "type": "*", + "link": 24 + }, + { + "name": "b", + "type": "*", + "link": 25, + "slot_index": 1 + } + ], + "outputs": [ + { + "name": "INT", + "type": "INT", + "links": [ + 34 + ], + "shape": 3, + "slot_index": 0 + } + ], + "properties": { + "Node name for S&R": "ImpactMinMax" + }, + "widgets_values": [ + false + ] + }, + { + "id": 15, + "type": "ImpactValueSender", + "pos": [ + 3520, + 1140 + ], + "size": { + "0": 210, + "1": 58 + }, + "flags": {}, + "order": 20, + "mode": 0, + "inputs": [ + { + "name": "value", + "type": "*", + "link": 39 + } + ], + "properties": { + "Node name for S&R": "ImpactValueSender" + }, + "widgets_values": [ + 0 + ] + }, + { + "id": 11, + "type": "ImageMaskSwitch", + "pos": [ + 1297, + 893 + ], + "size": { + "0": 315, + "1": 198 + }, + "flags": {}, + "order": 12, + "mode": 0, + "inputs": [ + { + "name": "images1", + "type": "IMAGE", + "link": 12 + }, + { + "name": "mask1_opt", + "type": "MASK", + "link": null + }, + { + "name": "images2_opt", + "type": "IMAGE", + "link": 11 + }, + { + "name": "mask2_opt", + "type": "MASK", + "link": null + }, + { + "name": "images3_opt", + "type": "IMAGE", + "link": null + }, + { + "name": "mask3_opt", + "type": "MASK", + "link": null + }, + { + "name": "images4_opt", + "type": "IMAGE", + "link": null + }, + { + "name": "mask4_opt", + "type": "MASK", + "link": null + }, + { + "name": "select", + "type": "INT", + "link": 43, + "widget": { + "name": "select", + "config": [ + "INT", + { + "default": 1, + "min": 1, + "max": 4, + 
"step": 1 + } + ] + }, + "slot_index": 8 + } + ], + "outputs": [ + { + "name": "IMAGE", + "type": "IMAGE", + "links": [ + 13 + ], + "shape": 3, + "slot_index": 0 + }, + { + "name": "MASK", + "type": "MASK", + "links": null, + "shape": 3 + } + ], + "properties": { + "Node name for S&R": "ImageMaskSwitch" + }, + "widgets_values": [ + 1 + ] + }, + { + "id": 34, + "type": "ImpactConditionalBranch", + "pos": [ + 3264, + 1006 + ], + "size": { + "0": 210, + "1": 66 + }, + "flags": {}, + "order": 18, + "mode": 0, + "inputs": [ + { + "name": "cond", + "type": "BOOLEAN", + "link": 36, + "slot_index": 0 + }, + { + "name": "tt_value", + "type": "*", + "link": 37 + }, + { + "name": "ff_value", + "type": "*", + "link": 38 + } + ], + "outputs": [ + { + "name": "*", + "type": "*", + "links": [ + 39 + ], + "shape": 3, + "slot_index": 0 + } + ], + "properties": { + "Node name for S&R": "ImpactConditionalBranch" + } + }, + { + "id": 33, + "type": "ImpactInt", + "pos": [ + 3010, + 930 + ], + "size": { + "0": 210, + "1": 58 + }, + "flags": {}, + "order": 1, + "mode": 0, + "outputs": [ + { + "name": "INT", + "type": "INT", + "links": [ + 37 + ], + "shape": 3, + "slot_index": 0 + } + ], + "properties": { + "Node name for S&R": "ImpactInt" + }, + "widgets_values": [ + 2 + ] + }, + { + "id": 35, + "type": "ImpactInt", + "pos": [ + 3000, + 1140 + ], + "size": { + "0": 210, + "1": 58 + }, + "flags": {}, + "order": 2, + "mode": 0, + "outputs": [ + { + "name": "INT", + "type": "INT", + "links": [ + 38 + ], + "shape": 3, + "slot_index": 0 + } + ], + "properties": { + "Node name for S&R": "ImpactInt" + }, + "widgets_values": [ + 1 + ] + }, + { + "id": 5, + "type": "EmptyLatentImage", + "pos": [ + 473, + 609 + ], + "size": { + "0": 315, + "1": 106 + }, + "flags": {}, + "order": 3, + "mode": 0, + "outputs": [ + { + "name": "LATENT", + "type": "LATENT", + "links": [ + 2 + ], + "slot_index": 0 + } + ], + "properties": { + "Node name for S&R": "EmptyLatentImage" + }, + "widgets_values": [ + 256, + 256, + 1 + ] + }, + { + "id": 13, + "type": "ImageScaleBy", + "pos": [ + 1730, + 920 + ], + "size": { + "0": 210, + "1": 82 + }, + "flags": {}, + "order": 13, + "mode": 0, + "inputs": [ + { + "name": "image", + "type": "IMAGE", + "link": 13 + } + ], + "outputs": [ + { + "name": "IMAGE", + "type": "IMAGE", + "links": [ + 23, + 40 + ], + "shape": 3, + "slot_index": 0 + } + ], + "properties": { + "Node name for S&R": "ImageScaleBy" + }, + "widgets_values": [ + "nearest-exact", + 1.2 + ] + }, + { + "id": 41, + "type": "ImpactConditionalStopIteration", + "pos": [ + 3607, + 774 + ], + "size": { + "0": 252, + "1": 26 + }, + "flags": {}, + "order": 21, + "mode": 0, + "inputs": [ + { + "name": "cond", + "type": "BOOLEAN", + "link": 49 + } + ], + "properties": { + "Node name for S&R": "ImpactConditionalStopIteration" + } + }, + { + "id": 32, + "type": "ImpactCompare", + "pos": [ + 2760, + 1040 + ], + "size": { + "0": 210, + "1": 78 + }, + "flags": {}, + "order": 17, + "mode": 0, + "inputs": [ + { + "name": "a", + "type": "*", + "link": 47 + }, + { + "name": "b", + "type": "*", + "link": 34, + "slot_index": 1 + } + ], + "outputs": [ + { + "name": "BOOLEAN", + "type": "BOOLEAN", + "links": [ + 36, + 48 + ], + "shape": 3, + "slot_index": 0 + } + ], + "properties": { + "Node name for S&R": "ImpactCompare" + }, + "widgets_values": [ + "a > b" + ] + }, + { + "id": 43, + "type": "ImpactNeg", + "pos": [ + 3210.6906854687495, + 698.6871511123657 + ], + "size": { + "0": 210, + "1": 26 + }, + "flags": {}, + "order": 19, + "mode": 0, + "inputs": [ + { + 
"name": "value", + "type": "BOOLEAN", + "link": 48 + } + ], + "outputs": [ + { + "name": "BOOLEAN", + "type": "BOOLEAN", + "links": [ + 49 + ], + "shape": 3, + "slot_index": 0 + } + ], + "properties": { + "Node name for S&R": "ImpactNeg" + } + }, + { + "id": 10, + "type": "ImageReceiver", + "pos": [ + 641, + 932 + ], + "size": { + "0": 315, + "1": 200 + }, + "flags": {}, + "order": 4, + "mode": 0, + "outputs": [ + { + "name": "IMAGE", + "type": "IMAGE", + "links": [ + 11 + ], + "shape": 3, + "slot_index": 0 + }, + { + "name": "MASK", + "type": "MASK", + "links": null, + "shape": 3 + } + ], + "properties": { + "Node name for S&R": "ImageReceiver" + }, + "widgets_values": [ + "ImgSender_temp_vxhgs_00001_.png [temp]", + 0 + ] + }, + { + "id": 24, + "type": "ImpactImageInfo", + "pos": [ + 2077, + 1117 + ], + "size": { + "0": 210, + "1": 86 + }, + "flags": {}, + "order": 14, + "mode": 0, + "inputs": [ + { + "name": "value", + "type": "IMAGE", + "link": 23 + } + ], + "outputs": [ + { + "name": "batch", + "type": "INT", + "links": null, + "shape": 3 + }, + { + "name": "height", + "type": "INT", + "links": [ + 24 + ], + "shape": 3, + "slot_index": 1 + }, + { + "name": "width", + "type": "INT", + "links": [ + 25 + ], + "shape": 3 + }, + { + "name": "channel", + "type": "INT", + "links": null, + "shape": 3 + } + ], + "properties": { + "Node name for S&R": "ImpactImageInfo" + } + }, + { + "id": 42, + "type": "ImpactInt", + "pos": [ + 2483, + 983 + ], + "size": { + "0": 210, + "1": 58 + }, + "flags": {}, + "order": 5, + "mode": 0, + "outputs": [ + { + "name": "INT", + "type": "INT", + "links": [ + 47 + ], + "shape": 3, + "slot_index": 0 + } + ], + "properties": { + "Node name for S&R": "ImpactInt" + }, + "widgets_values": [ + 768 + ] + }, + { + "id": 39, + "type": "ImpactValueReceiver", + "pos": [ + 1021, + 1137 + ], + "size": { + "0": 210, + "1": 106 + }, + "flags": {}, + "order": 6, + "mode": 0, + "outputs": [ + { + "name": "*", + "type": "*", + "links": [ + 43 + ], + "shape": 3 + } + ], + "properties": { + "Node name for S&R": "ImpactValueReceiver" + }, + "widgets_values": [ + "INT", + 1, + 0 + ] + }, + { + "id": 3, + "type": "KSampler", + "pos": [ + 872, + 217 + ], + "size": { + "0": 315, + "1": 474 + }, + "flags": {}, + "order": 9, + "mode": 0, + "inputs": [ + { + "name": "model", + "type": "MODEL", + "link": 1 + }, + { + "name": "positive", + "type": "CONDITIONING", + "link": 4 + }, + { + "name": "negative", + "type": "CONDITIONING", + "link": 6 + }, + { + "name": "latent_image", + "type": "LATENT", + "link": 2 + } + ], + "outputs": [ + { + "name": "LATENT", + "type": "LATENT", + "links": [ + 7 + ], + "slot_index": 0 + } + ], + "properties": { + "Node name for S&R": "KSampler" + }, + "widgets_values": [ + 901257808527154, + "fixed", + 5, + 8, + "euler", + "normal", + 1 + ] + }, + { + "id": 36, + "type": "ImageSender", + "pos": [ + 2046, + -116 + ], + "size": [ + 914.2697004627885, + 989.0802794506753 + ], + "flags": {}, + "order": 15, + "mode": 0, + "inputs": [ + { + "name": "images", + "type": "IMAGE", + "link": 40 + } + ], + "properties": { + "Node name for S&R": "ImageSender" + }, + "widgets_values": [ + "ImgSender", + 0 + ] + } + ], + "links": [ + [ + 1, + 4, + 0, + 3, + 0, + "MODEL" + ], + [ + 2, + 5, + 0, + 3, + 3, + "LATENT" + ], + [ + 3, + 4, + 1, + 6, + 0, + "CLIP" + ], + [ + 4, + 6, + 0, + 3, + 1, + "CONDITIONING" + ], + [ + 5, + 4, + 1, + 7, + 0, + "CLIP" + ], + [ + 6, + 7, + 0, + 3, + 2, + "CONDITIONING" + ], + [ + 7, + 3, + 0, + 8, + 0, + "LATENT" + ], + [ + 8, + 4, + 2, + 8, + 1, + 
"VAE" + ], + [ + 9, + 8, + 0, + 9, + 0, + "IMAGE" + ], + [ + 11, + 10, + 0, + 11, + 2, + "IMAGE" + ], + [ + 12, + 8, + 0, + 11, + 0, + "IMAGE" + ], + [ + 13, + 11, + 0, + 13, + 0, + "IMAGE" + ], + [ + 23, + 13, + 0, + 24, + 0, + "IMAGE" + ], + [ + 24, + 24, + 1, + 19, + 0, + "*" + ], + [ + 25, + 24, + 2, + 19, + 1, + "*" + ], + [ + 34, + 19, + 0, + 32, + 1, + "*" + ], + [ + 36, + 32, + 0, + 34, + 0, + "BOOLEAN" + ], + [ + 37, + 33, + 0, + 34, + 1, + "*" + ], + [ + 38, + 35, + 0, + 34, + 2, + "*" + ], + [ + 39, + 34, + 0, + 15, + 0, + "*" + ], + [ + 40, + 13, + 0, + 36, + 0, + "IMAGE" + ], + [ + 43, + 39, + 0, + 11, + 8, + "INT" + ], + [ + 47, + 42, + 0, + 32, + 0, + "*" + ], + [ + 48, + 32, + 0, + 43, + 0, + "BOOLEAN" + ], + [ + 49, + 43, + 0, + 41, + 0, + "BOOLEAN" + ] + ], + "groups": [], + "config": {}, + "extra": {}, + "version": 0.4 +} \ No newline at end of file diff --git a/zavodik/nodes/ComfyUI-Impact-Pack/tests/workflows/masks.json b/zavodik/nodes/ComfyUI-Impact-Pack/tests/workflows/masks.json new file mode 100644 index 0000000000000000000000000000000000000000..a8a21a6b177559df88a74ef8a27c2b7c1e34d71b --- /dev/null +++ b/zavodik/nodes/ComfyUI-Impact-Pack/tests/workflows/masks.json @@ -0,0 +1,622 @@ +{ + "last_node_id": 38, + "last_link_id": 52, + "nodes": [ + { + "id": 21, + "type": "SEGSToImageList", + "pos": [ + 2160, + 970 + ], + "size": { + "0": 304.79998779296875, + "1": 46 + }, + "flags": {}, + "order": 10, + "mode": 0, + "inputs": [ + { + "name": "segs", + "type": "SEGS", + "link": 41 + }, + { + "name": "fallback_image_opt", + "type": "IMAGE", + "link": 26, + "slot_index": 1 + } + ], + "outputs": [ + { + "name": "IMAGE", + "type": "IMAGE", + "links": [ + 27 + ], + "shape": 6, + "slot_index": 0 + } + ], + "properties": { + "Node name for S&R": "SEGSToImageList" + } + }, + { + "id": 5, + "type": "MaskToSEGS", + "pos": [ + 1520, + 980 + ], + "size": { + "0": 210, + "1": 130 + }, + "flags": {}, + "order": 4, + "mode": 0, + "inputs": [ + { + "name": "mask", + "type": "MASK", + "link": 5 + } + ], + "outputs": [ + { + "name": "SEGS", + "type": "SEGS", + "links": [ + 35, + 46 + ], + "shape": 3, + "slot_index": 0 + } + ], + "properties": { + "Node name for S&R": "MaskToSEGS" + }, + "widgets_values": [ + "False", + 3, + "disabled", + 10 + ] + }, + { + "id": 36, + "type": "MasksToMaskList", + "pos": [ + 2270, + 680 + ], + "size": { + "0": 158.000244140625, + "1": 26 + }, + "flags": {}, + "order": 8, + "mode": 0, + "inputs": [ + { + "name": "masks", + "type": "MASKS", + "link": 51 + } + ], + "outputs": [ + { + "name": "MASK", + "type": "MASK", + "links": [ + 52 + ], + "shape": 6, + "slot_index": 0 + } + ], + "properties": { + "Node name for S&R": "MasksToMaskList" + }, + "color": "#223", + "bgcolor": "#335" + }, + { + "id": 35, + "type": "MaskToImage", + "pos": [ + 2480, + 680 + ], + "size": { + "0": 176.39999389648438, + "1": 38.59991455078125 + }, + "flags": {}, + "order": 11, + "mode": 0, + "inputs": [ + { + "name": "mask", + "type": "MASK", + "link": 52 + } + ], + "outputs": [ + { + "name": "IMAGE", + "type": "IMAGE", + "links": [ + 50 + ], + "shape": 3, + "slot_index": 0 + } + ], + "properties": { + "Node name for S&R": "MaskToImage" + } + }, + { + "id": 28, + "type": "Segs & Mask ForEach", + "pos": [ + 1800, + 980 + ], + "size": { + "0": 243.60000610351562, + "1": 46 + }, + "flags": {}, + "order": 7, + "mode": 0, + "inputs": [ + { + "name": "segs", + "type": "SEGS", + "link": 35, + "slot_index": 0 + }, + { + "name": "masks", + "type": "MASKS", + "link": 43 + } + ], + "outputs": [ 
+ { + "name": "SEGS", + "type": "SEGS", + "links": [ + 41 + ], + "shape": 3, + "slot_index": 0 + } + ], + "properties": { + "Node name for S&R": "Segs & Mask ForEach" + } + }, + { + "id": 22, + "type": "PreviewImage", + "pos": [ + 2510, + 970 + ], + "size": { + "0": 210, + "1": 246 + }, + "flags": {}, + "order": 12, + "mode": 0, + "inputs": [ + { + "name": "images", + "type": "IMAGE", + "link": 27 + } + ], + "properties": { + "Node name for S&R": "PreviewImage" + } + }, + { + "id": 4, + "type": "LoadImage", + "pos": [ + 1150, + 460 + ], + "size": { + "0": 315, + "1": 314 + }, + "flags": {}, + "order": 0, + "mode": 0, + "outputs": [ + { + "name": "IMAGE", + "type": "IMAGE", + "links": [ + 26, + 47 + ], + "shape": 3, + "slot_index": 0 + }, + { + "name": "MASK", + "type": "MASK", + "links": [ + 5 + ], + "shape": 3, + "slot_index": 1 + } + ], + "properties": { + "Node name for S&R": "LoadImage" + }, + "widgets_values": [ + "clipspace/clipspace-mask-416378.30000000075.png [input]", + "image" + ] + }, + { + "id": 33, + "type": "SAMDetectorSegmented", + "pos": [ + 1740, + 310 + ], + "size": { + "0": 315, + "1": 218 + }, + "flags": {}, + "order": 5, + "mode": 0, + "inputs": [ + { + "name": "sam_model", + "type": "SAM_MODEL", + "link": 45 + }, + { + "name": "segs", + "type": "SEGS", + "link": 46 + }, + { + "name": "image", + "type": "IMAGE", + "link": 47 + } + ], + "outputs": [ + { + "name": "combined_mask", + "type": "MASK", + "links": [ + 44 + ], + "shape": 3, + "slot_index": 0 + }, + { + "name": "batch_masks", + "type": "MASKS", + "links": [ + 43, + 51 + ], + "shape": 3, + "slot_index": 1 + } + ], + "properties": { + "Node name for S&R": "SAMDetectorSegmented" + }, + "widgets_values": [ + "center-1", + 0, + 0.7, + 0, + 0.7, + "False" + ] + }, + { + "id": 2, + "type": "SAMLoader", + "pos": [ + 1160, + 310 + ], + "size": { + "0": 315, + "1": 82 + }, + "flags": {}, + "order": 1, + "mode": 0, + "outputs": [ + { + "name": "SAM_MODEL", + "type": "SAM_MODEL", + "links": [ + 45 + ], + "shape": 3, + "slot_index": 0 + } + ], + "properties": { + "Node name for S&R": "SAMLoader" + }, + "widgets_values": [ + "sam_vit_b_01ec64.pth", + "AUTO" + ] + }, + { + "id": 6, + "type": "MaskToImage", + "pos": [ + 2300, + 310 + ], + "size": { + "0": 176.39999389648438, + "1": 26 + }, + "flags": {}, + "order": 6, + "mode": 0, + "inputs": [ + { + "name": "mask", + "type": "MASK", + "link": 44 + } + ], + "outputs": [ + { + "name": "IMAGE", + "type": "IMAGE", + "links": [ + 8 + ], + "shape": 3, + "slot_index": 0 + } + ], + "properties": { + "Node name for S&R": "MaskToImage" + } + }, + { + "id": 7, + "type": "PreviewImage", + "pos": [ + 2720, + 310 + ], + "size": { + "0": 210, + "1": 246 + }, + "flags": {}, + "order": 9, + "mode": 0, + "inputs": [ + { + "name": "images", + "type": "IMAGE", + "link": 8 + } + ], + "properties": { + "Node name for S&R": "PreviewImage" + } + }, + { + "id": 9, + "type": "PreviewImage", + "pos": [ + 2720, + 680 + ], + "size": { + "0": 210, + "1": 246 + }, + "flags": {}, + "order": 13, + "mode": 0, + "inputs": [ + { + "name": "images", + "type": "IMAGE", + "link": 50 + } + ], + "properties": { + "Node name for S&R": "PreviewImage" + } + }, + { + "id": 38, + "type": "Note", + "pos": [ + 2032, + 698 + ], + "size": [ + 210, + 81.49969482421875 + ], + "flags": {}, + "order": 2, + "mode": 0, + "properties": { + "text": "" + }, + "widgets_values": [ + "MasksToMaskList node introduced\n" + ], + "color": "#432", + "bgcolor": "#653" + }, + { + "id": 37, + "type": "Note", + "pos": [ + 2071, + 384 + ], + 
"size": [ + 281.500244140625, + 65.09967041015625 + ], + "flags": {}, + "order": 3, + "mode": 0, + "properties": { + "text": "" + }, + "widgets_values": [ + "type of batch_masks => MASKS instead of MASK\n" + ], + "color": "#432", + "bgcolor": "#653" + } + ], + "links": [ + [ + 5, + 4, + 1, + 5, + 0, + "MASK" + ], + [ + 8, + 6, + 0, + 7, + 0, + "IMAGE" + ], + [ + 26, + 4, + 0, + 21, + 1, + "IMAGE" + ], + [ + 27, + 21, + 0, + 22, + 0, + "IMAGE" + ], + [ + 35, + 5, + 0, + 28, + 0, + "SEGS" + ], + [ + 41, + 28, + 0, + 21, + 0, + "SEGS" + ], + [ + 43, + 33, + 1, + 28, + 1, + "MASKS" + ], + [ + 44, + 33, + 0, + 6, + 0, + "MASK" + ], + [ + 45, + 2, + 0, + 33, + 0, + "SAM_MODEL" + ], + [ + 46, + 5, + 0, + 33, + 1, + "SEGS" + ], + [ + 47, + 4, + 0, + 33, + 2, + "IMAGE" + ], + [ + 50, + 35, + 0, + 9, + 0, + "IMAGE" + ], + [ + 51, + 33, + 1, + 36, + 0, + "MASKS" + ], + [ + 52, + 36, + 0, + 35, + 0, + "MASK" + ] + ], + "groups": [], + "config": {}, + "extra": {}, + "version": 0.4 +} \ No newline at end of file diff --git a/zavodik/nodes/ComfyUI-Impact-Pack/tests/workflows/regional_prompt.json b/zavodik/nodes/ComfyUI-Impact-Pack/tests/workflows/regional_prompt.json new file mode 100644 index 0000000000000000000000000000000000000000..3b5c8293f10599d3d46cd975c4e21778c6ebf76f --- /dev/null +++ b/zavodik/nodes/ComfyUI-Impact-Pack/tests/workflows/regional_prompt.json @@ -0,0 +1,1625 @@ +{ + "last_node_id": 35, + "last_link_id": 65, + "nodes": [ + { + "id": 9, + "type": "EditBasicPipe", + "pos": [ + 1210, + 1030 + ], + "size": { + "0": 267, + "1": 126 + }, + "flags": {}, + "order": 16, + "mode": 0, + "inputs": [ + { + "name": "basic_pipe", + "type": "BASIC_PIPE", + "link": 60 + }, + { + "name": "model", + "type": "MODEL", + "link": null + }, + { + "name": "clip", + "type": "CLIP", + "link": null + }, + { + "name": "vae", + "type": "VAE", + "link": null + }, + { + "name": "positive", + "type": "CONDITIONING", + "link": 13 + }, + { + "name": "negative", + "type": "CONDITIONING", + "link": null + } + ], + "outputs": [ + { + "name": "basic_pipe", + "type": "BASIC_PIPE", + "links": [ + 16 + ], + "shape": 3, + "slot_index": 0 + } + ], + "properties": { + "Node name for S&R": "EditBasicPipe" + } + }, + { + "id": 15, + "type": "LoadImage", + "pos": [ + -240, + 1710 + ], + "size": { + "0": 900, + "1": 900 + }, + "flags": {}, + "order": 0, + "mode": 0, + "outputs": [ + { + "name": "IMAGE", + "type": "IMAGE", + "links": null, + "shape": 3 + }, + { + "name": "MASK", + "type": "MASK", + "links": [ + 28 + ], + "shape": 3, + "slot_index": 1 + } + ], + "properties": { + "Node name for S&R": "LoadImage" + }, + "widgets_values": [ + "clipspace/clipspace-mask-1572044.0999999996.png [input]", + "image" + ] + }, + { + "id": 23, + "type": "LoadImage", + "pos": [ + -240, + 3790 + ], + "size": { + "0": 920, + "1": 910 + }, + "flags": {}, + "order": 1, + "mode": 0, + "outputs": [ + { + "name": "IMAGE", + "type": "IMAGE", + "links": null, + "shape": 3 + }, + { + "name": "MASK", + "type": "MASK", + "links": [ + 31 + ], + "shape": 3, + "slot_index": 1 + } + ], + "properties": { + "Node name for S&R": "LoadImage" + }, + "widgets_values": [ + "clipspace/clipspace-mask-1351518.png [input]", + "image" + ] + }, + { + "id": 26, + "type": "EditBasicPipe", + "pos": [ + 1240, + 4180 + ], + "size": { + "0": 178, + "1": 126 + }, + "flags": {}, + "order": 13, + "mode": 0, + "inputs": [ + { + "name": "basic_pipe", + "type": "BASIC_PIPE", + "link": 59 + }, + { + "name": "model", + "type": "MODEL", + "link": null + }, + { + "name": "clip", + "type": 
"CLIP", + "link": null + }, + { + "name": "vae", + "type": "VAE", + "link": null + }, + { + "name": "positive", + "type": "CONDITIONING", + "link": 34 + }, + { + "name": "negative", + "type": "CONDITIONING", + "link": null + } + ], + "outputs": [ + { + "name": "basic_pipe", + "type": "BASIC_PIPE", + "links": [ + 33 + ], + "shape": 3, + "slot_index": 0 + } + ], + "properties": { + "Node name for S&R": "EditBasicPipe" + } + }, + { + "id": 17, + "type": "EditBasicPipe", + "pos": [ + 1550, + 1740 + ], + "size": { + "0": 178, + "1": 126 + }, + "flags": {}, + "order": 14, + "mode": 0, + "inputs": [ + { + "name": "basic_pipe", + "type": "BASIC_PIPE", + "link": 57 + }, + { + "name": "model", + "type": "MODEL", + "link": null + }, + { + "name": "clip", + "type": "CLIP", + "link": null + }, + { + "name": "vae", + "type": "VAE", + "link": null + }, + { + "name": "positive", + "type": "CONDITIONING", + "link": 21 + }, + { + "name": "negative", + "type": "CONDITIONING", + "link": null + } + ], + "outputs": [ + { + "name": "basic_pipe", + "type": "BASIC_PIPE", + "links": [ + 24 + ], + "shape": 3, + "slot_index": 0 + } + ], + "properties": { + "Node name for S&R": "EditBasicPipe" + } + }, + { + "id": 7, + "type": "VAEDecode", + "pos": [ + 3660, + 1820 + ], + "size": { + "0": 210, + "1": 46 + }, + "flags": {}, + "order": 27, + "mode": 0, + "inputs": [ + { + "name": "samples", + "type": "LATENT", + "link": 7 + }, + { + "name": "vae", + "type": "VAE", + "link": 63 + } + ], + "outputs": [ + { + "name": "IMAGE", + "type": "IMAGE", + "links": [ + 9 + ], + "shape": 3, + "slot_index": 0 + } + ], + "properties": { + "Node name for S&R": "VAEDecode" + } + }, + { + "id": 8, + "type": "PreviewImage", + "pos": [ + 4020, + 1450 + ], + "size": { + "0": 1069.308349609375, + "1": 1128.923828125 + }, + "flags": {}, + "order": 28, + "mode": 0, + "inputs": [ + { + "name": "images", + "type": "IMAGE", + "link": 9 + } + ], + "properties": { + "Node name for S&R": "PreviewImage" + } + }, + { + "id": 10, + "type": "CLIPTextEncode", + "pos": [ + 860, + 1110 + ], + "size": { + "0": 292.0009765625, + "1": 115.41679382324219 + }, + "flags": {}, + "order": 12, + "mode": 0, + "inputs": [ + { + "name": "clip", + "type": "CLIP", + "link": 61 + } + ], + "outputs": [ + { + "name": "CONDITIONING", + "type": "CONDITIONING", + "links": [ + 13 + ], + "shape": 3, + "slot_index": 0 + } + ], + "properties": { + "Node name for S&R": "CLIPTextEncode" + }, + "widgets_values": [ + "photorealistic:1.4, 1girl black hair, upper knee, (cafe:1.1)" + ] + }, + { + "id": 22, + "type": "CombineRegionalPrompts", + "pos": [ + 2810, + 1860 + ], + "size": { + "0": 287.20001220703125, + "1": 106 + }, + "flags": {}, + "order": 25, + "mode": 0, + "inputs": [ + { + "name": "regional_prompts1", + "type": "REGIONAL_PROMPTS", + "link": 48 + }, + { + "name": "regional_prompts2", + "type": "REGIONAL_PROMPTS", + "link": 49 + }, + { + "name": "regional_prompts3", + "type": "REGIONAL_PROMPTS", + "link": 50 + }, + { + "name": "regional_prompts4", + "type": "REGIONAL_PROMPTS", + "link": 64 + }, + { + "name": "regional_prompts5", + "type": "REGIONAL_PROMPTS", + "link": null + } + ], + "outputs": [ + { + "name": "REGIONAL_PROMPTS", + "type": "REGIONAL_PROMPTS", + "links": [ + 27 + ], + "shape": 3, + "slot_index": 0 + } + ], + "properties": { + "Node name for S&R": "CombineRegionalPrompts" + } + }, + { + "id": 12, + "type": "RegionalPrompt", + "pos": [ + 2030, + 1010 + ], + "size": { + "0": 418.1999816894531, + "1": 46 + }, + "flags": {}, + "order": 24, + "mode": 0, + "inputs": 
[ + { + "name": "mask", + "type": "MASK", + "link": 15 + }, + { + "name": "advanced_sampler", + "type": "KSAMPLER_ADVANCED", + "link": 17 + } + ], + "outputs": [ + { + "name": "REGIONAL_PROMPTS", + "type": "REGIONAL_PROMPTS", + "links": [ + 48 + ], + "shape": 3, + "slot_index": 0 + } + ], + "properties": { + "Node name for S&R": "RegionalPrompt" + } + }, + { + "id": 14, + "type": "EmptyLatentImage", + "pos": [ + 2740, + 1500 + ], + "size": { + "0": 350, + "1": 110 + }, + "flags": {}, + "order": 2, + "mode": 0, + "outputs": [ + { + "name": "LATENT", + "type": "LATENT", + "links": [ + 19 + ], + "shape": 3 + } + ], + "properties": { + "Node name for S&R": "EmptyLatentImage" + }, + "widgets_values": [ + 768, + 1104, + 1 + ] + }, + { + "id": 27, + "type": "CLIPTextEncode", + "pos": [ + 830, + 4260 + ], + "size": [ + 338.8743232727047, + 117.87075195312445 + ], + "flags": {}, + "order": 9, + "mode": 0, + "inputs": [ + { + "name": "clip", + "type": "CLIP", + "link": 37 + } + ], + "outputs": [ + { + "name": "CONDITIONING", + "type": "CONDITIONING", + "links": [ + 34 + ], + "shape": 3, + "slot_index": 0 + } + ], + "properties": { + "Node name for S&R": "CLIPTextEncode" + }, + "widgets_values": [ + "photorealistic:1.4, 1girl yellow pencil skirt, upper knee, (cafe:1.1)" + ] + }, + { + "id": 25, + "type": "KSamplerAdvancedProvider", + "pos": [ + 1600, + 4180 + ], + "size": { + "0": 287.9136962890625, + "1": 106.45689392089844 + }, + "flags": {}, + "order": 17, + "mode": 0, + "inputs": [ + { + "name": "basic_pipe", + "type": "BASIC_PIPE", + "link": 33 + } + ], + "outputs": [ + { + "name": "KSAMPLER_ADVANCED", + "type": "KSAMPLER_ADVANCED", + "links": [ + 32 + ], + "shape": 3, + "slot_index": 0 + } + ], + "properties": { + "Node name for S&R": "KSamplerAdvancedProvider" + }, + "widgets_values": [ + 8, + "dpm_fast", + "sgm_uniform" + ] + }, + { + "id": 13, + "type": "KSamplerAdvancedProvider", + "pos": [ + 1563, + 1030 + ], + "size": { + "0": 355.20001220703125, + "1": 106 + }, + "flags": {}, + "order": 20, + "mode": 0, + "inputs": [ + { + "name": "basic_pipe", + "type": "BASIC_PIPE", + "link": 16 + } + ], + "outputs": [ + { + "name": "KSAMPLER_ADVANCED", + "type": "KSAMPLER_ADVANCED", + "links": [ + 17 + ], + "shape": 3, + "slot_index": 0 + } + ], + "properties": { + "Node name for S&R": "KSamplerAdvancedProvider" + }, + "widgets_values": [ + 8, + "dpm_fast", + "sgm_uniform" + ] + }, + { + "id": 2, + "type": "RegionalSampler", + "pos": [ + 3260, + 1820 + ], + "size": { + "0": 323.1692810058594, + "1": 597.25439453125 + }, + "flags": {}, + "order": 26, + "mode": 0, + "inputs": [ + { + "name": "samples", + "type": "LATENT", + "link": 19, + "slot_index": 0 + }, + { + "name": "base_sampler", + "type": "KSAMPLER_ADVANCED", + "link": 10 + }, + { + "name": "regional_prompts", + "type": "REGIONAL_PROMPTS", + "link": 27 + } + ], + "outputs": [ + { + "name": "LATENT", + "type": "LATENT", + "links": [ + 7 + ], + "shape": 3, + "slot_index": 0 + } + ], + "properties": { + "Node name for S&R": "RegionalSampler" + }, + "widgets_values": [ + 1019854126263754, + "randomize", + 30, + 1, + 5 + ] + }, + { + "id": 5, + "type": "## make-basic_pipe [2c8c61]", + "pos": [ + -2547, + 2236 + ], + "size": { + "0": 400, + "1": 200 + }, + "flags": {}, + "order": 3, + "mode": 0, + "inputs": [ + { + "name": "vae_opt", + "type": "VAE", + "link": null + } + ], + "outputs": [ + { + "name": "BASIC_PIPE", + "type": "BASIC_PIPE", + "links": [ + 1, + 3, + 62 + ], + "shape": 3, + "slot_index": 0 + } + ], + "title": "## make-basic_pipe", + 
"properties": { + "Node name for S&R": "## make-basic_pipe [2c8c61]" + }, + "widgets_values": [ + "SD1.5/epicrealism_naturalSinRC1VAE.safetensors", + "a photograph of a girl is standing in the cafe terrace, looking viewer, upper knee", + "big head, closeup" + ] + }, + { + "id": 1, + "type": "LoadImage", + "pos": [ + -260, + 778 + ], + "size": { + "0": 915.1032104492188, + "1": 860.6505126953125 + }, + "flags": {}, + "order": 4, + "mode": 0, + "outputs": [ + { + "name": "IMAGE", + "type": "IMAGE", + "links": null, + "shape": 3 + }, + { + "name": "MASK", + "type": "MASK", + "links": [ + 15 + ], + "shape": 3, + "slot_index": 1 + } + ], + "properties": { + "Node name for S&R": "LoadImage" + }, + "widgets_values": [ + "clipspace/clipspace-mask-1641138.7000000002.png [input]", + "image" + ] + }, + { + "id": 31, + "type": "CLIPTextEncode", + "pos": [ + 1230, + 2550 + ], + "size": { + "0": 292.0009765625, + "1": 115.41679382324219 + }, + "flags": {}, + "order": 11, + "mode": 0, + "inputs": [ + { + "name": "clip", + "type": "CLIP", + "link": 56 + } + ], + "outputs": [ + { + "name": "CONDITIONING", + "type": "CONDITIONING", + "links": [ + 51 + ], + "shape": 3, + "slot_index": 0 + } + ], + "properties": { + "Node name for S&R": "CLIPTextEncode" + }, + "widgets_values": [ + "photorealistic:1.4, 1girl, green tie, upper knee, (cafe:1.1)" + ] + }, + { + "id": 33, + "type": "KSamplerAdvancedProvider", + "pos": [ + 1890, + 2470 + ], + "size": { + "0": 305.4067687988281, + "1": 106 + }, + "flags": {}, + "order": 19, + "mode": 0, + "inputs": [ + { + "name": "basic_pipe", + "type": "BASIC_PIPE", + "link": 53 + } + ], + "outputs": [ + { + "name": "KSAMPLER_ADVANCED", + "type": "KSAMPLER_ADVANCED", + "links": [ + 52 + ], + "shape": 3, + "slot_index": 0 + } + ], + "properties": { + "Node name for S&R": "KSamplerAdvancedProvider" + }, + "widgets_values": [ + 8, + "dpm_fast", + "sgm_uniform" + ] + }, + { + "id": 30, + "type": "EditBasicPipe", + "pos": [ + 1610, + 2480 + ], + "size": { + "0": 178, + "1": 126 + }, + "flags": {}, + "order": 15, + "mode": 0, + "inputs": [ + { + "name": "basic_pipe", + "type": "BASIC_PIPE", + "link": 58 + }, + { + "name": "model", + "type": "MODEL", + "link": null + }, + { + "name": "clip", + "type": "CLIP", + "link": null + }, + { + "name": "vae", + "type": "VAE", + "link": null + }, + { + "name": "positive", + "type": "CONDITIONING", + "link": 51 + }, + { + "name": "negative", + "type": "CONDITIONING", + "link": null + } + ], + "outputs": [ + { + "name": "basic_pipe", + "type": "BASIC_PIPE", + "links": [ + 53 + ], + "shape": 3, + "slot_index": 0 + } + ], + "properties": { + "Node name for S&R": "EditBasicPipe" + } + }, + { + "id": 6, + "type": "FromBasicPipe", + "pos": [ + -1813, + 2226 + ], + "size": { + "0": 241.79998779296875, + "1": 106 + }, + "flags": {}, + "order": 7, + "mode": 0, + "inputs": [ + { + "name": "basic_pipe", + "type": "BASIC_PIPE", + "link": 3 + } + ], + "outputs": [ + { + "name": "model", + "type": "MODEL", + "links": null, + "shape": 3 + }, + { + "name": "clip", + "type": "CLIP", + "links": [ + 37 + ], + "shape": 3, + "slot_index": 1 + }, + { + "name": "vae", + "type": "VAE", + "links": [], + "shape": 3, + "slot_index": 2 + }, + { + "name": "positive", + "type": "CONDITIONING", + "links": [], + "shape": 3, + "slot_index": 3 + }, + { + "name": "negative", + "type": "CONDITIONING", + "links": null, + "shape": 3 + } + ], + "properties": { + "Node name for S&R": "FromBasicPipe" + } + }, + { + "id": 34, + "type": "FromBasicPipe_v2", + "pos": [ + 699, + 2163 + ], + 
"size": { + "0": 267, + "1": 126 + }, + "flags": {}, + "order": 8, + "mode": 0, + "inputs": [ + { + "name": "basic_pipe", + "type": "BASIC_PIPE", + "link": 62 + } + ], + "outputs": [ + { + "name": "basic_pipe", + "type": "BASIC_PIPE", + "links": [ + 57, + 58, + 59, + 60 + ], + "shape": 3, + "slot_index": 0 + }, + { + "name": "model", + "type": "MODEL", + "links": null, + "shape": 3 + }, + { + "name": "clip", + "type": "CLIP", + "links": [ + 55, + 56, + 61 + ], + "shape": 3, + "slot_index": 2 + }, + { + "name": "vae", + "type": "VAE", + "links": [ + 63 + ], + "shape": 3, + "slot_index": 3 + }, + { + "name": "positive", + "type": "CONDITIONING", + "links": null, + "shape": 3 + }, + { + "name": "negative", + "type": "CONDITIONING", + "links": null, + "shape": 3 + } + ], + "properties": { + "Node name for S&R": "FromBasicPipe_v2" + } + }, + { + "id": 20, + "type": "RegionalPrompt", + "pos": [ + 2230, + 1720 + ], + "size": { + "0": 278.79998779296875, + "1": 57.09715270996094 + }, + "flags": {}, + "order": 22, + "mode": 0, + "inputs": [ + { + "name": "mask", + "type": "MASK", + "link": 28 + }, + { + "name": "advanced_sampler", + "type": "KSAMPLER_ADVANCED", + "link": 23 + } + ], + "outputs": [ + { + "name": "REGIONAL_PROMPTS", + "type": "REGIONAL_PROMPTS", + "links": [ + 49 + ], + "shape": 3, + "slot_index": 0 + } + ], + "properties": { + "Node name for S&R": "RegionalPrompt" + } + }, + { + "id": 18, + "type": "CLIPTextEncode", + "pos": [ + 1180, + 1820 + ], + "size": { + "0": 292.0009765625, + "1": 115.41679382324219 + }, + "flags": {}, + "order": 10, + "mode": 0, + "inputs": [ + { + "name": "clip", + "type": "CLIP", + "link": 55 + } + ], + "outputs": [ + { + "name": "CONDITIONING", + "type": "CONDITIONING", + "links": [ + 21 + ], + "shape": 3, + "slot_index": 0 + } + ], + "properties": { + "Node name for S&R": "CLIPTextEncode" + }, + "widgets_values": [ + "photorealistic:1.4, 1girl pink jacket, upper knee, (cafe:1.1)" + ] + }, + { + "id": 32, + "type": "RegionalPrompt", + "pos": [ + 2280, + 2450 + ], + "size": { + "0": 278.79998779296875, + "1": 57.09715270996094 + }, + "flags": {}, + "order": 23, + "mode": 0, + "inputs": [ + { + "name": "mask", + "type": "MASK", + "link": 65 + }, + { + "name": "advanced_sampler", + "type": "KSAMPLER_ADVANCED", + "link": 52 + } + ], + "outputs": [ + { + "name": "REGIONAL_PROMPTS", + "type": "REGIONAL_PROMPTS", + "links": [ + 64 + ], + "shape": 3, + "slot_index": 0 + } + ], + "properties": { + "Node name for S&R": "RegionalPrompt" + } + }, + { + "id": 24, + "type": "RegionalPrompt", + "pos": [ + 2040, + 4160 + ], + "size": { + "0": 278.79998779296875, + "1": 47.54190444946289 + }, + "flags": {}, + "order": 21, + "mode": 0, + "inputs": [ + { + "name": "mask", + "type": "MASK", + "link": 31 + }, + { + "name": "advanced_sampler", + "type": "KSAMPLER_ADVANCED", + "link": 32 + } + ], + "outputs": [ + { + "name": "REGIONAL_PROMPTS", + "type": "REGIONAL_PROMPTS", + "links": [ + 50 + ], + "shape": 3, + "slot_index": 0 + } + ], + "properties": { + "Node name for S&R": "RegionalPrompt" + } + }, + { + "id": 35, + "type": "LoadImage", + "pos": [ + -274, + 2727 + ], + "size": { + "0": 900, + "1": 900 + }, + "flags": {}, + "order": 5, + "mode": 0, + "outputs": [ + { + "name": "IMAGE", + "type": "IMAGE", + "links": null, + "shape": 3 + }, + { + "name": "MASK", + "type": "MASK", + "links": [ + 65 + ], + "shape": 3, + "slot_index": 1 + } + ], + "properties": { + "Node name for S&R": "LoadImage" + }, + "widgets_values": [ + "clipspace/clipspace-mask-1594007.5999999996.png 
[input]", + "image" + ] + }, + { + "id": 21, + "type": "KSamplerAdvancedProvider", + "pos": [ + 1840, + 1740 + ], + "size": { + "0": 305.4067687988281, + "1": 106 + }, + "flags": {}, + "order": 18, + "mode": 0, + "inputs": [ + { + "name": "basic_pipe", + "type": "BASIC_PIPE", + "link": 24 + } + ], + "outputs": [ + { + "name": "KSAMPLER_ADVANCED", + "type": "KSAMPLER_ADVANCED", + "links": [ + 23 + ], + "shape": 3, + "slot_index": 0 + } + ], + "properties": { + "Node name for S&R": "KSamplerAdvancedProvider" + }, + "widgets_values": [ + 8, + "dpm_fast", + "sgm_uniform" + ] + }, + { + "id": 4, + "type": "KSamplerAdvancedProvider", + "pos": [ + 2742, + 1681 + ], + "size": { + "0": 355.20001220703125, + "1": 106 + }, + "flags": {}, + "order": 6, + "mode": 0, + "inputs": [ + { + "name": "basic_pipe", + "type": "BASIC_PIPE", + "link": 1 + } + ], + "outputs": [ + { + "name": "KSAMPLER_ADVANCED", + "type": "KSAMPLER_ADVANCED", + "links": [ + 10 + ], + "shape": 3, + "slot_index": 0 + } + ], + "properties": { + "Node name for S&R": "KSamplerAdvancedProvider" + }, + "widgets_values": [ + 5, + "dpm_fast", + "simple" + ] + } + ], + "links": [ + [ + 1, + 5, + 0, + 4, + 0, + "BASIC_PIPE" + ], + [ + 3, + 5, + 0, + 6, + 0, + "BASIC_PIPE" + ], + [ + 7, + 2, + 0, + 7, + 0, + "LATENT" + ], + [ + 9, + 7, + 0, + 8, + 0, + "IMAGE" + ], + [ + 10, + 4, + 0, + 2, + 1, + "KSAMPLER_ADVANCED" + ], + [ + 13, + 10, + 0, + 9, + 4, + "CONDITIONING" + ], + [ + 15, + 1, + 1, + 12, + 0, + "MASK" + ], + [ + 16, + 9, + 0, + 13, + 0, + "BASIC_PIPE" + ], + [ + 17, + 13, + 0, + 12, + 1, + "KSAMPLER_ADVANCED" + ], + [ + 19, + 14, + 0, + 2, + 0, + "LATENT" + ], + [ + 21, + 18, + 0, + 17, + 4, + "CONDITIONING" + ], + [ + 23, + 21, + 0, + 20, + 1, + "KSAMPLER_ADVANCED" + ], + [ + 24, + 17, + 0, + 21, + 0, + "BASIC_PIPE" + ], + [ + 27, + 22, + 0, + 2, + 2, + "REGIONAL_PROMPTS" + ], + [ + 28, + 15, + 1, + 20, + 0, + "MASK" + ], + [ + 31, + 23, + 1, + 24, + 0, + "MASK" + ], + [ + 32, + 25, + 0, + 24, + 1, + "KSAMPLER_ADVANCED" + ], + [ + 33, + 26, + 0, + 25, + 0, + "BASIC_PIPE" + ], + [ + 34, + 27, + 0, + 26, + 4, + "CONDITIONING" + ], + [ + 37, + 6, + 1, + 27, + 0, + "CLIP" + ], + [ + 48, + 12, + 0, + 22, + 0, + "REGIONAL_PROMPTS" + ], + [ + 49, + 20, + 0, + 22, + 1, + "REGIONAL_PROMPTS" + ], + [ + 50, + 24, + 0, + 22, + 2, + "REGIONAL_PROMPTS" + ], + [ + 51, + 31, + 0, + 30, + 4, + "CONDITIONING" + ], + [ + 52, + 33, + 0, + 32, + 1, + "KSAMPLER_ADVANCED" + ], + [ + 53, + 30, + 0, + 33, + 0, + "BASIC_PIPE" + ], + [ + 55, + 34, + 2, + 18, + 0, + "CLIP" + ], + [ + 56, + 34, + 2, + 31, + 0, + "CLIP" + ], + [ + 57, + 34, + 0, + 17, + 0, + "BASIC_PIPE" + ], + [ + 58, + 34, + 0, + 30, + 0, + "BASIC_PIPE" + ], + [ + 59, + 34, + 0, + 26, + 0, + "BASIC_PIPE" + ], + [ + 60, + 34, + 0, + 9, + 0, + "BASIC_PIPE" + ], + [ + 61, + 34, + 2, + 10, + 0, + "CLIP" + ], + [ + 62, + 5, + 0, + 34, + 0, + "BASIC_PIPE" + ], + [ + 63, + 34, + 3, + 7, + 1, + "VAE" + ], + [ + 64, + 32, + 0, + 22, + 3, + "REGIONAL_PROMPTS" + ], + [ + 65, + 35, + 1, + 32, + 0, + "MASK" + ] + ], + "groups": [], + "config": {}, + "extra": {}, + "version": 0.4 +} \ No newline at end of file diff --git a/zavodik/nodes/ComfyUI-Impact-Pack/troubleshooting/TROUBLESHOOTING.md b/zavodik/nodes/ComfyUI-Impact-Pack/troubleshooting/TROUBLESHOOTING.md new file mode 100644 index 0000000000000000000000000000000000000000..f823f32c9afafb83e856637f0685c7d53e195a3b --- /dev/null +++ b/zavodik/nodes/ComfyUI-Impact-Pack/troubleshooting/TROUBLESHOOTING.md @@ -0,0 +1,72 @@ +## When a permission error 
occurs during the installation process (on Windows) + +* There are cases where the package you are trying to install is already being used by another custom node that has been loaded. + * This issue occurs only on Windows. +* Please close ComfyUI and execute install.py directly using Python in the custom_nodes/ComfyUI-Impact-Pack directory. + * For the **portable** version: + 1. go to the **ComfyUI_windows_portable** directory in **cmd** + 2. execute ```.\python_embeded\python -s custom_nodes\ComfyUI-Impact-Pack\install.py``` + * For a **venv**: + 1. activate the venv + 2. execute ```python -s custom_nodes\ComfyUI-Impact-Pack\install.py``` + * Others: + 1. Please modify the path of 'python' according to your Python environment. + 2. execute ```(YOUR PYTHON) -s custom_nodes\ComfyUI-Impact-Pack\install.py``` + + +## If the nodes of the Impact Pack hang during execution + +* Issues like this may arise during dilation-related processing, depending on the compatibility of the computer environment. +* Please set `disable_gpu_opencv = True` in the `ComfyUI-Impact-Pack/impact-pack.ini` file. Depending on the environment, issues can occasionally arise when the OpenCV GPU mode is activated. + + e.g. +``` +[default] +dependency_version = 17 +mmdet_skip = True +sam_editor_cpu = False +sam_editor_model = sam_vit_b_01ec64.pth +custom_wildcards = /home/me/github/ComfyUI/custom_nodes/ComfyUI-Impact-Pack/custom_wildcards +disable_gpu_opencv = True +``` + +## An issue has occurred with importing Ultralytics. +``` + AttributeError: 'Logger' object has no attribute 'reconfigure' + + or + + AttributeError: 'Logger' object has no attribute 'encoding' +``` +* Update `ComfyUI-Manager` to V1.1.2 or above + + +## An issue has occurred with 'cv2' + +``` + AttributeError: module 'cv2' has no attribute 'setNumThreads' +``` + +* Update 'opencv-python' and 'opencv-python-headless' to the latest version + * Once you update to the latest version, you can also downgrade back to 4.6.0.66 if needed. + * For the portable version, navigate to the portable installation directory in the command prompt, and enter the following command: + + ``` + .\python_embeded\python.exe -m pip install -U opencv-python opencv-python-headless + ``` + + * When using the WAS node suite or ReActor nodes, the latest version may not work as expected. You can downgrade using the following command: + + ``` + .\python_embeded\python.exe -m pip install -U opencv-python==4.6.0.66 opencv-python-headless==4.6.0.66 + ``` + + +## Distortion on Detailer + +* Please also note that this issue may be caused by a bug in xformers 0.0.18. If you encounter this problem, please try adjusting the guide_size parameter.
+ + + + +* guide_size changed from 256 -> 192 diff --git a/zavodik/nodes/ComfyUI-Impact-Pack/troubleshooting/black1.png b/zavodik/nodes/ComfyUI-Impact-Pack/troubleshooting/black1.png new file mode 100644 index 0000000000000000000000000000000000000000..fab0a221281c4b35edb935e4888fadd726598ea0 --- /dev/null +++ b/zavodik/nodes/ComfyUI-Impact-Pack/troubleshooting/black1.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:6e32fe1606d35a26ddf08d2a3ff24c8fcd62831b9ed11eeaa76468e27a2b5f0f +size 752880 diff --git a/zavodik/nodes/ComfyUI-Impact-Pack/troubleshooting/black2.png b/zavodik/nodes/ComfyUI-Impact-Pack/troubleshooting/black2.png new file mode 100644 index 0000000000000000000000000000000000000000..1441941a0dae5a1ed105e6db6b1b35356b054524 --- /dev/null +++ b/zavodik/nodes/ComfyUI-Impact-Pack/troubleshooting/black2.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:829d72c3cc1034f72bbd0945e1a2aed69e1f38060126159c0b911c4c102e2fcc +size 709592 diff --git a/zavodik/nodes/ComfyUI-Impact-Pack/wildcards/put_wildcards_here b/zavodik/nodes/ComfyUI-Impact-Pack/wildcards/put_wildcards_here new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/zavodik/nodes/ComfyUI-Impact-Pack/wildcards/samples/flower.txt b/zavodik/nodes/ComfyUI-Impact-Pack/wildcards/samples/flower.txt new file mode 100644 index 0000000000000000000000000000000000000000..2c732d35357631f87ab0dd938dba9db6bbe4cb80 --- /dev/null +++ b/zavodik/nodes/ComfyUI-Impact-Pack/wildcards/samples/flower.txt @@ -0,0 +1,9 @@ +rose +orchid +iris +carnation +lily +daisy +chrysanthemum +daffodil +dahlia \ No newline at end of file diff --git a/zavodik/nodes/ComfyUI-Impact-Pack/wildcards/samples/jewel.txt b/zavodik/nodes/ComfyUI-Impact-Pack/wildcards/samples/jewel.txt new file mode 100644 index 0000000000000000000000000000000000000000..3e69d8be38eaaffe28c3186b5554d6e4d0fc689a --- /dev/null +++ b/zavodik/nodes/ComfyUI-Impact-Pack/wildcards/samples/jewel.txt @@ -0,0 +1,9 @@ +diamond +emerald +sapphire +opal +ruby +topaz +pearl +amethyst +aquamarine \ No newline at end of file diff --git a/zavodik/nodes/ComfyUI-VideoHelperSuite/.github/workflows/publish.yml b/zavodik/nodes/ComfyUI-VideoHelperSuite/.github/workflows/publish.yml new file mode 100644 index 0000000000000000000000000000000000000000..8b92ef96c38b7437942f1764fc98964a177444ce --- /dev/null +++ b/zavodik/nodes/ComfyUI-VideoHelperSuite/.github/workflows/publish.yml @@ -0,0 +1,24 @@ +name: Publish to Comfy registry +on: + workflow_dispatch: + push: + branches: + - main + paths: + - "pyproject.toml" + +permissions: + issues: write + +jobs: + publish-node: + name: Publish Custom Node to registry + runs-on: ubuntu-latest + if: ${{ github.repository_owner == 'Kosinkadink' }} + steps: + - name: Check out code + uses: actions/checkout@v4 + - name: Publish Custom Node + uses: Comfy-Org/publish-node-action@v1 + with: + personal_access_token: ${{ secrets.REGISTRY_ACCESS_TOKEN }} ## Add your own personal access token to your Github Repository secrets and reference it here.
diff --git a/zavodik/nodes/ComfyUI-VideoHelperSuite/.gitignore b/zavodik/nodes/ComfyUI-VideoHelperSuite/.gitignore new file mode 100644 index 0000000000000000000000000000000000000000..5c7c13e881ebb88465736a5078d3e6c895925e05 --- /dev/null +++ b/zavodik/nodes/ComfyUI-VideoHelperSuite/.gitignore @@ -0,0 +1,162 @@ +# Byte-compiled / optimized / DLL files +__pycache__/ +*.py[cod] +*$py.class + +# C extensions +*.so + +# Distribution / packaging +.Python +build/ +develop-eggs/ +dist/ +downloads/ +eggs/ +.eggs/ +lib/ +lib64/ +parts/ +sdist/ +var/ +wheels/ +share/python-wheels/ +*.egg-info/ +.installed.cfg +*.egg +MANIFEST + +# PyInstaller +# Usually these files are written by a python script from a template +# before PyInstaller builds the exe, so as to inject date/other infos into it. +*.manifest +*.spec + +# Installer logs +pip-log.txt +pip-delete-this-directory.txt + +# Unit test / coverage reports +htmlcov/ +.tox/ +.nox/ +.coverage +.coverage.* +.cache +nosetests.xml +coverage.xml +*.cover +*.py,cover +.hypothesis/ +.pytest_cache/ +cover/ + +# Translations +*.mo +*.pot + +# Django stuff: +*.log +local_settings.py +db.sqlite3 +db.sqlite3-journal + +# Flask stuff: +instance/ +.webassets-cache + +# Scrapy stuff: +.scrapy + +# Sphinx documentation +docs/_build/ + +# PyBuilder +.pybuilder/ +target/ + +# Jupyter Notebook +.ipynb_checkpoints + +# IPython +profile_default/ +ipython_config.py + +# pyenv +# For a library or package, you might want to ignore these files since the code is +# intended to run in multiple environments; otherwise, check them in: +# .python-version + +# pipenv +# According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control. +# However, in case of collaboration, if having platform-specific dependencies or dependencies +# having no cross-platform support, pipenv may install dependencies that don't work, or not +# install all needed dependencies. +#Pipfile.lock + +# poetry +# Similar to Pipfile.lock, it is generally recommended to include poetry.lock in version control. +# This is especially recommended for binary packages to ensure reproducibility, and is more +# commonly ignored for libraries. +# https://python-poetry.org/docs/basic-usage/#commit-your-poetrylock-file-to-version-control +#poetry.lock + +# pdm +# Similar to Pipfile.lock, it is generally recommended to include pdm.lock in version control. +#pdm.lock +# pdm stores project-wide configurations in .pdm.toml, but it is recommended to not include it +# in version control. +# https://pdm.fming.dev/#use-with-ide +.pdm.toml + +# PEP 582; used by e.g. github.com/David-OConnor/pyflow and github.com/pdm-project/pdm +__pypackages__/ + +# Celery stuff +celerybeat-schedule +celerybeat.pid + +# SageMath parsed files +*.sage.py + +# Environments +.env +.venv +env/ +venv/ +ENV/ +env.bak/ +venv.bak/ + +# Spyder project settings +.spyderproject +.spyproject + +# Rope project settings +.ropeproject + +# mkdocs documentation +/site + +# mypy +.mypy_cache/ +.dmypy.json +dmypy.json + +# Pyre type checker +.pyre/ + +# pytype static type analyzer +.pytype/ + +# Cython debug symbols +cython_debug/ + +# PyCharm +# JetBrains specific template is maintained in a separate JetBrains.gitignore that can +# be found at https://github.com/github/gitignore/blob/main/Global/JetBrains.gitignore +# and can be added to the global gitignore or merged into this file. For a more nuclear +# option (not recommended) you can uncomment the following to ignore the entire idea folder. 
+#.idea/ +# VIM swap files +*.swp diff --git a/zavodik/nodes/ComfyUI-VideoHelperSuite/LICENSE b/zavodik/nodes/ComfyUI-VideoHelperSuite/LICENSE new file mode 100644 index 0000000000000000000000000000000000000000..3877ae0a7ff6f94ac222fd704e112723db776114 --- /dev/null +++ b/zavodik/nodes/ComfyUI-VideoHelperSuite/LICENSE @@ -0,0 +1,674 @@ + GNU GENERAL PUBLIC LICENSE + Version 3, 29 June 2007 + + Copyright (C) 2007 Free Software Foundation, Inc. + Everyone is permitted to copy and distribute verbatim copies + of this license document, but changing it is not allowed. + + Preamble + + The GNU General Public License is a free, copyleft license for +software and other kinds of works. + + The licenses for most software and other practical works are designed +to take away your freedom to share and change the works. By contrast, +the GNU General Public License is intended to guarantee your freedom to +share and change all versions of a program--to make sure it remains free +software for all its users. We, the Free Software Foundation, use the +GNU General Public License for most of our software; it applies also to +any other work released this way by its authors. You can apply it to +your programs, too. + + When we speak of free software, we are referring to freedom, not +price. Our General Public Licenses are designed to make sure that you +have the freedom to distribute copies of free software (and charge for +them if you wish), that you receive source code or can get it if you +want it, that you can change the software or use pieces of it in new +free programs, and that you know you can do these things. + + To protect your rights, we need to prevent others from denying you +these rights or asking you to surrender the rights. Therefore, you have +certain responsibilities if you distribute copies of the software, or if +you modify it: responsibilities to respect the freedom of others. + + For example, if you distribute copies of such a program, whether +gratis or for a fee, you must pass on to the recipients the same +freedoms that you received. You must make sure that they, too, receive +or can get the source code. And you must show them these terms so they +know their rights. + + Developers that use the GNU GPL protect your rights with two steps: +(1) assert copyright on the software, and (2) offer you this License +giving you legal permission to copy, distribute and/or modify it. + + For the developers' and authors' protection, the GPL clearly explains +that there is no warranty for this free software. For both users' and +authors' sake, the GPL requires that modified versions be marked as +changed, so that their problems will not be attributed erroneously to +authors of previous versions. + + Some devices are designed to deny users access to install or run +modified versions of the software inside them, although the manufacturer +can do so. This is fundamentally incompatible with the aim of +protecting users' freedom to change the software. The systematic +pattern of such abuse occurs in the area of products for individuals to +use, which is precisely where it is most unacceptable. Therefore, we +have designed this version of the GPL to prohibit the practice for those +products. If such problems arise substantially in other domains, we +stand ready to extend this provision to those domains in future versions +of the GPL, as needed to protect the freedom of users. + + Finally, every program is threatened constantly by software patents. 
+States should not allow patents to restrict development and use of +software on general-purpose computers, but in those that do, we wish to +avoid the special danger that patents applied to a free program could +make it effectively proprietary. To prevent this, the GPL assures that +patents cannot be used to render the program non-free. + + The precise terms and conditions for copying, distribution and +modification follow. + + TERMS AND CONDITIONS + + 0. Definitions. + + "This License" refers to version 3 of the GNU General Public License. + + "Copyright" also means copyright-like laws that apply to other kinds of +works, such as semiconductor masks. + + "The Program" refers to any copyrightable work licensed under this +License. Each licensee is addressed as "you". "Licensees" and +"recipients" may be individuals or organizations. + + To "modify" a work means to copy from or adapt all or part of the work +in a fashion requiring copyright permission, other than the making of an +exact copy. The resulting work is called a "modified version" of the +earlier work or a work "based on" the earlier work. + + A "covered work" means either the unmodified Program or a work based +on the Program. + + To "propagate" a work means to do anything with it that, without +permission, would make you directly or secondarily liable for +infringement under applicable copyright law, except executing it on a +computer or modifying a private copy. Propagation includes copying, +distribution (with or without modification), making available to the +public, and in some countries other activities as well. + + To "convey" a work means any kind of propagation that enables other +parties to make or receive copies. Mere interaction with a user through +a computer network, with no transfer of a copy, is not conveying. + + An interactive user interface displays "Appropriate Legal Notices" +to the extent that it includes a convenient and prominently visible +feature that (1) displays an appropriate copyright notice, and (2) +tells the user that there is no warranty for the work (except to the +extent that warranties are provided), that licensees may convey the +work under this License, and how to view a copy of this License. If +the interface presents a list of user commands or options, such as a +menu, a prominent item in the list meets this criterion. + + 1. Source Code. + + The "source code" for a work means the preferred form of the work +for making modifications to it. "Object code" means any non-source +form of a work. + + A "Standard Interface" means an interface that either is an official +standard defined by a recognized standards body, or, in the case of +interfaces specified for a particular programming language, one that +is widely used among developers working in that language. + + The "System Libraries" of an executable work include anything, other +than the work as a whole, that (a) is included in the normal form of +packaging a Major Component, but which is not part of that Major +Component, and (b) serves only to enable use of the work with that +Major Component, or to implement a Standard Interface for which an +implementation is available to the public in source code form. A +"Major Component", in this context, means a major essential component +(kernel, window system, and so on) of the specific operating system +(if any) on which the executable work runs, or a compiler used to +produce the work, or an object code interpreter used to run it. 
+ + The "Corresponding Source" for a work in object code form means all +the source code needed to generate, install, and (for an executable +work) run the object code and to modify the work, including scripts to +control those activities. However, it does not include the work's +System Libraries, or general-purpose tools or generally available free +programs which are used unmodified in performing those activities but +which are not part of the work. For example, Corresponding Source +includes interface definition files associated with source files for +the work, and the source code for shared libraries and dynamically +linked subprograms that the work is specifically designed to require, +such as by intimate data communication or control flow between those +subprograms and other parts of the work. + + The Corresponding Source need not include anything that users +can regenerate automatically from other parts of the Corresponding +Source. + + The Corresponding Source for a work in source code form is that +same work. + + 2. Basic Permissions. + + All rights granted under this License are granted for the term of +copyright on the Program, and are irrevocable provided the stated +conditions are met. This License explicitly affirms your unlimited +permission to run the unmodified Program. The output from running a +covered work is covered by this License only if the output, given its +content, constitutes a covered work. This License acknowledges your +rights of fair use or other equivalent, as provided by copyright law. + + You may make, run and propagate covered works that you do not +convey, without conditions so long as your license otherwise remains +in force. You may convey covered works to others for the sole purpose +of having them make modifications exclusively for you, or provide you +with facilities for running those works, provided that you comply with +the terms of this License in conveying all material for which you do +not control copyright. Those thus making or running the covered works +for you must do so exclusively on your behalf, under your direction +and control, on terms that prohibit them from making any copies of +your copyrighted material outside their relationship with you. + + Conveying under any other circumstances is permitted solely under +the conditions stated below. Sublicensing is not allowed; section 10 +makes it unnecessary. + + 3. Protecting Users' Legal Rights From Anti-Circumvention Law. + + No covered work shall be deemed part of an effective technological +measure under any applicable law fulfilling obligations under article +11 of the WIPO copyright treaty adopted on 20 December 1996, or +similar laws prohibiting or restricting circumvention of such +measures. + + When you convey a covered work, you waive any legal power to forbid +circumvention of technological measures to the extent such circumvention +is effected by exercising rights under this License with respect to +the covered work, and you disclaim any intention to limit operation or +modification of the work as a means of enforcing, against the work's +users, your or third parties' legal rights to forbid circumvention of +technological measures. + + 4. Conveying Verbatim Copies. 
+ + You may convey verbatim copies of the Program's source code as you +receive it, in any medium, provided that you conspicuously and +appropriately publish on each copy an appropriate copyright notice; +keep intact all notices stating that this License and any +non-permissive terms added in accord with section 7 apply to the code; +keep intact all notices of the absence of any warranty; and give all +recipients a copy of this License along with the Program. + + You may charge any price or no price for each copy that you convey, +and you may offer support or warranty protection for a fee. + + 5. Conveying Modified Source Versions. + + You may convey a work based on the Program, or the modifications to +produce it from the Program, in the form of source code under the +terms of section 4, provided that you also meet all of these conditions: + + a) The work must carry prominent notices stating that you modified + it, and giving a relevant date. + + b) The work must carry prominent notices stating that it is + released under this License and any conditions added under section + 7. This requirement modifies the requirement in section 4 to + "keep intact all notices". + + c) You must license the entire work, as a whole, under this + License to anyone who comes into possession of a copy. This + License will therefore apply, along with any applicable section 7 + additional terms, to the whole of the work, and all its parts, + regardless of how they are packaged. This License gives no + permission to license the work in any other way, but it does not + invalidate such permission if you have separately received it. + + d) If the work has interactive user interfaces, each must display + Appropriate Legal Notices; however, if the Program has interactive + interfaces that do not display Appropriate Legal Notices, your + work need not make them do so. + + A compilation of a covered work with other separate and independent +works, which are not by their nature extensions of the covered work, +and which are not combined with it such as to form a larger program, +in or on a volume of a storage or distribution medium, is called an +"aggregate" if the compilation and its resulting copyright are not +used to limit the access or legal rights of the compilation's users +beyond what the individual works permit. Inclusion of a covered work +in an aggregate does not cause this License to apply to the other +parts of the aggregate. + + 6. Conveying Non-Source Forms. + + You may convey a covered work in object code form under the terms +of sections 4 and 5, provided that you also convey the +machine-readable Corresponding Source under the terms of this License, +in one of these ways: + + a) Convey the object code in, or embodied in, a physical product + (including a physical distribution medium), accompanied by the + Corresponding Source fixed on a durable physical medium + customarily used for software interchange. 
+ + b) Convey the object code in, or embodied in, a physical product + (including a physical distribution medium), accompanied by a + written offer, valid for at least three years and valid for as + long as you offer spare parts or customer support for that product + model, to give anyone who possesses the object code either (1) a + copy of the Corresponding Source for all the software in the + product that is covered by this License, on a durable physical + medium customarily used for software interchange, for a price no + more than your reasonable cost of physically performing this + conveying of source, or (2) access to copy the + Corresponding Source from a network server at no charge. + + c) Convey individual copies of the object code with a copy of the + written offer to provide the Corresponding Source. This + alternative is allowed only occasionally and noncommercially, and + only if you received the object code with such an offer, in accord + with subsection 6b. + + d) Convey the object code by offering access from a designated + place (gratis or for a charge), and offer equivalent access to the + Corresponding Source in the same way through the same place at no + further charge. You need not require recipients to copy the + Corresponding Source along with the object code. If the place to + copy the object code is a network server, the Corresponding Source + may be on a different server (operated by you or a third party) + that supports equivalent copying facilities, provided you maintain + clear directions next to the object code saying where to find the + Corresponding Source. Regardless of what server hosts the + Corresponding Source, you remain obligated to ensure that it is + available for as long as needed to satisfy these requirements. + + e) Convey the object code using peer-to-peer transmission, provided + you inform other peers where the object code and Corresponding + Source of the work are being offered to the general public at no + charge under subsection 6d. + + A separable portion of the object code, whose source code is excluded +from the Corresponding Source as a System Library, need not be +included in conveying the object code work. + + A "User Product" is either (1) a "consumer product", which means any +tangible personal property which is normally used for personal, family, +or household purposes, or (2) anything designed or sold for incorporation +into a dwelling. In determining whether a product is a consumer product, +doubtful cases shall be resolved in favor of coverage. For a particular +product received by a particular user, "normally used" refers to a +typical or common use of that class of product, regardless of the status +of the particular user or of the way in which the particular user +actually uses, or expects or is expected to use, the product. A product +is a consumer product regardless of whether the product has substantial +commercial, industrial or non-consumer uses, unless such uses represent +the only significant mode of use of the product. + + "Installation Information" for a User Product means any methods, +procedures, authorization keys, or other information required to install +and execute modified versions of a covered work in that User Product from +a modified version of its Corresponding Source. The information must +suffice to ensure that the continued functioning of the modified object +code is in no case prevented or interfered with solely because +modification has been made. 
+ + If you convey an object code work under this section in, or with, or +specifically for use in, a User Product, and the conveying occurs as +part of a transaction in which the right of possession and use of the +User Product is transferred to the recipient in perpetuity or for a +fixed term (regardless of how the transaction is characterized), the +Corresponding Source conveyed under this section must be accompanied +by the Installation Information. But this requirement does not apply +if neither you nor any third party retains the ability to install +modified object code on the User Product (for example, the work has +been installed in ROM). + + The requirement to provide Installation Information does not include a +requirement to continue to provide support service, warranty, or updates +for a work that has been modified or installed by the recipient, or for +the User Product in which it has been modified or installed. Access to a +network may be denied when the modification itself materially and +adversely affects the operation of the network or violates the rules and +protocols for communication across the network. + + Corresponding Source conveyed, and Installation Information provided, +in accord with this section must be in a format that is publicly +documented (and with an implementation available to the public in +source code form), and must require no special password or key for +unpacking, reading or copying. + + 7. Additional Terms. + + "Additional permissions" are terms that supplement the terms of this +License by making exceptions from one or more of its conditions. +Additional permissions that are applicable to the entire Program shall +be treated as though they were included in this License, to the extent +that they are valid under applicable law. If additional permissions +apply only to part of the Program, that part may be used separately +under those permissions, but the entire Program remains governed by +this License without regard to the additional permissions. + + When you convey a copy of a covered work, you may at your option +remove any additional permissions from that copy, or from any part of +it. (Additional permissions may be written to require their own +removal in certain cases when you modify the work.) You may place +additional permissions on material, added by you to a covered work, +for which you have or can give appropriate copyright permission. 
+ + Notwithstanding any other provision of this License, for material you +add to a covered work, you may (if authorized by the copyright holders of +that material) supplement the terms of this License with terms: + + a) Disclaiming warranty or limiting liability differently from the + terms of sections 15 and 16 of this License; or + + b) Requiring preservation of specified reasonable legal notices or + author attributions in that material or in the Appropriate Legal + Notices displayed by works containing it; or + + c) Prohibiting misrepresentation of the origin of that material, or + requiring that modified versions of such material be marked in + reasonable ways as different from the original version; or + + d) Limiting the use for publicity purposes of names of licensors or + authors of the material; or + + e) Declining to grant rights under trademark law for use of some + trade names, trademarks, or service marks; or + + f) Requiring indemnification of licensors and authors of that + material by anyone who conveys the material (or modified versions of + it) with contractual assumptions of liability to the recipient, for + any liability that these contractual assumptions directly impose on + those licensors and authors. + + All other non-permissive additional terms are considered "further +restrictions" within the meaning of section 10. If the Program as you +received it, or any part of it, contains a notice stating that it is +governed by this License along with a term that is a further +restriction, you may remove that term. If a license document contains +a further restriction but permits relicensing or conveying under this +License, you may add to a covered work material governed by the terms +of that license document, provided that the further restriction does +not survive such relicensing or conveying. + + If you add terms to a covered work in accord with this section, you +must place, in the relevant source files, a statement of the +additional terms that apply to those files, or a notice indicating +where to find the applicable terms. + + Additional terms, permissive or non-permissive, may be stated in the +form of a separately written license, or stated as exceptions; +the above requirements apply either way. + + 8. Termination. + + You may not propagate or modify a covered work except as expressly +provided under this License. Any attempt otherwise to propagate or +modify it is void, and will automatically terminate your rights under +this License (including any patent licenses granted under the third +paragraph of section 11). + + However, if you cease all violation of this License, then your +license from a particular copyright holder is reinstated (a) +provisionally, unless and until the copyright holder explicitly and +finally terminates your license, and (b) permanently, if the copyright +holder fails to notify you of the violation by some reasonable means +prior to 60 days after the cessation. + + Moreover, your license from a particular copyright holder is +reinstated permanently if the copyright holder notifies you of the +violation by some reasonable means, this is the first time you have +received notice of violation of this License (for any work) from that +copyright holder, and you cure the violation prior to 30 days after +your receipt of the notice. + + Termination of your rights under this section does not terminate the +licenses of parties who have received copies or rights from you under +this License. 
If your rights have been terminated and not permanently +reinstated, you do not qualify to receive new licenses for the same +material under section 10. + + 9. Acceptance Not Required for Having Copies. + + You are not required to accept this License in order to receive or +run a copy of the Program. Ancillary propagation of a covered work +occurring solely as a consequence of using peer-to-peer transmission +to receive a copy likewise does not require acceptance. However, +nothing other than this License grants you permission to propagate or +modify any covered work. These actions infringe copyright if you do +not accept this License. Therefore, by modifying or propagating a +covered work, you indicate your acceptance of this License to do so. + + 10. Automatic Licensing of Downstream Recipients. + + Each time you convey a covered work, the recipient automatically +receives a license from the original licensors, to run, modify and +propagate that work, subject to this License. You are not responsible +for enforcing compliance by third parties with this License. + + An "entity transaction" is a transaction transferring control of an +organization, or substantially all assets of one, or subdividing an +organization, or merging organizations. If propagation of a covered +work results from an entity transaction, each party to that +transaction who receives a copy of the work also receives whatever +licenses to the work the party's predecessor in interest had or could +give under the previous paragraph, plus a right to possession of the +Corresponding Source of the work from the predecessor in interest, if +the predecessor has it or can get it with reasonable efforts. + + You may not impose any further restrictions on the exercise of the +rights granted or affirmed under this License. For example, you may +not impose a license fee, royalty, or other charge for exercise of +rights granted under this License, and you may not initiate litigation +(including a cross-claim or counterclaim in a lawsuit) alleging that +any patent claim is infringed by making, using, selling, offering for +sale, or importing the Program or any portion of it. + + 11. Patents. + + A "contributor" is a copyright holder who authorizes use under this +License of the Program or a work on which the Program is based. The +work thus licensed is called the contributor's "contributor version". + + A contributor's "essential patent claims" are all patent claims +owned or controlled by the contributor, whether already acquired or +hereafter acquired, that would be infringed by some manner, permitted +by this License, of making, using, or selling its contributor version, +but do not include claims that would be infringed only as a +consequence of further modification of the contributor version. For +purposes of this definition, "control" includes the right to grant +patent sublicenses in a manner consistent with the requirements of +this License. + + Each contributor grants you a non-exclusive, worldwide, royalty-free +patent license under the contributor's essential patent claims, to +make, use, sell, offer for sale, import and otherwise run, modify and +propagate the contents of its contributor version. + + In the following three paragraphs, a "patent license" is any express +agreement or commitment, however denominated, not to enforce a patent +(such as an express permission to practice a patent or covenant not to +sue for patent infringement). 
To "grant" such a patent license to a +party means to make such an agreement or commitment not to enforce a +patent against the party. + + If you convey a covered work, knowingly relying on a patent license, +and the Corresponding Source of the work is not available for anyone +to copy, free of charge and under the terms of this License, through a +publicly available network server or other readily accessible means, +then you must either (1) cause the Corresponding Source to be so +available, or (2) arrange to deprive yourself of the benefit of the +patent license for this particular work, or (3) arrange, in a manner +consistent with the requirements of this License, to extend the patent +license to downstream recipients. "Knowingly relying" means you have +actual knowledge that, but for the patent license, your conveying the +covered work in a country, or your recipient's use of the covered work +in a country, would infringe one or more identifiable patents in that +country that you have reason to believe are valid. + + If, pursuant to or in connection with a single transaction or +arrangement, you convey, or propagate by procuring conveyance of, a +covered work, and grant a patent license to some of the parties +receiving the covered work authorizing them to use, propagate, modify +or convey a specific copy of the covered work, then the patent license +you grant is automatically extended to all recipients of the covered +work and works based on it. + + A patent license is "discriminatory" if it does not include within +the scope of its coverage, prohibits the exercise of, or is +conditioned on the non-exercise of one or more of the rights that are +specifically granted under this License. You may not convey a covered +work if you are a party to an arrangement with a third party that is +in the business of distributing software, under which you make payment +to the third party based on the extent of your activity of conveying +the work, and under which the third party grants, to any of the +parties who would receive the covered work from you, a discriminatory +patent license (a) in connection with copies of the covered work +conveyed by you (or copies made from those copies), or (b) primarily +for and in connection with specific products or compilations that +contain the covered work, unless you entered into that arrangement, +or that patent license was granted, prior to 28 March 2007. + + Nothing in this License shall be construed as excluding or limiting +any implied license or other defenses to infringement that may +otherwise be available to you under applicable patent law. + + 12. No Surrender of Others' Freedom. + + If conditions are imposed on you (whether by court order, agreement or +otherwise) that contradict the conditions of this License, they do not +excuse you from the conditions of this License. If you cannot convey a +covered work so as to satisfy simultaneously your obligations under this +License and any other pertinent obligations, then as a consequence you may +not convey it at all. For example, if you agree to terms that obligate you +to collect a royalty for further conveying from those to whom you convey +the Program, the only way you could satisfy both those terms and this +License would be to refrain entirely from conveying the Program. + + 13. Use with the GNU Affero General Public License. 
+ + Notwithstanding any other provision of this License, you have +permission to link or combine any covered work with a work licensed +under version 3 of the GNU Affero General Public License into a single +combined work, and to convey the resulting work. The terms of this +License will continue to apply to the part which is the covered work, +but the special requirements of the GNU Affero General Public License, +section 13, concerning interaction through a network will apply to the +combination as such. + + 14. Revised Versions of this License. + + The Free Software Foundation may publish revised and/or new versions of +the GNU General Public License from time to time. Such new versions will +be similar in spirit to the present version, but may differ in detail to +address new problems or concerns. + + Each version is given a distinguishing version number. If the +Program specifies that a certain numbered version of the GNU General +Public License "or any later version" applies to it, you have the +option of following the terms and conditions either of that numbered +version or of any later version published by the Free Software +Foundation. If the Program does not specify a version number of the +GNU General Public License, you may choose any version ever published +by the Free Software Foundation. + + If the Program specifies that a proxy can decide which future +versions of the GNU General Public License can be used, that proxy's +public statement of acceptance of a version permanently authorizes you +to choose that version for the Program. + + Later license versions may give you additional or different +permissions. However, no additional obligations are imposed on any +author or copyright holder as a result of your choosing to follow a +later version. + + 15. Disclaimer of Warranty. + + THERE IS NO WARRANTY FOR THE PROGRAM, TO THE EXTENT PERMITTED BY +APPLICABLE LAW. EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT +HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY +OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, +THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR +PURPOSE. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE PROGRAM +IS WITH YOU. SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF +ALL NECESSARY SERVICING, REPAIR OR CORRECTION. + + 16. Limitation of Liability. + + IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING +WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MODIFIES AND/OR CONVEYS +THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY +GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE +USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED TO LOSS OF +DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD +PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER PROGRAMS), +EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF +SUCH DAMAGES. + + 17. Interpretation of Sections 15 and 16. + + If the disclaimer of warranty and limitation of liability provided +above cannot be given local legal effect according to their terms, +reviewing courts shall apply local law that most closely approximates +an absolute waiver of all civil liability in connection with the +Program, unless a warranty or assumption of liability accompanies a +copy of the Program in return for a fee. 
+ + END OF TERMS AND CONDITIONS + + How to Apply These Terms to Your New Programs + + If you develop a new program, and you want it to be of the greatest +possible use to the public, the best way to achieve this is to make it +free software which everyone can redistribute and change under these terms. + + To do so, attach the following notices to the program. It is safest +to attach them to the start of each source file to most effectively +state the exclusion of warranty; and each file should have at least +the "copyright" line and a pointer to where the full notice is found. + + + <one line to give the program's name and a brief idea of what it does.> + Copyright (C) <year> <name of author> + + This program is free software: you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation, either version 3 of the License, or + (at your option) any later version. + + This program is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + You should have received a copy of the GNU General Public License + along with this program. If not, see <https://www.gnu.org/licenses/>. + +Also add information on how to contact you by electronic and paper mail. + + If the program does terminal interaction, make it output a short +notice like this when it starts in an interactive mode: + + <program> Copyright (C) <year> <name of author> + This program comes with ABSOLUTELY NO WARRANTY; for details type `show w'. + This is free software, and you are welcome to redistribute it + under certain conditions; type `show c' for details. + +The hypothetical commands `show w' and `show c' should show the appropriate +parts of the General Public License. Of course, your program's commands +might be different; for a GUI interface, you would use an "about box". + + You should also get your employer (if you work as a programmer) or school, +if any, to sign a "copyright disclaimer" for the program, if necessary. +For more information on this, and how to apply and follow the GNU GPL, see +<https://www.gnu.org/licenses/>. + + The GNU General Public License does not permit incorporating your program +into proprietary programs. If your program is a subroutine library, you +may consider it more useful to permit linking proprietary applications with +the library. If this is what you want to do, use the GNU Lesser General +Public License instead of this License. But first, please read +<https://www.gnu.org/licenses/why-not-lgpl.html>. diff --git a/zavodik/nodes/ComfyUI-VideoHelperSuite/README.md b/zavodik/nodes/ComfyUI-VideoHelperSuite/README.md new file mode 100644 index 0000000000000000000000000000000000000000..15169af51764a76539ef6da790ca7babddfa9e6d --- /dev/null +++ b/zavodik/nodes/ComfyUI-VideoHelperSuite/README.md @@ -0,0 +1,110 @@ +# ComfyUI-VideoHelperSuite +Nodes related to video workflows + +## I/O Nodes +### Load Video +Converts a video file into a series of images +- video: The video file to be loaded +- force_rate: Discards or duplicates frames as needed to hit a target frame rate. Disabled by setting to 0. This can be used to quickly match a suggested frame rate like the 8 fps of AnimateDiff. +- force_size: Allows for quick resizing to a number of suggested sizes. Several options allow you to set only width or height and determine the other from aspect ratio. +- frame_load_cap: The maximum number of frames which will be returned. This could also be thought of as the maximum batch size. +- skip_first_frames: How many frames to skip from the start of the video after adjusting for a forced frame rate.
By incrementing this number by the frame_load_cap, you can easily process a longer input video in parts. +- select_every_nth: Allows for skipping a number of frames without considering the base frame rate or risking frame duplication. Often useful when working with animated gifs. +A path variant of the Load Video node exists that allows loading videos from external paths. + + +If [Advanced Previews](#advanced-previews) is enabled in the options menu of the web ui, the preview will reflect the current settings on the node. +### Load Image Sequence +Loads all image files from a subfolder. Options are similar to Load Video. +- image_load_cap: The maximum number of images which will be returned. This could also be thought of as the maximum batch size. +- skip_first_images: How many images to skip. By incrementing this number by image_load_cap, you can easily divide a long sequence of images into multiple batches. +- select_every_nth: Allows for skipping a number of images between every returned frame. + +A path variant of Load Image Sequence also exists. +### Video Combine +Combines a series of images into an output video. +If the optional audio input is provided, it will also be combined into the output video. +- frame_rate: How many of the input frames are displayed per second. A higher frame rate means that the output video plays faster and has a shorter duration. This should usually be kept at 8 for AnimateDiff, or matched to the force_rate of a Load Video node. +- loop_count: How many additional times the video should repeat. +- filename_prefix: The base file name used for output. + - You can save output to a subfolder: `subfolder/video` + - Like the built-in Save Image node, you can add timestamps. `%date:yyyy-MM-ddThh:mm:ss%` might become 2023-10-31T6:45:25 +- format: The file format to use. Advanced information on configuring or adding additional video formats can be found in the [Video Formats](#video-formats) section. +- pingpong: Causes the input to be played back in reverse to create a clean loop. +- save_output: Whether the image should be put into the output directory or the temp directory. +Returns: a `VHS_FILENAMES`, which consists of a boolean indicating if save_output is enabled and a list of the full filepaths of all generated outputs in the order created. Accordingly, `output[1][-1]` will be the most complete output. + +Depending on the format chosen, additional options may become available, including: +- crf: Describes the quality of the output video. A lower number gives a higher quality video and a larger file size, while a higher number gives a lower quality video with a smaller size. Scaling varies by codec, but visually lossless output generally occurs around 20. +- save_metadata: Includes a copy of the workflow in the output video, which can be loaded by dragging and dropping the video, just like with images. +- pix_fmt: Changes how the pixel data is stored. `yuv420p10le` has higher color quality, but won't work on all devices. +### Load Audio +Provides a way to load standalone audio files. +- seek_seconds: An optional start time for the audio file in seconds. + +## Latent/Image Nodes +A number of utility nodes exist for managing latents. For each, there is an equivalent node which works on images. +### Split Batch +Divides the latents into two sets. The first `split_index` latents go to output A and the remainder to output B. If fewer than `split_index` latents are provided as input, all are passed to output A and output B is empty.
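The slicing semantics of Split Batch can be sketched in a few lines of PyTorch (a rough illustration of the behavior described above, not the node's actual implementation):

```python
import torch

def split_batch(latents: torch.Tensor, split_index: int):
    # The first `split_index` items go to output A, the rest to output B.
    # Slicing past the end of the batch yields an empty tensor, which
    # matches the "output B is empty" case described above.
    return latents[:split_index], latents[split_index:]

batch = torch.randn(5, 4, 64, 64)   # a batch of 5 latents
a, b = split_batch(batch, 3)        # a: 3 latents, b: 2 latents
a, b = split_batch(batch, 8)        # a: all 5 latents, b: empty
```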
+### Merge Batch +Combines two groups of latents into a single output. The order of the output is the latents in A followed by the latents in B. +If the input groups are not the same size, the node provides options for rescaling the latents before merging. +### Select Every Nth +The first of every `select_every_nth` input is passed and the remainder are discarded. +### Get Count +### Duplicate Batch + +## Video Previews +Load Video (Upload), Load Video (Path), Load Images (Upload), Load Images (Path) and Video Combine provide animated previews. +Nodes with previews provide additional functionality when right-clicked: +- Open preview +- Save preview +- Pause preview: Can improve performance with very large videos +- Hide preview: Can improve performance, save space +- Sync preview: Restarts all previews for side-by-side comparisons + +### Advanced Previews +Advanced Previews must be manually enabled by clicking the settings gear next to Queue Prompt and checking the box for VHS Advanced Previews. +If enabled, videos which are displayed in the ui will be converted with ffmpeg on request. This has several benefits: +- Previews for Load Video nodes will reflect the settings on the node, such as skip_first_frames and frame_load_cap + - This makes it easy to select an exact portion of an input video and sync it with outputs +- It can use substantially less bandwidth if running the server remotely +- It can greatly improve browser performance by downsizing videos to the in-ui resolution, particularly useful with animated gifs +- It allows for previews of videos that would not normally be playable in the browser. +- It can be limited to subdirectories of ComfyUI if `VHS_STRICT_PATHS` is set as an environment variable. + +This functionality is disabled by default since it comes with several downsides: +- There is a delay before videos show in the browser. This delay can become quite large if the input video is long +- The preview videos are lower quality (The original can always be viewed with Right Click -> Open preview) + +## Video Formats +Those familiar with ffmpeg are able to add json files to the video_formats folders to add new output types to Video Combine. +Consider the following example for av1-webm: +```json +{ + "main_pass": + [ + "-n", "-c:v", "libsvtav1", + "-pix_fmt", "yuv420p10le", + "-crf", ["crf","INT", {"default": 23, "min": 0, "max": 100, "step": 1}] + ], + "audio_pass": ["-c:a", "libopus"], + "extension": "webm", + "environment": {"SVT_LOG": "1"} +} +``` +Most configuration takes place in `main_pass`, which is a list of arguments that are passed to ffmpeg. +- `"-n"` designates that the command should fail if a file of the same name already exists. This should never happen, but if some bug were to occur, it would ensure other files aren't overwritten. +- `"-c:v", "libsvtav1"` designates that the video should be encoded with an av1 codec using the new SVT-AV1 encoder. SVT-AV1 is much faster than libaom-av1, but may not exist in older versions of ffmpeg. Alternatively, av1_nvenc could be used for GPU encoding with newer Nvidia cards. +- `"-pix_fmt", "yuv420p10le"` designates the standard pixel format with 10-bit color. It's important that some pixel format be specified to ensure a nonconfigurable input pix_fmt isn't used. + +`audio_pass` contains a list of arguments which are passed to ffmpeg when audio is passed into Video Combine. + +`extension` designates both the file extension and the container format that is used.
If some of the above options are omitted from `main_pass`, it can affect what default options are chosen. +`environment` can optionally be provided to set environment variables during execution. For av1 it's used to reduce the verbosity of logging so that only major errors are displayed. +`input_color_depth` affects the format in which pixels are passed to the ffmpeg subprocess. Current valid options are `8bit` and `16bit`. The latter produces higher-quality output, but is experimental. + +Fields can be exposed in the webui as a widget using a format similar to what is used in the creation of custom nodes. In the above example, the argument for `-crf` will be exposed as a format widget in the webui. Format widgets are a list of up to three terms: +- The name of the widget that will be displayed in the web ui +- Either a primitive such as "INT" or "BOOLEAN", or a list of string options +- A dictionary of options diff --git a/zavodik/nodes/ComfyUI-VideoHelperSuite/__init__.py b/zavodik/nodes/ComfyUI-VideoHelperSuite/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..815246deb017888fe254ab7df37818bd0c4ffb4e --- /dev/null +++ b/zavodik/nodes/ComfyUI-VideoHelperSuite/__init__.py @@ -0,0 +1,9 @@ +from .videohelpersuite.nodes import NODE_CLASS_MAPPINGS, NODE_DISPLAY_NAME_MAPPINGS +import folder_paths +from .videohelpersuite.server import server +from .videohelpersuite import documentation +from .videohelpersuite import latent_preview + +WEB_DIRECTORY = "./web" +__all__ = ["NODE_CLASS_MAPPINGS", "NODE_DISPLAY_NAME_MAPPINGS", "WEB_DIRECTORY"] +documentation.format_descriptions(NODE_CLASS_MAPPINGS) diff --git a/zavodik/nodes/ComfyUI-VideoHelperSuite/__pycache__/__init__.cpython-313.pyc b/zavodik/nodes/ComfyUI-VideoHelperSuite/__pycache__/__init__.cpython-313.pyc new file mode 100644 index 0000000000000000000000000000000000000000..bc4c724efb213ec058608b3f71d4a1a2b3b13d98 Binary files /dev/null and b/zavodik/nodes/ComfyUI-VideoHelperSuite/__pycache__/__init__.cpython-313.pyc differ diff --git a/zavodik/nodes/ComfyUI-VideoHelperSuite/pyproject.toml b/zavodik/nodes/ComfyUI-VideoHelperSuite/pyproject.toml new file mode 100644 index 0000000000000000000000000000000000000000..56617147a0f5996459896fb80921a4f03b94690b --- /dev/null +++ b/zavodik/nodes/ComfyUI-VideoHelperSuite/pyproject.toml @@ -0,0 +1,15 @@ +[project] +name = "comfyui-videohelpersuite" +description = "Nodes related to video workflows" +version = "1.7.9" +license = { file = "LICENSE" } +dependencies = ["opencv-python", "imageio-ffmpeg"] + +[project.urls] +Repository = "https://github.com/Kosinkadink/ComfyUI-VideoHelperSuite" + +# Used by Comfy Registry https://comfyregistry.org +[tool.comfy] +PublisherId = "kosinkadink" +DisplayName = "ComfyUI-VideoHelperSuite" +Icon = "" diff --git a/zavodik/nodes/ComfyUI-VideoHelperSuite/requirements.txt b/zavodik/nodes/ComfyUI-VideoHelperSuite/requirements.txt new file mode 100644 index 0000000000000000000000000000000000000000..22b32b33290903a6ed55fabbfdb24e4f54a6a52f --- /dev/null +++ b/zavodik/nodes/ComfyUI-VideoHelperSuite/requirements.txt @@ -0,0 +1,2 @@ +opencv-python +imageio-ffmpeg diff --git a/zavodik/nodes/ComfyUI-VideoHelperSuite/testframework/README.md b/zavodik/nodes/ComfyUI-VideoHelperSuite/testframework/README.md new file mode 100644 index 0000000000000000000000000000000000000000..f12ec58d4190e8cbaac909510f77d8bb9bb764dd --- /dev/null +++ b/zavodik/nodes/ComfyUI-VideoHelperSuite/testframework/README.md @@ -0,0 +1,5 @@ +Code to automate execution of the tests and
evaluate the results. +Distributed as a `custom node`, it can be installed by copying or symlinking it into the `custom_nodes` directory. +Requires that ffprobe be available and added to the path. Note that imageio-ffmpeg does not bundle ffprobe. + +When installed, it adds a new sidebar tab to automate running a single test or a folder of tests. This requires that the `Use new menu and workflow management` setting not be disabled. diff --git a/zavodik/nodes/ComfyUI-VideoHelperSuite/testframework/__init__.py b/zavodik/nodes/ComfyUI-VideoHelperSuite/testframework/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..93a6b43e84e566ccf709d58817afbccfdd968d66 --- /dev/null +++ b/zavodik/nodes/ComfyUI-VideoHelperSuite/testframework/__init__.py @@ -0,0 +1,6 @@ +from . import server +NODE_CLASS_MAPPINGS = {} +NODE_DISPLAY_NAME_MAPPINGS = {} + +WEB_DIRECTORY = "./web" +__all__ = ["NODE_CLASS_MAPPINGS", "NODE_DISPLAY_NAME_MAPPINGS", "WEB_DIRECTORY"] diff --git a/zavodik/nodes/ComfyUI-VideoHelperSuite/testframework/server.py b/zavodik/nodes/ComfyUI-VideoHelperSuite/testframework/server.py new file mode 100644 index 0000000000000000000000000000000000000000..90d5c63c9850e6c52a69126cee209f284aded30f --- /dev/null +++ b/zavodik/nodes/ComfyUI-VideoHelperSuite/testframework/server.py @@ -0,0 +1,60 @@ +import subprocess +import json +import os +import torch +import shutil + +import server +import folder_paths + +web = server.web + +@server.PromptServer.instance.routes.post("/VHS_test") +async def test(request): + try: + req_data = await request.json() + output = req_data['output']['gifs'][0] + filename = output['filename'] + typ = output['type'] + base_args = ["ffprobe", "-v", "error", '-count_packets', "-show_entries", "stream", "-of", "json"] + video = folder_paths.get_annotated_filepath(f'{filename} [{typ}]') + vprobe = json.loads(subprocess.run(base_args + ['-select_streams', 'v:0', video], + capture_output=True, check=True).stdout)['streams'][0] + aprobe = json.loads(subprocess.run(base_args + ['-select_streams', 'a:0', video], + capture_output=True, check=True).stdout)['streams'] + probe = {'video': vprobe} + if len(aprobe) > 0: + probe['audio'] = aprobe[0] + errors = [] + compare = None + for test in req_data['tests']: + if test['type'] == 'compare': + compare = test + continue + key = test['key'] + expected = test['value'] + actual = probe[test['type']][key] + if expected != actual: + #Consider always dumping type? + errors.append(f'{key}: {expected} != {actual}') + if len(errors) == 0 and compare is not None: + if not os.path.exists(compare['filename']): + os.makedirs(os.path.split(compare['filename'])[0], exist_ok=True) + shutil.copy(video, compare['filename']) + print("Missing comparison file has been initialized from output:", os.path.abspath(compare['filename'])) + else: + #NOTE: This does not include the full memory optimizations of VHS + #Tests should be small + #TODO: Figure out way to do opacity comparison.
May need to do blending in python +                #(easy, but slower and more memory intensive) +                #blend=all_mode=grainextract outputs 0.5 + A - B per pixel, so identical frames decode to a constant 0.5 +                diff = subprocess.run(['ffmpeg', '-v', 'error', '-i', video, '-i', compare['filename'], '-filter_complex', 'blend=all_mode=grainextract', '-pix_fmt', 'rgb24', '-f', 'rawvideo', '-'], stdout=subprocess.PIPE, check=True).stdout +                diff = torch.frombuffer(diff, dtype=torch.uint8).to(dtype=torch.float32).div_(255) +                #diff = diff.reshape((-1,4)) +                #mean of |x - 0.5| is then a normalized mean absolute difference +                d = (diff-0.5).abs().sum()/diff.size(0) +                if d > compare['tolerance']: +                    errors.append(f'Similarity is outside specified tolerance: {d}') +                else: +                    print('d:', d) +        return web.json_response(errors) +    except Exception as e: +        return web.json_response(str(e)) diff --git a/zavodik/nodes/ComfyUI-VideoHelperSuite/testframework/web/js/testRunner.js b/zavodik/nodes/ComfyUI-VideoHelperSuite/testframework/web/js/testRunner.js new file mode 100644 index 0000000000000000000000000000000000000000..7ad4e2884acf30b65c85cfd33878e85c5b1cd8e6 --- /dev/null +++ b/zavodik/nodes/ComfyUI-VideoHelperSuite/testframework/web/js/testRunner.js @@ -0,0 +1,96 @@ + +import {app} from "../../../scripts/app.js"; +import {api} from "../../../scripts/api.js"; + +let watched_nodes = {} +let resolve = undefined +let testURL = api.apiURL("/VHS_test") +let errors = [] +api.addEventListener("executed", async function ({detail}) { +    if (watched_nodes && watched_nodes[detail?.node]) { +        if (detail?.output?.unfinished_batch) { +            return +        } +        let requestBody = {tests: watched_nodes[detail.node], output: detail.output} +        try { +            let req = await fetch(testURL, +                {method: "POST", body: JSON.stringify(requestBody)}); +            let testResult = await req.json() +            if (testResult.length != 0) { +                errors.push(testResult) +            } +        } catch(e) { +            errors.push(e) +        } +        if (!(watched_nodes.length -= 1)) { +            resolve() +        } +    } +}); + +const workflowService = app.extensionManager.workflow + +async function runTest(file) { +    if (!file?.name?.endsWith(".json")) { +        return false +    } +    let workflow = JSON.parse(await file.text()) +    await app.loadGraphData(workflow) +    //NOTE: API is not used so workflow data is actually processed +    watched_nodes = workflow.tests +    errors = [] +    let p = new Promise((r) => resolve = r) +    await app.queuePrompt() +    //block until execution completes +    await p +    watched_nodes = {} +    if (errors.length > 0) { +        app.ui.dialog.show("Failed " + errors.length + " tests:\n" + errors) +        return true +    } +    await workflowService.closeWorkflow(workflowService.activeWorkflow, {warnIfUnsaved: false}) +    return false +} +let iconOverride = document.createElement("style") +iconOverride.innerHTML = `.VHSTestIcon:before {content: '🧪';}` +document.body.append(iconOverride) + +let testSidebar = {id: 'VHStest', title: 'VHS Test', icon: 'VHSTestIcon', type: 'custom', +    render: (e) => { +        e.innerHTML = `Select a folder containing tests +<input> +Or select a single test +<input> +` + +        const folderInput = e.children[0] +        const fileInput = e.children[1] +        Object.assign(folderInput, { +            type: "file", +            webkitdirectory: true, +            onchange: async function() { +                const startTime = Date.now() +                let failedTests = false +                for(const file of this.files) { +                    //evaluate runTest first so the remaining files still run after a failure +                    failedTests = (await runTest(file)) || failedTests +                } +                this.value="" +                if (!failedTests) { +                    console.log("All tests passed in " + ((Date.now() - startTime)/1000) + "s") +                } +            }, +        }); +        Object.assign(fileInput, { +            type: "file", +            accept: ".json", +            onchange: async function() { +                if (this.files.length) { +                    if(!(await runTest(this.files[0]))) { +                        console.log("Test complete") +                    } +                    this.value="" +                } +            }, +        });
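+        //Note: test workflows are ordinary ComfyUI workflow JSON plus one extra top-level `tests` object (consumed above as workflow.tests): keys are node ids whose "executed" outputs get POSTed to /VHS_test, values are lists of checks ({type, key, value} ffprobe assertions or a {type: "compare", filename, tolerance} entry), and `length` counts the watched nodes and is decremented as results arrive.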
+ }} +app.extensionManager.registerSidebarTab(testSidebar) diff --git a/zavodik/nodes/ComfyUI-VideoHelperSuite/tests/README.md b/zavodik/nodes/ComfyUI-VideoHelperSuite/tests/README.md new file mode 100644 index 0000000000000000000000000000000000000000..f5f175925de0f896f7db8ceb408aabd52f71e6a6 --- /dev/null +++ b/zavodik/nodes/ComfyUI-VideoHelperSuite/tests/README.md @@ -0,0 +1 @@ +Workflows for automated testing of VHS. Most include an additional tests key to check the properties or perform comparisons on node outputs diff --git a/zavodik/nodes/ComfyUI-VideoHelperSuite/tests/audio.json b/zavodik/nodes/ComfyUI-VideoHelperSuite/tests/audio.json new file mode 100644 index 0000000000000000000000000000000000000000..5aff488a3867a78a6188f62c981fa742f9249e6d --- /dev/null +++ b/zavodik/nodes/ComfyUI-VideoHelperSuite/tests/audio.json @@ -0,0 +1,313 @@ +{ + "id": "07b812b5-5037-4878-90bc-32d3a1f36619", + "revision": 0, + "last_node_id": 7, + "last_link_id": 5, + "nodes": [ + { + "id": 5, + "type": "VHS_VideoCombine", + "pos": [ + 732, + -23 + ], + "size": [ + 210, + 334 + ], + "flags": {}, + "order": 2, + "mode": 0, + "inputs": [ + { + "name": "images", + "type": "IMAGE", + "link": 2 + }, + { + "name": "audio", + "shape": 7, + "type": "AUDIO", + "link": 3 + }, + { + "name": "meta_batch", + "shape": 7, + "type": "VHS_BatchManager", + "link": null + }, + { + "name": "vae", + "shape": 7, + "type": "VAE", + "link": null + } + ], + "outputs": [ + { + "name": "Filenames", + "type": "VHS_FILENAMES", + "links": null + } + ], + "properties": { + "Node name for S&R": "VHS_VideoCombine" + }, + "widgets_values": { + "frame_rate": 8, + "loop_count": 0, + "filename_prefix": "AnimateDiff", + "format": "video/webm", + "pix_fmt": "yuv420p", + "crf": 20, + "save_metadata": true, + "trim_to_audio": false, + "pingpong": false, + "save_output": false, + "videopreview": { + "hidden": false, + "paused": false, + "params": {} + } + } + }, + { + "id": 6, + "type": "VHS_VideoCombine", + "pos": [ + 503, + 363 + ], + "size": [ + 210, + 334 + ], + "flags": {}, + "order": 3, + "mode": 0, + "inputs": [ + { + "name": "images", + "type": "IMAGE", + "link": 4 + }, + { + "name": "audio", + "shape": 7, + "type": "AUDIO", + "link": 5 + }, + { + "name": "meta_batch", + "shape": 7, + "type": "VHS_BatchManager", + "link": null + }, + { + "name": "vae", + "shape": 7, + "type": "VAE", + "link": null + } + ], + "outputs": [ + { + "name": "Filenames", + "type": "VHS_FILENAMES", + "links": null + } + ], + "properties": { + "Node name for S&R": "VHS_VideoCombine" + }, + "widgets_values": { + "frame_rate": 8, + "loop_count": 0, + "filename_prefix": "AnimateDiff", + "format": "video/h264-mp4", + "pix_fmt": "yuv420p", + "crf": 19, + "save_metadata": true, + "trim_to_audio": false, + "pingpong": false, + "save_output": false, + "videopreview": { + "hidden": false, + "paused": false, + "params": {} + } + } + }, + { + "id": 4, + "type": "VHS_LoadVideoPath", + "pos": [ + 29, + 16 + ], + "size": [ + 221.27618408203125, + 413.1552734375 + ], + "flags": {}, + "order": 0, + "mode": 0, + "inputs": [ + { + "name": "meta_batch", + "shape": 7, + "type": "VHS_BatchManager", + "link": null + }, + { + "name": "vae", + "shape": 7, + "type": "VAE", + "link": null + } + ], + "outputs": [ + { + "name": "IMAGE", + "type": "IMAGE", + "links": [ + 2, + 4 + ] + }, + { + "name": "frame_count", + "type": "INT", + "links": null + }, + { + "name": "audio", + "type": "AUDIO", + "links": [ + 3 + ] + }, + { + "name": "video_info", + "type": "VHS_VIDEOINFO", + "links": null + } 
+ ], + "properties": { + "Node name for S&R": "VHS_LoadVideoPath" + }, + "widgets_values": { + "video": "input/bigbuckbunny.mp4", + "force_rate": 8, + "custom_width": 0, + "custom_height": 0, + "frame_load_cap": 30, + "skip_first_frames": 0, + "select_every_nth": 1, + "format": "AnimateDiff", + "videopreview": { + "hidden": false, + "paused": false, + "params": { + "filename": "input/bigbuckbunny.mp4", + "type": "path", + "format": "video/mp4", + "force_rate": 8, + "custom_width": 0, + "custom_height": 0, + "frame_load_cap": 30, + "skip_first_frames": 0, + "select_every_nth": 1 + } + } + } + }, + { + "id": 7, + "type": "VHS_LoadAudio", + "pos": [ + 83, + 564 + ], + "size": [ + 218.93820190429688, + 126 + ], + "flags": {}, + "order": 1, + "mode": 0, + "inputs": [], + "outputs": [ + { + "name": "audio", + "type": "AUDIO", + "links": [ + 5 + ] + }, + { + "name": "duration", + "type": "FLOAT", + "links": null + } + ], + "properties": { + "Node name for S&R": "VHS_LoadAudio" + }, + "widgets_values": { + "audio_file": "input/bigbuckbunny.mp4", + "seek_seconds": 0, + "duration": 5 + } + } + ], + "links": [ + [ + 2, + 4, + 0, + 5, + 0, + "IMAGE" + ], + [ + 3, + 4, + 2, + 5, + 1, + "AUDIO" + ], + [ + 4, + 4, + 0, + 6, + 0, + "IMAGE" + ], + [ + 5, + 7, + 0, + 6, + 1, + "AUDIO" + ] + ], + "groups": [], + "config": {}, + "extra": { + "frontendVersion": "1.25.0", + "VHS_latentpreview": true, + "VHS_latentpreviewrate": 0, + "VHS_MetadataImage": true, + "VHS_KeepIntermediate": true + }, + "version": 0.4, + "tests": { + "5": [{"type": "audio", "key": "nb_read_packets", "value": "177"}], + "6": [{"type": "audio", "key": "nb_read_packets", "value": "176"}], + "length": 2 + } +} \ No newline at end of file diff --git a/zavodik/nodes/ComfyUI-VideoHelperSuite/tests/batch4x4.json b/zavodik/nodes/ComfyUI-VideoHelperSuite/tests/batch4x4.json new file mode 100644 index 0000000000000000000000000000000000000000..ccd75770ba21d95d7731197012dcd273093c0bc6 --- /dev/null +++ b/zavodik/nodes/ComfyUI-VideoHelperSuite/tests/batch4x4.json @@ -0,0 +1,535 @@ +{ + "last_node_id": 18, + "last_link_id": 28, + "nodes": [ + { + "id": 16, + "type": "SolidMask", + "pos": [ + 38, + 1066 + ], + "size": { + "0": 315, + "1": 106 + }, + "flags": {}, + "order": 0, + "mode": 0, + "outputs": [ + { + "name": "MASK", + "type": "MASK", + "links": [ + 18 + ], + "slot_index": 0, + "shape": 3 + } + ], + "properties": { + "Node name for S&R": "SolidMask" + }, + "widgets_values": [ + 1, + 512, + 512 + ] + }, + { + "id": 15, + "type": "ImageCompositeMasked", + "pos": [ + 412.0800030517579, + 591.4099975585939 + ], + "size": { + "0": 315, + "1": 146 + }, + "flags": {}, + "order": 5, + "mode": 0, + "inputs": [ + { + "name": "destination", + "type": "IMAGE", + "link": 16 + }, + { + "name": "source", + "type": "IMAGE", + "link": 17 + }, + { + "name": "mask", + "type": "MASK", + "link": 18 + } + ], + "outputs": [ + { + "name": "IMAGE", + "type": "IMAGE", + "links": [ + 19 + ], + "slot_index": 0, + "shape": 3 + } + ], + "properties": { + "Node name for S&R": "ImageCompositeMasked" + }, + "widgets_values": [ + 0, + 0, + false + ] + }, + { + "id": 18, + "type": "VHS_BatchManager", + "pos": [ + 329.8000881958008, + -87.40008056640622 + ], + "size": { + "0": 315, + "1": 58 + }, + "flags": {}, + "order": 1, + "mode": 0, + "outputs": [ + { + "name": "VHS_BatchManager", + "type": "VHS_BatchManager", + "links": [ + 24, + 25, + 26, + 27 + ], + "slot_index": 0, + "shape": 3 + } + ], + "properties": { + "Node name for S&R": "VHS_BatchManager" + }, + "widgets_values": 
{ + "frames_per_batch": 4, + "count": 2 + } + }, + { + "id": 10, + "type": "VHS_LoadVideoPath", + "pos": [ + 24, + 102 + ], + "size": [ + 320, + 420.7188019966722 + ], + "flags": {}, + "order": 2, + "mode": 0, + "inputs": [ + { + "name": "meta_batch", + "type": "VHS_BatchManager", + "link": 25, + "slot_index": 0 + }, + { + "name": "vae", + "type": "VAE", + "link": null + } + ], + "outputs": [ + { + "name": "IMAGE", + "type": "IMAGE", + "links": [ + 16, + 22 + ], + "slot_index": 0, + "shape": 3 + }, + { + "name": "frame_count", + "type": "INT", + "links": null, + "shape": 3 + }, + { + "name": "audio", + "type": "AUDIO", + "links": [ + 28 + ], + "slot_index": 2, + "shape": 3 + }, + { + "name": "video_info", + "type": "VHS_VIDEOINFO", + "links": null, + "shape": 3 + } + ], + "properties": { + "Node name for S&R": "VHS_LoadVideoPath" + }, + "widgets_values": { + "video": "input/bigbuckbunny.mp4", + "force_rate": 8, + "force_size": "512x?", + "custom_width": 512, + "custom_height": 512, + "frame_load_cap": 16, + "skip_first_frames": 64, + "select_every_nth": 1, + "videopreview": { + "hidden": false, + "paused": false, + "params": { + "frame_load_cap": 16, + "skip_first_frames": 64, + "force_rate": 8, + "filename": "input/bigbuckbunny.mp4", + "type": "path", + "format": "video/mp4", + "select_every_nth": 1, + "force_size": "512x?" + } + } + } + }, + { + "id": 14, + "type": "VHS_LoadVideoPath", + "pos": [ + 26, + 598 + ], + "size": [ + 320, + 444.7188019966722 + ], + "flags": {}, + "order": 3, + "mode": 0, + "inputs": [ + { + "name": "meta_batch", + "type": "VHS_BatchManager", + "link": 26, + "slot_index": 0 + }, + { + "name": "vae", + "type": "VAE", + "link": null + } + ], + "outputs": [ + { + "name": "IMAGE", + "type": "IMAGE", + "links": [ + 17 + ], + "slot_index": 0, + "shape": 3 + }, + { + "name": "frame_count", + "type": "INT", + "links": null, + "shape": 3 + }, + { + "name": "audio", + "type": "VHS_AUDIO", + "links": [], + "slot_index": 2, + "shape": 3 + }, + { + "name": "video_info", + "type": "VHS_VIDEOINFO", + "links": null, + "shape": 3 + } + ], + "properties": { + "Node name for S&R": "VHS_LoadVideoPath" + }, + "widgets_values": { + "video": "input/bigbuckbunny.mp4", + "force_rate": 8, + "force_size": "Custom Width", + "custom_width": 384, + "custom_height": 512, + "frame_load_cap": 16, + "skip_first_frames": 64, + "select_every_nth": 1, + "videopreview": { + "hidden": false, + "paused": false, + "params": { + "frame_load_cap": 16, + "skip_first_frames": 64, + "force_rate": 8, + "filename": "input/bigbuckbunny.mp4", + "type": "path", + "format": "video/mp4", + "select_every_nth": 1, + "force_size": "512x?" 
+ } + } + } + }, + { + "id": 11, + "type": "VHS_VideoCombine", + "pos": [ + 762, + 646 + ], + "size": [ + 320, + 492.75 + ], + "flags": {}, + "order": 6, + "mode": 0, + "inputs": [ + { + "name": "images", + "type": "IMAGE", + "link": 19 + }, + { + "name": "audio", + "type": "AUDIO", + "link": 28 + }, + { + "name": "meta_batch", + "type": "VHS_BatchManager", + "link": 27, + "slot_index": 2 + }, + { + "name": "vae", + "type": "VAE", + "link": null + } + ], + "outputs": [ + { + "name": "Filenames", + "type": "VHS_FILENAMES", + "links": null, + "shape": 3 + } + ], + "properties": { + "Node name for S&R": "VHS_VideoCombine" + }, + "widgets_values": { + "frame_rate": 8, + "loop_count": 0, + "filename_prefix": "AnimateDiff", + "format": "video/h264-mp4", + "pix_fmt": "yuv420p", + "crf": 19, + "save_metadata": true, + "pingpong": false, + "save_output": false, + "videopreview": { + "hidden": false, + "paused": false, + "params": { + "filename": "AnimateDiff_00005-audio.mp4", + "subfolder": "", + "type": "temp", + "format": "video/h264-mp4", + "frame_rate": 8 + } + } + } + }, + { + "id": 17, + "type": "VHS_VideoCombine", + "pos": [ + 756, + 100 + ], + "size": [ + 320, + 492.75 + ], + "flags": {}, + "order": 4, + "mode": 0, + "inputs": [ + { + "name": "images", + "type": "IMAGE", + "link": 22 + }, + { + "name": "audio", + "type": "VHS_AUDIO", + "link": null + }, + { + "name": "meta_batch", + "type": "VHS_BatchManager", + "link": 24, + "slot_index": 2 + }, + { + "name": "vae", + "type": "VAE", + "link": null + } + ], + "outputs": [ + { + "name": "Filenames", + "type": "VHS_FILENAMES", + "links": null, + "shape": 3 + } + ], + "properties": { + "Node name for S&R": "VHS_VideoCombine" + }, + "widgets_values": { + "frame_rate": 8, + "loop_count": 0, + "filename_prefix": "AnimateDiff", + "format": "video/webm", + "pix_fmt": "yuv420p", + "crf": 20, + "save_metadata": true, + "pingpong": false, + "save_output": false, + "videopreview": { + "hidden": false, + "paused": false, + "params": { + "filename": "AnimateDiff_00004.webm", + "subfolder": "", + "type": "temp", + "format": "video/webm", + "frame_rate": 8 + } + } + } + } + ], + "links": [ + [ + 16, + 10, + 0, + 15, + 0, + "IMAGE" + ], + [ + 17, + 14, + 0, + 15, + 1, + "IMAGE" + ], + [ + 18, + 16, + 0, + 15, + 2, + "MASK" + ], + [ + 19, + 15, + 0, + 11, + 0, + "IMAGE" + ], + [ + 22, + 10, + 0, + 17, + 0, + "IMAGE" + ], + [ + 24, + 18, + 0, + 17, + 2, + "VHS_BatchManager" + ], + [ + 25, + 18, + 0, + 10, + 0, + "VHS_BatchManager" + ], + [ + 26, + 18, + 0, + 14, + 0, + "VHS_BatchManager" + ], + [ + 27, + 18, + 0, + 11, + 2, + "VHS_BatchManager" + ], + [ + 28, + 10, + 2, + 11, + 1, + "VHS_AUDIO" + ] + ], + "groups": [], + "config": {}, + "extra": {}, + "version": 0.4, + "tests": { + "17": [{"type": "video", "key": "nb_read_packets", "value": "16"}], + "11": [{"type": "video", "key": "nb_read_packets", "value": "16"}, + {"type": "compare", "filename": "custom_nodes/ComfyUI-VideoHelperSuite/tests/outputs/batch.mp4", "tolerance": 0.02} + ], + "length": 1 + } +} diff --git a/zavodik/nodes/ComfyUI-VideoHelperSuite/tests/converted-format-input.json b/zavodik/nodes/ComfyUI-VideoHelperSuite/tests/converted-format-input.json new file mode 100644 index 0000000000000000000000000000000000000000..fe1d697534e184d0c27ba13fa1207fa8229ef3ee --- /dev/null +++ b/zavodik/nodes/ComfyUI-VideoHelperSuite/tests/converted-format-input.json @@ -0,0 +1,253 @@ +{ + "last_node_id": 19, + "last_link_id": 20, + "nodes": [ + { + "id": 18, + "type": "PrimitiveNode", + "pos": [ + 318, + 618 + 
], + "size": [ + 210, + 82 + ], + "flags": {}, + "order": 0, + "mode": 0, + "inputs": [], + "outputs": [ + { + "name": "INT", + "type": "INT", + "links": [ + 18 + ], + "widget": { + "name": "crf" + } + } + ], + "properties": { + "Run widget replace on values": false + }, + "widgets_values": [ + 60, + "fixed" + ] + }, + { + "id": 11, + "type": "LoadImage", + "pos": [ + 260.4530029296875, + 233.2003173828125 + ], + "size": [ + 315, + 314 + ], + "flags": {}, + "order": 1, + "mode": 0, + "inputs": [], + "outputs": [ + { + "name": "IMAGE", + "type": "IMAGE", + "links": [ + 19 + ], + "slot_index": 0 + }, + { + "name": "MASK", + "type": "MASK", + "links": null + } + ], + "properties": { + "Node name for S&R": "LoadImage" + }, + "widgets_values": [ + "example.png", + "image" + ] + }, + { + "id": 17, + "type": "VHS_VideoCombine", + "pos": [ + 733.3749389648438, + 338.28924560546875 + ], + "size": [ + 222.91415405273438, + 522.9141845703125 + ], + "flags": {}, + "order": 3, + "mode": 0, + "inputs": [ + { + "name": "images", + "type": "IMAGE", + "link": 19 + }, + { + "name": "audio", + "type": "AUDIO", + "link": null, + "shape": 7 + }, + { + "name": "meta_batch", + "type": "VHS_BatchManager", + "link": null, + "shape": 7 + }, + { + "name": "vae", + "type": "VAE", + "link": null, + "shape": 7 + }, + { + "name": "crf", + "type": "INT", + "link": 18, + "widget": { + "name": "crf" + } + }, + { + "name": "pix_fmt", + "type": [ + "yuv420p", + "yuv420p10le" + ], + "link": 20, + "widget": { + "name": "pix_fmt" + } + } + ], + "outputs": [ + { + "name": "Filenames", + "type": "VHS_FILENAMES", + "links": null + } + ], + "properties": { + "Node name for S&R": "VHS_VideoCombine" + }, + "widgets_values": { + "frame_rate": 8, + "loop_count": 0, + "filename_prefix": "AnimateDiff", + "format": "video/h264-mp4", + "pix_fmt": "yuv420p10le", + "crf": 60, + "save_metadata": true, + "pingpong": false, + "save_output": false, + "videopreview": { + "hidden": false, + "paused": false, + "params": { + "filename": "AnimateDiff_00001.mp4", + "subfolder": "", + "type": "temp", + "format": "video/h264-mp4", + "frame_rate": 8 + }, + "muted": false + } + } + }, + { + "id": 19, + "type": "PrimitiveNode", + "pos": [ + 300, + 760 + ], + "size": [ + 290, + 110 + ], + "flags": {}, + "order": 2, + "mode": 0, + "inputs": [], + "outputs": [ + { + "name": "COMBO", + "type": "COMBO", + "links": [ + 20 + ], + "widget": { + "name": "pix_fmt" + } + } + ], + "properties": { + "Run widget replace on values": false + }, + "widgets_values": [ + "yuv420p10le", + "fixed", + "" + ] + } + ], + "links": [ + [ + 18, + 18, + 0, + 17, + 4, + "INT" + ], + [ + 19, + 11, + 0, + 17, + 0, + "IMAGE" + ], + [ + 20, + 19, + 0, + 17, + 5, + [ + "yuv420p", + "yuv420p10le" + ] + ] + ], + "groups": [], + "config": {}, + "extra": { + "ds": { + "scale": 0.8264462809917354, + "offset": [ + 45.8650452880864, + -157.46987175292935 + ] + } + }, + "version": 0.4, + "tests": { + "17": [{"type": "video", "key": "pix_fmt", "value": "yuv420p10le"} + ], + "length": 1 + } +} diff --git a/zavodik/nodes/ComfyUI-VideoHelperSuite/tests/converted-input.json b/zavodik/nodes/ComfyUI-VideoHelperSuite/tests/converted-input.json new file mode 100644 index 0000000000000000000000000000000000000000..d2c857b5a8dcfcddff8a5c0afa455affc2539c5e --- /dev/null +++ b/zavodik/nodes/ComfyUI-VideoHelperSuite/tests/converted-input.json @@ -0,0 +1,307 @@ +{ + "last_node_id": 20, + "last_link_id": 23, + "nodes": [ + { + "id": 18, + "type": "VHS_VideoInfoLoaded", + "pos": [ + 424, + 427 + ], + "size": { + 
"0": 304.79998779296875, + "1": 106 + }, + "flags": {}, + "order": 2, + "mode": 0, + "inputs": [ + { + "name": "video_info", + "type": "VHS_VIDEOINFO", + "link": 19 + } + ], + "outputs": [ + { + "name": "fps🟦", + "type": "FLOAT", + "links": [ + 20 + ], + "slot_index": 0, + "shape": 3 + }, + { + "name": "frame_count🟦", + "type": "INT", + "links": null, + "shape": 3 + }, + { + "name": "duration🟦", + "type": "FLOAT", + "links": null, + "shape": 3 + }, + { + "name": "width🟦", + "type": "INT", + "links": null, + "shape": 3 + }, + { + "name": "height🟦", + "type": "INT", + "links": null, + "shape": 3 + } + ], + "properties": { + "Node name for S&R": "VHS_VideoInfoLoaded" + }, + "widgets_values": {} + }, + { + "id": 17, + "type": "VHS_VideoCombine", + "pos": [ + 783, + 223 + ], + "size": [ + 315, + 286 + ], + "flags": {}, + "order": 3, + "mode": 0, + "inputs": [ + { + "name": "images", + "type": "IMAGE", + "link": 21 + }, + { + "name": "audio", + "type": "VHS_AUDIO", + "link": null + }, + { + "name": "meta_batch", + "type": "VHS_BatchManager", + "link": null + }, + { + "name": "frame_rate", + "type": "FLOAT", + "link": 20, + "widget": { + "name": "frame_rate" + } + } + ], + "outputs": [ + { + "name": "Filenames", + "type": "VHS_FILENAMES", + "links": null, + "shape": 3 + } + ], + "properties": { + "Node name for S&R": "VHS_VideoCombine" + }, + "widgets_values": { + "frame_rate": 8, + "loop_count": 0, + "filename_prefix": "AnimateDiff", + "format": "video/h264-mp4", + "pix_fmt": "yuv420p", + "crf": 19, + "save_metadata": true, + "pingpong": false, + "save_output": false, + "videopreview": { + "hidden": false, + "paused": false, + "params": { + "filename": "AnimateDiff_00001.mp4", + "subfolder": "", + "type": "temp", + "format": "video/h264-mp4" + } + } + } + }, + { + "id": 16, + "type": "VHS_LoadVideoPath", + "pos": [ + 96, + 230 + ], + "size": [ + 315, + 449.5 + ], + "flags": {}, + "order": 1, + "mode": 0, + "inputs": [ + { + "name": "meta_batch", + "type": "VHS_BatchManager", + "link": null + }, + { + "name": "frame_load_cap", + "type": "INT", + "link": 23, + "slot_index": 1, + "widget": { + "name": "frame_load_cap" + } + } + ], + "outputs": [ + { + "name": "IMAGE", + "type": "IMAGE", + "links": [ + 21 + ], + "slot_index": 0, + "shape": 3 + }, + { + "name": "frame_count", + "type": "INT", + "links": null, + "shape": 3 + }, + { + "name": "audio", + "type": "VHS_AUDIO", + "links": null, + "shape": 3 + }, + { + "name": "video_info", + "type": "VHS_VIDEOINFO", + "links": [ + 19 + ], + "slot_index": 3, + "shape": 3 + } + ], + "properties": { + "Node name for S&R": "VHS_LoadVideoPath" + }, + "widgets_values": { + "video": "input/leader.webm", + "force_rate": 0, + "force_size": "Disabled", + "custom_width": 512, + "custom_height": 512, + "frame_load_cap": 64, + "skip_first_frames": 0, + "select_every_nth": 1, + "videopreview": { + "hidden": false, + "paused": false, + "params": { + "frame_load_cap": 64, + "skip_first_frames": 0, + "force_rate": 0, + "filename": "input/leader.webm", + "type": "path", + "format": "video/webm", + "select_every_nth": 1 + } + } + } + }, + { + "id": 20, + "type": "PrimitiveNode", + "pos": [ + -240, + 170 + ], + "size": { + "0": 210, + "1": 80 + }, + "flags": {}, + "order": 0, + "mode": 0, + "outputs": [ + { + "name": "INT", + "type": "INT", + "links": [ + 23 + ], + "widget": { + "name": "frame_load_cap" + } + } + ], + "properties": { + "Run widget replace on values": false + }, + "widgets_values": [ + 64, + "fixed" + ] + } + ], + "links": [ + [ + 19, + 16, + 3, + 18, + 0, + 
"VHS_VIDEOINFO" + ], + [ + 20, + 18, + 0, + 17, + 3, + "FLOAT" + ], + [ + 21, + 16, + 0, + 17, + 0, + "IMAGE" + ], + [ + 23, + 20, + 0, + 16, + 1, + "INT" + ] + ], + "groups": [], + "config": {}, + "extra": {}, + "version": 0.4, + "tests": { + "17": [{"type": "video", "key": "width", "value": 1440}, + {"type": "video", "key": "height", "value": 1080}, + {"type": "video", "key": "nb_read_packets", "value": "64"} + ], + "length": 1 + } +} diff --git a/zavodik/nodes/ComfyUI-VideoHelperSuite/tests/loop.json b/zavodik/nodes/ComfyUI-VideoHelperSuite/tests/loop.json new file mode 100644 index 0000000000000000000000000000000000000000..ac40a2f96c053437d3f457da2a07e5c0ffa9f260 --- /dev/null +++ b/zavodik/nodes/ComfyUI-VideoHelperSuite/tests/loop.json @@ -0,0 +1,178 @@ +{ + "last_node_id": 3, + "last_link_id": 1, + "nodes": [ + { + "id": 1, + "type": "VHS_LoadVideo", + "pos": { + "0": 54, + "1": 89 + }, + "size": [ + 260, + 460 + ], + "flags": {}, + "order": 0, + "mode": 0, + "inputs": [ + { + "name": "meta_batch", + "type": "VHS_BatchManager", + "link": null + }, + { + "name": "vae", + "type": "VAE", + "link": null + } + ], + "outputs": [ + { + "name": "IMAGE", + "type": "IMAGE", + "links": [ + 1 + ], + "slot_index": 0, + "shape": 3 + }, + { + "name": "frame_count", + "type": "INT", + "links": null, + "shape": 3 + }, + { + "name": "audio", + "type": "VHS_AUDIO", + "links": null, + "shape": 3 + }, + { + "name": "video_info", + "type": "VHS_VIDEOINFO", + "links": null, + "shape": 3 + } + ], + "properties": { + "Node name for S&R": "VHS_LoadVideo" + }, + "widgets_values": { + "video": "leader.webm", + "force_rate": 8, + "force_size": "Disabled", + "custom_width": 304, + "custom_height": 312, + "frame_load_cap": 16, + "skip_first_frames": 1, + "select_every_nth": 1, + "choose video to upload": "image", + "videopreview": { + "hidden": false, + "paused": false, + "params": { + "frame_load_cap": 16, + "skip_first_frames": 1, + "force_rate": 8, + "filename": "leader.webm", + "type": "input", + "format": "video/mp4", + "force_size": "410.4x?", + "select_every_nth": 1 + } + } + } + }, + { + "id": 3, + "type": "VHS_VideoCombine", + "pos": { + "0": 629, + "1": 222 + }, + "size": [ + 320, + 550 + ], + "flags": {}, + "order": 1, + "mode": 0, + "inputs": [ + { + "name": "images", + "type": "IMAGE", + "link": 1 + }, + { + "name": "audio", + "type": "AUDIO", + "link": null + }, + { + "name": "meta_batch", + "type": "VHS_BatchManager", + "link": null + }, + { + "name": "vae", + "type": "VAE", + "link": null + } + ], + "outputs": [ + { + "name": "Filenames", + "type": "VHS_FILENAMES", + "links": null, + "shape": 3 + } + ], + "properties": { + "Node name for S&R": "VHS_VideoCombine" + }, + "widgets_values": { + "frame_rate": 8, + "loop_count": 1, + "filename_prefix": "AnimateDiff", + "format": "video/h264-mp4", + "pix_fmt": "yuv420p", + "crf": 19, + "save_metadata": true, + "pingpong": false, + "save_output": false, + "videopreview": { + "hidden": false, + "paused": false, + "params": { + "filename": "AnimateDiff_00005.mp4", + "subfolder": "", + "type": "temp", + "format": "video/h264-mp4", + "frame_rate": 8 + } + } + } + } + ], + "links": [ + [ + 1, + 1, + 0, + 3, + 0, + "IMAGE" + ] + ], + "groups": [], + "config": {}, + "extra": {}, + "version": 0.4, + "tests": { + "3": [{"type": "video", "key": "nb_read_packets", "value": "32"}], + "length": 1 + } +} diff --git a/zavodik/nodes/ComfyUI-VideoHelperSuite/tests/old-prores.json b/zavodik/nodes/ComfyUI-VideoHelperSuite/tests/old-prores.json new file mode 100644 index 
0000000000000000000000000000000000000000..6d701d9a776d202895b592e816d9a56d27b6588f --- /dev/null +++ b/zavodik/nodes/ComfyUI-VideoHelperSuite/tests/old-prores.json @@ -0,0 +1,423 @@ +{ + "id": "bc7448ba-118f-4226-95ab-32227992f954", + "revision": 0, + "last_node_id": 6, + "last_link_id": 4, + "nodes": [ + { + "id": 1, + "type": "VHS_LoadVideo", + "pos": [ + 54, + 89 + ], + "size": [ + 245.1999969482422, + 492.7751770019531 + ], + "flags": {}, + "order": 0, + "mode": 0, + "inputs": [ + { + "name": "meta_batch", + "shape": 7, + "type": "VHS_BatchManager", + "link": null + }, + { + "name": "vae", + "shape": 7, + "type": "VAE", + "link": null + } + ], + "outputs": [ + { + "name": "IMAGE", + "type": "IMAGE", + "slot_index": 0, + "links": [ + 1, + 2, + 3, + 4 + ] + }, + { + "name": "frame_count", + "type": "INT", + "links": null + }, + { + "name": "audio", + "type": "AUDIO", + "links": null + }, + { + "name": "video_info", + "type": "VHS_VIDEOINFO", + "links": null + } + ], + "properties": { + "Node name for S&R": "VHS_LoadVideo" + }, + "widgets_values": { + "video": "leader.webm", + "force_rate": 8, + "custom_width": 512, + "custom_height": 0, + "frame_load_cap": 64, + "skip_first_frames": 1, + "select_every_nth": 1, + "format": "AnimateDiff", + "choose video to upload": "image", + "videopreview": { + "hidden": false, + "paused": false, + "params": { + "frame_load_cap": 64, + "skip_first_frames": 0, + "force_rate": 8, + "filename": "leader.webm", + "type": "input", + "format": "video/mp4", + "select_every_nth": 1 + } + } + } + }, + { + "id": 4, + "type": "VHS_VideoCombine", + "pos": [ + 630.9500122070312, + 136.90997314453125 + ], + "size": [ + 315, + 497.25 + ], + "flags": {}, + "order": 2, + "mode": 0, + "inputs": [ + { + "name": "images", + "type": "IMAGE", + "link": 2 + }, + { + "name": "audio", + "shape": 7, + "type": "AUDIO", + "link": null + }, + { + "name": "meta_batch", + "shape": 7, + "type": "VHS_BatchManager", + "link": null + }, + { + "name": "vae", + "shape": 7, + "type": "VAE", + "link": null + } + ], + "outputs": [ + { + "name": "Filenames", + "type": "VHS_FILENAMES", + "links": null + } + ], + "properties": { + "Node name for S&R": "VHS_VideoCombine" + }, + "widgets_values": { + "frame_rate": 8, + "loop_count": 0, + "filename_prefix": "AnimateDiff", + "format": "video/ProRes", + "profile": "2", + "pingpong": false, + "save_output": false, + "videopreview": { + "hidden": false, + "paused": false, + "params": { + "filename": "AnimateDiff_00004.mov", + "subfolder": "", + "type": "temp", + "format": "video/ProRes", + "frame_rate": 8, + "workflow": "AnimateDiff_00004.png" + } + } + } + }, + { + "id": 3, + "type": "VHS_VideoCombine", + "pos": [ + 635.0499267578125, + -407.1000061035156 + ], + "size": [ + 315, + 497.25 + ], + "flags": {}, + "order": 1, + "mode": 0, + "inputs": [ + { + "name": "images", + "type": "IMAGE", + "link": 1 + }, + { + "name": "audio", + "shape": 7, + "type": "AUDIO", + "link": null + }, + { + "name": "meta_batch", + "shape": 7, + "type": "VHS_BatchManager", + "link": null + }, + { + "name": "vae", + "shape": 7, + "type": "VAE", + "link": null + } + ], + "outputs": [ + { + "name": "Filenames", + "type": "VHS_FILENAMES", + "links": null + } + ], + "properties": { + "Node name for S&R": "VHS_VideoCombine" + }, + "widgets_values": { + "frame_rate": 8, + "loop_count": 0, + "filename_prefix": "AnimateDiff", + "format": "video/ProRes", + "profile": "1", + "pingpong": false, + "save_output": false, + "videopreview": { + "hidden": false, + "paused": false, + "params": 
{ + "filename": "AnimateDiff_00001.mov", + "subfolder": "", + "type": "temp", + "format": "video/ProRes", + "frame_rate": 8, + "workflow": "AnimateDiff_00001.png" + } + } + } + }, + { + "id": 5, + "type": "VHS_VideoCombine", + "pos": [ + 974.6401977539062, + -409.33984375 + ], + "size": [ + 315, + 497.25 + ], + "flags": {}, + "order": 3, + "mode": 0, + "inputs": [ + { + "name": "images", + "type": "IMAGE", + "link": 3 + }, + { + "name": "audio", + "shape": 7, + "type": "AUDIO", + "link": null + }, + { + "name": "meta_batch", + "shape": 7, + "type": "VHS_BatchManager", + "link": null + }, + { + "name": "vae", + "shape": 7, + "type": "VAE", + "link": null + } + ], + "outputs": [ + { + "name": "Filenames", + "type": "VHS_FILENAMES", + "links": null + } + ], + "properties": { + "Node name for S&R": "VHS_VideoCombine" + }, + "widgets_values": { + "frame_rate": 8, + "loop_count": 0, + "filename_prefix": "AnimateDiff", + "format": "video/ProRes", + "profile": "3", + "pingpong": false, + "save_output": false, + "videopreview": { + "hidden": false, + "paused": false, + "params": { + "filename": "AnimateDiff_00002.mov", + "subfolder": "", + "type": "temp", + "format": "video/ProRes", + "frame_rate": 8, + "workflow": "AnimateDiff_00002.png" + } + } + } + }, + { + "id": 6, + "type": "VHS_VideoCombine", + "pos": [ + 968.7000122070312, + 138.7698974609375 + ], + "size": [ + 315, + 497.25 + ], + "flags": {}, + "order": 4, + "mode": 0, + "inputs": [ + { + "name": "images", + "type": "IMAGE", + "link": 4 + }, + { + "name": "audio", + "shape": 7, + "type": "AUDIO", + "link": null + }, + { + "name": "meta_batch", + "shape": 7, + "type": "VHS_BatchManager", + "link": null + }, + { + "name": "vae", + "shape": 7, + "type": "VAE", + "link": null + } + ], + "outputs": [ + { + "name": "Filenames", + "type": "VHS_FILENAMES", + "links": null + } + ], + "properties": { + "Node name for S&R": "VHS_VideoCombine" + }, + "widgets_values": { + "frame_rate": 8, + "loop_count": 0, + "filename_prefix": "AnimateDiff", + "format": "video/ProRes", + "profile": "4", + "pingpong": false, + "save_output": false, + "videopreview": { + "hidden": false, + "paused": false, + "params": { + "filename": "AnimateDiff_00003.mov", + "subfolder": "", + "type": "temp", + "format": "video/ProRes", + "frame_rate": 8 + } + } + } + } + ], + "links": [ + [ + 1, + 1, + 0, + 3, + 0, + "IMAGE" + ], + [ + 2, + 1, + 0, + 4, + 0, + "IMAGE" + ], + [ + 3, + 1, + 0, + 5, + 0, + "IMAGE" + ], + [ + 4, + 1, + 0, + 6, + 0, + "IMAGE" + ] + ], + "groups": [], + "config": {}, + "extra": { + "frontendVersion": "1.17.0", + "VHS_latentpreview": true, + "VHS_latentpreviewrate": 0, + "VHS_MetadataImage": true, + "VHS_KeepIntermediate": true + }, + "version": 0.4, + "tests": { + "6": [{"type": "video", "key": "pix_fmt", "value": "yuv444p12le"}], + "length": 1 + } +} \ No newline at end of file diff --git a/zavodik/nodes/ComfyUI-VideoHelperSuite/tests/old-vae-conversion.json b/zavodik/nodes/ComfyUI-VideoHelperSuite/tests/old-vae-conversion.json new file mode 100644 index 0000000000000000000000000000000000000000..7775ca0487334b4124bfdbf005b746094825ee21 --- /dev/null +++ b/zavodik/nodes/ComfyUI-VideoHelperSuite/tests/old-vae-conversion.json @@ -0,0 +1,229 @@ +{ + "last_node_id": 10, + "last_link_id": 9, + "nodes": [ + { + "id": 7, + "type": "VHS_VideoCombine", + "pos": [ + 746, + 309 + ], + "size": [ + 320, + 468.7188019966722 + ], + "flags": {}, + "order": 2, + "mode": 0, + "inputs": [ + { + "name": "images", + "type": "LATENT", + "link": 9 + }, + { + "name": "audio", + 
"type": "AUDIO", + "link": null + }, + { + "name": "meta_batch", + "type": "VHS_BatchManager", + "link": null + }, + { + "name": "vae", + "type": "VAE", + "link": 8 + } + ], + "outputs": [ + { + "name": "Filenames", + "type": "VHS_FILENAMES", + "links": null, + "shape": 3 + } + ], + "properties": { + "Node name for S&R": "VHS_VideoCombine" + }, + "widgets_values": { + "frame_rate": 8, + "loop_count": 0, + "filename_prefix": "AnimateDiff", + "format": "video/webm", + "crf": 20, + "save_metadata": true, + "pingpong": false, + "save_output": false, + "videopreview": { + "hidden": false, + "paused": false, + "params": { + "filename": "AnimateDiff_00001.webm", + "subfolder": "", + "type": "temp", + "format": "video/webm", + "frame_rate": 8 + } + } + } + }, + { + "id": 6, + "type": "VHS_LoadVideoPath", + "pos": [ + 171, + 239 + ], + "size": [ + 320, + 420.7188019966722 + ], + "flags": {}, + "order": 1, + "mode": 0, + "inputs": [ + { + "name": "meta_batch", + "type": "VHS_BatchManager", + "link": null + }, + { + "name": "vae", + "type": "VAE", + "link": 7, + "slot_index": 1 + } + ], + "outputs": [ + { + "name": "LATENT", + "type": "LATENT", + "links": [ + 9 + ], + "slot_index": 0, + "shape": 3 + }, + { + "name": "frame_count", + "type": "INT", + "links": null, + "shape": 3 + }, + { + "name": "audio", + "type": "AUDIO", + "links": [], + "slot_index": 2, + "shape": 3 + }, + { + "name": "video_info", + "type": "VHS_VIDEOINFO", + "links": null, + "shape": 3 + } + ], + "properties": { + "Node name for S&R": "VHS_LoadVideoPath" + }, + "widgets_values": { + "video": "input/bigbuckbunny.mp4", + "force_rate": 8, + "force_size": "Disabled", + "custom_width": 512, + "custom_height": 512, + "frame_load_cap": 64, + "skip_first_frames": 0, + "select_every_nth": 1, + "videopreview": { + "hidden": false, + "paused": false, + "params": { + "frame_load_cap": 64, + "skip_first_frames": 0, + "force_rate": 8, + "filename": "input/bigbuckbunny.mp4", + "type": "path", + "format": "video/mp4", + "select_every_nth": 1 + } + } + } + }, + { + "id": 10, + "type": "VAELoader", + "pos": [ + -42, + 88 + ], + "size": { + "0": 315, + "1": 58 + }, + "flags": {}, + "order": 0, + "mode": 0, + "outputs": [ + { + "name": "VAE", + "type": "VAE", + "links": [ + 7, + 8 + ], + "shape": 3, + "slot_index": 0 + } + ], + "properties": { + "Node name for S&R": "VAELoader" + }, + "widgets_values": [ + "taesd" + ] + } + ], + "links": [ + [ + 7, + 10, + 0, + 6, + 1, + "VAE" + ], + [ + 8, + 10, + 0, + 7, + 3, + "VAE" + ], + [ + 9, + 6, + 0, + 7, + 0, + "LATENT" + ] + ], + "groups": [], + "config": {}, + "extra": {}, + "version": 0.4, + "tests": { + "7": [{"type": "video", "key": "width", "value": 1920}, + {"type": "video", "key": "height", "value": 1080}, + {"type": "video", "key": "nb_read_packets", "value": "64"} + ], + "length": 1 + } +} diff --git a/zavodik/nodes/ComfyUI-VideoHelperSuite/tests/simple.json b/zavodik/nodes/ComfyUI-VideoHelperSuite/tests/simple.json new file mode 100644 index 0000000000000000000000000000000000000000..985023758b7e55ccc53a65fef9916d912215ed48 --- /dev/null +++ b/zavodik/nodes/ComfyUI-VideoHelperSuite/tests/simple.json @@ -0,0 +1,134 @@ +{ + "last_node_id": 3, + "last_link_id": 1, + "nodes": [ + { + "id": 1, + "type": "VHS_LoadVideo", + "pos": [ + 54, + 89 + ], + "size": [ + 235.1999969482422, + 384.56999829610186 + ], + "flags": {}, + "order": 0, + "mode": 0, + "outputs": [ + { + "name": "IMAGE", + "type": "IMAGE", + "links": [ + 1 + ], + "shape": 3, + "slot_index": 0 + }, + { + "name": "frame_count", + "type": 
"INT", + "links": null, + "shape": 3 + }, + { + "name": "audio", + "type": "VHS_AUDIO", + "links": null, + "shape": 3 + } + ], + "properties": { + "Node name for S&R": "VHS_LoadVideo" + }, + "widgets_values": { + "video": "leader.webm", + "force_rate": 8, + "force_size": "Custom Width", + "custom_width": 304, + "custom_height": 312, + "frame_load_cap": 16, + "skip_first_frames": 1, + "select_every_nth": 1, + "choose video to upload": "image", + "videopreview": { + "hidden": false, + "paused": false, + "params": { + "frame_load_cap": 0, + "skip_first_frames": 0, + "force_rate": 0, + "filename": "leader.webm", + "type": "input", + "format": "video/mp4", + "force_size": "410.4x?", + "select_every_nth": 1 + } + } + } + }, + { + "id": 3, + "type": "VHS_VideoCombine", + "pos": [ + 629, + 222 + ], + "size": { + "0": 315, + "1": 250 + }, + "flags": {}, + "order": 1, + "mode": 0, + "inputs": [ + { + "name": "images", + "type": "IMAGE", + "link": 1 + } + ], + "outputs": [], + "properties": { + "Node name for S&R": "VHS_VideoCombine" + }, + "widgets_values": { + "frame_rate": 8, + "loop_count": 0, + "filename_prefix": "AnimateDiff", + "format": "video/webm", + "pingpong": false, + "save_image": false, + "crf": 20, + "save_metadata": false, + "audio_file": "", + "videopreview": { + "hidden": false, + "paused": false + } + } + } + ], + "links": [ + [ + 1, + 1, + 0, + 3, + 0, + "IMAGE" + ] + ], + "groups": [], + "config": {}, + "extra": {}, + "version": 0.4, + "tests": { + "3": [{"type": "video", "key": "width", "value": 304}, + {"type": "video", "key": "height", "value": 232}, + {"type": "compare", "filename": "custom_nodes/ComfyUI-VideoHelperSuite/tests/outputs/simple.webm", "tolerance": 0.02} + ], + "length": 1 + } +} diff --git a/zavodik/nodes/ComfyUI-VideoHelperSuite/video_formats/16bit-png.json b/zavodik/nodes/ComfyUI-VideoHelperSuite/video_formats/16bit-png.json new file mode 100644 index 0000000000000000000000000000000000000000..04da16a8d7985e99bdf181c1d61912d77ff4fc65 --- /dev/null +++ b/zavodik/nodes/ComfyUI-VideoHelperSuite/video_formats/16bit-png.json @@ -0,0 +1,9 @@ +{ + "main_pass": + [ + "-n", + "-pix_fmt", "rgba64" + ], + "input_color_depth": "16bit", + "extension": "%03d.png" +} diff --git a/zavodik/nodes/ComfyUI-VideoHelperSuite/video_formats/8bit-png.json b/zavodik/nodes/ComfyUI-VideoHelperSuite/video_formats/8bit-png.json new file mode 100644 index 0000000000000000000000000000000000000000..b4144fe86ef69e1a8ece9289581255f575c30930 --- /dev/null +++ b/zavodik/nodes/ComfyUI-VideoHelperSuite/video_formats/8bit-png.json @@ -0,0 +1,7 @@ +{ + "main_pass": + [ + "-n" + ], + "extension": "%03d.png" +} diff --git a/zavodik/nodes/ComfyUI-VideoHelperSuite/video_formats/ProRes.json b/zavodik/nodes/ComfyUI-VideoHelperSuite/video_formats/ProRes.json new file mode 100644 index 0000000000000000000000000000000000000000..2588f3b0f8deca9cac57de44499a9f17a889709a --- /dev/null +++ b/zavodik/nodes/ComfyUI-VideoHelperSuite/video_formats/ProRes.json @@ -0,0 +1,22 @@ +{ + "main_pass": + [ + "-n", "-c:v", "prores_ks", + "-profile:v", [["$profile"]], + ["profile", { + "lt": [[]], "1": [[]], "standard": [[]], "2": [[]], "hq": [[]], "3": [[]], + "4": ["has_alpha", {"True": [["-pix_fmt", "yuva444p10le"]], + "False": [["-pix_fmt", "yuv444p10le"]]}], + "4444": ["has_alpha", {"True": [["-pix_fmt", "yuva444p10le"]], + "False": [["-pix_fmt", "yuv444p10le"]]}], + "4444xq": ["has_alpha", {"True": [["-pix_fmt", "yuva444p10le"]], + "False": [["-pix_fmt", "yuv444p10le"]]}] + }], + "-vf", "scale=out_color_matrix=bt709", + 
"-colorspace", "bt709", "-color_primaries", "bt709", "-color_trc", "bt709" + ], + "fake_trc": "bt709", + "audio_pass": ["-c:a", "pcm_s16le"], + "extension": "mov", + "extra_widgets": [["profile", ["lt", "standard", "hq", "4444", "4444xq"], {"default": "hq"}]] +} diff --git a/zavodik/nodes/ComfyUI-VideoHelperSuite/video_formats/av1-webm.json b/zavodik/nodes/ComfyUI-VideoHelperSuite/video_formats/av1-webm.json new file mode 100644 index 0000000000000000000000000000000000000000..830db3a9a182505cf210a1cc4468060f78f20f86 --- /dev/null +++ b/zavodik/nodes/ComfyUI-VideoHelperSuite/video_formats/av1-webm.json @@ -0,0 +1,16 @@ +{ + "main_pass": + [ + "-n", "-c:v", "libsvtav1", + "-pix_fmt", ["pix_fmt", ["yuv420p10le", "yuv420p"]], + "-crf", ["crf","INT", {"default": 23, "min": 0, "max": 100, "step": 1}], + "-vf", "scale=out_color_matrix=bt709", + "-color_range", "tv", "-colorspace", "bt709", "-color_primaries", "bt709", "-color_trc", "bt709" + ], + "fake_trc": "bt709", + "audio_pass": ["-c:a", "libopus"], + "input_color_depth": ["input_color_depth", ["8bit", "16bit"]], + "save_metadata": ["save_metadata", "BOOLEAN", {"default": true}], + "extension": "webm", + "environment": {"SVT_LOG": "1"} +} diff --git a/zavodik/nodes/ComfyUI-VideoHelperSuite/video_formats/ffmpeg-gif.json b/zavodik/nodes/ComfyUI-VideoHelperSuite/video_formats/ffmpeg-gif.json new file mode 100644 index 0000000000000000000000000000000000000000..54832565f0b6e5d1b08449995d308d73f99b8c68 --- /dev/null +++ b/zavodik/nodes/ComfyUI-VideoHelperSuite/video_formats/ffmpeg-gif.json @@ -0,0 +1,8 @@ +{ + "main_pass": + [ + "-n", + "-filter_complex", ["dither", ["bayer", "heckbert", "floyd_steinberg", "sierra2", "sierra2_4a", "sierra3", "burkes", "atkinson", "none"], {"default": "sierra2_4a"}, "[0:v] split [a][b]; [a] palettegen=reserve_transparent=on:transparency_color=ffffff [p]; [b][p] paletteuse=dither=$val"] + ], + "extension": "gif" +} diff --git a/zavodik/nodes/ComfyUI-VideoHelperSuite/video_formats/ffv1-mkv.json b/zavodik/nodes/ComfyUI-VideoHelperSuite/video_formats/ffv1-mkv.json new file mode 100644 index 0000000000000000000000000000000000000000..4c21b4167f1e4bbdeb26ab9706f8d94b6d36462f --- /dev/null +++ b/zavodik/nodes/ComfyUI-VideoHelperSuite/video_formats/ffv1-mkv.json @@ -0,0 +1,18 @@ +{ + "main_pass": [ + "-n", + "-c:v", "ffv1", + "-level", ["level", ["0", "1", "3"], {"default": "3"}], + "-coder", ["coder", ["0", "1", "2"], {"default": "1"}], + "-context", ["context", ["0", "1"], {"default": "1"}], + "-g", ["gop_size", "INT", {"default": 1, "min": 1, "max": 300, "step": 1}], + "-slices", ["slices", ["4", "6", "9", "12", "16", "20", "24", "30"], {"default": "16"}], + "-slicecrc", ["slicecrc", ["0", "1"], {"default": "1"}], + "-pix_fmt", ["pix_fmt", ["rgba64le", "bgra", "yuv420p", "yuv422p", "yuv444p", "yuva420p", "yuva422p", "yuva444p", "yuv420p10le", "yuv422p10le", "yuv444p10le", "yuv420p12le", "yuv422p12le", "yuv444p12le", "yuv420p14le", "yuv422p14le", "yuv444p14le", "yuv420p16le", "yuv422p16le", "yuv444p16le", "gray", "gray10le", "gray12le", "gray16le"], {"default": "rgba64le"}] + ], + "audio_pass": ["-c:a", "flac"], + "save_metadata": ["save_metadata", "BOOLEAN", {"default": true}], + "trim_to_audio": ["trim_to_audio", "BOOLEAN", {"default": false}], + "input_color_depth": "16bit", + "extension": "mkv" +} diff --git a/zavodik/nodes/ComfyUI-VideoHelperSuite/video_formats/gifski.json b/zavodik/nodes/ComfyUI-VideoHelperSuite/video_formats/gifski.json new file mode 100644 index 
0000000000000000000000000000000000000000..00a3feb8da2fba25c96a30d03fb818146f274994 --- /dev/null +++ b/zavodik/nodes/ComfyUI-VideoHelperSuite/video_formats/gifski.json @@ -0,0 +1,12 @@ +{ + "main_pass": + [ + "-pix_fmt", "yuv444p", + "-vf", "scale=out_color_matrix=bt709:out_range=pc", + "-color_range", "pc" + ], + "extension": "gif", + "gifski_pass": [ + "-Q", ["quality","INT", {"default": 90, "min": 1, "max": 100, "step": 1}] + ] +} diff --git a/zavodik/nodes/ComfyUI-VideoHelperSuite/video_formats/h264-mp4.json b/zavodik/nodes/ComfyUI-VideoHelperSuite/video_formats/h264-mp4.json new file mode 100644 index 0000000000000000000000000000000000000000..e90a55b317672a1ae9556baf616e4cb5e48326cd --- /dev/null +++ b/zavodik/nodes/ComfyUI-VideoHelperSuite/video_formats/h264-mp4.json @@ -0,0 +1,15 @@ +{ + "main_pass": + [ + "-n", "-c:v", "libx264", + "-pix_fmt", ["pix_fmt", ["yuv420p", "yuv420p10le"]], + "-crf", ["crf","INT", {"default": 19, "min": 0, "max": 100, "step": 1}], + "-vf", "scale=out_color_matrix=bt709", + "-color_range", "tv", "-colorspace", "bt709", "-color_primaries", "bt709", "-color_trc", "bt709" + ], + "fake_trc": "bt709", + "audio_pass": ["-c:a", "aac"], + "save_metadata": ["save_metadata", "BOOLEAN", {"default": true}], + "trim_to_audio": ["trim_to_audio", "BOOLEAN", {"default": false}], + "extension": "mp4" +} diff --git a/zavodik/nodes/ComfyUI-VideoHelperSuite/video_formats/h265-mp4.json b/zavodik/nodes/ComfyUI-VideoHelperSuite/video_formats/h265-mp4.json new file mode 100644 index 0000000000000000000000000000000000000000..c73c32a01a8bde1dd1623c18782375a936596cbc --- /dev/null +++ b/zavodik/nodes/ComfyUI-VideoHelperSuite/video_formats/h265-mp4.json @@ -0,0 +1,17 @@ +{ + "main_pass": + [ + "-n", "-c:v", "libx265", + "-vtag", "hvc1", + "-pix_fmt", ["pix_fmt", ["yuv420p10le", "yuv420p"]], + "-crf", ["crf","INT", {"default": 22, "min": 0, "max": 100, "step": 1}], + "-preset", "medium", + "-x265-params", "log-level=quiet", + "-vf", "scale=out_color_matrix=bt709", + "-color_range", "tv", "-colorspace", "bt709", "-color_primaries", "bt709", "-color_trc", "bt709" + ], + "fake_trc": "bt709", + "audio_pass": ["-c:a", "aac"], + "save_metadata": ["save_metadata", "BOOLEAN", {"default": true}], + "extension": "mp4" +} diff --git a/zavodik/nodes/ComfyUI-VideoHelperSuite/video_formats/nvenc_av1-mp4.json b/zavodik/nodes/ComfyUI-VideoHelperSuite/video_formats/nvenc_av1-mp4.json new file mode 100644 index 0000000000000000000000000000000000000000..d160c93388f37c7f2c667b4e005e5b55a523aa3e --- /dev/null +++ b/zavodik/nodes/ComfyUI-VideoHelperSuite/video_formats/nvenc_av1-mp4.json @@ -0,0 +1,15 @@ +{ + "main_pass": + [ + "-n", "-c:v", "av1_nvenc", + "-pix_fmt", ["pix_fmt", ["yuv420p", "p010le"]], + "-vf", "scale=out_color_matrix=bt709", + "-color_range", "tv", "-colorspace", "bt709", "-color_primaries", "bt709", "-color_trc", "bt709" + ], + "fake_trc": "bt709", + "audio_pass": ["-c:a", "aac"], + "bitrate": ["bitrate","INT", {"default": 10, "min": 1, "max": 999, "step": 1 }], + "megabit": ["megabit","BOOLEAN", {"default": true}], + "save_metadata": ["save_metadata", "BOOLEAN", {"default": true}], + "extension": "mp4" +} diff --git a/zavodik/nodes/ComfyUI-VideoHelperSuite/video_formats/nvenc_h264-mp4.json b/zavodik/nodes/ComfyUI-VideoHelperSuite/video_formats/nvenc_h264-mp4.json new file mode 100644 index 0000000000000000000000000000000000000000..5ca6be63f97a2acdddcb3962fca6d15b5a0053e9 --- /dev/null +++ b/zavodik/nodes/ComfyUI-VideoHelperSuite/video_formats/nvenc_h264-mp4.json @@ -0,0 +1,15 @@ +{ + 
"main_pass": + [ + "-n", "-c:v", "h264_nvenc", + "-pix_fmt", ["pix_fmt", ["yuv420p", "p010le"]], + "-vf", "scale=out_color_matrix=bt709", + "-color_range", "tv", "-colorspace", "bt709", "-color_primaries", "bt709", "-color_trc", "bt709" + ], + "fake_trc": "bt709", + "audio_pass": ["-c:a", "aac"], + "bitrate": ["bitrate","INT", {"default": 10, "min": 1, "max": 999, "step": 1 }], + "megabit": ["megabit","BOOLEAN", {"default": true}], + "save_metadata": ["save_metadata", "BOOLEAN", {"default": true}], + "extension": "mp4" +} diff --git a/zavodik/nodes/ComfyUI-VideoHelperSuite/video_formats/nvenc_hevc-mp4.json b/zavodik/nodes/ComfyUI-VideoHelperSuite/video_formats/nvenc_hevc-mp4.json new file mode 100644 index 0000000000000000000000000000000000000000..56bf0ec621732c23946c29201253d6798dd20dbc --- /dev/null +++ b/zavodik/nodes/ComfyUI-VideoHelperSuite/video_formats/nvenc_hevc-mp4.json @@ -0,0 +1,16 @@ +{ + "main_pass": + [ + "-n", "-c:v", "hevc_nvenc", + "-vtag", "hvc1", + "-pix_fmt", ["pix_fmt", ["yuv420p", "p010le"]], + "-vf", "scale=out_color_matrix=bt709", + "-color_range", "tv", "-colorspace", "bt709", "-color_primaries", "bt709", "-color_trc", "bt709" + ], + "fake_trc": "bt709", + "audio_pass": ["-c:a", "aac"], + "bitrate": ["bitrate","INT", {"default": 10, "min": 1, "max": 999, "step": 1 }], + "megabit": ["megabit","BOOLEAN", {"default": true}], + "save_metadata": ["save_metadata", "BOOLEAN", {"default": true}], + "extension": "mp4" +} diff --git a/zavodik/nodes/ComfyUI-VideoHelperSuite/video_formats/webm.json b/zavodik/nodes/ComfyUI-VideoHelperSuite/video_formats/webm.json new file mode 100644 index 0000000000000000000000000000000000000000..a2845d217bd145c744df850b654b5366bc18d238 --- /dev/null +++ b/zavodik/nodes/ComfyUI-VideoHelperSuite/video_formats/webm.json @@ -0,0 +1,16 @@ +{ + "main_pass": + [ + "-n", + "-pix_fmt", ["pix_fmt",["yuv420p","yuva420p"]], + "-crf", ["crf","INT", {"default": 20, "min": 0, "max": 100, "step": 1}], + "-b:v", "0", + "-vf", "scale=out_color_matrix=bt709", + "-color_range", "tv", "-colorspace", "bt709", "-color_primaries", "bt709", "-color_trc", "bt709" + ], + "fake_trc": "bt709", + "audio_pass": ["-c:a", "libvorbis"], + "save_metadata": ["save_metadata", "BOOLEAN", {"default": true}], + "trim_to_audio": ["trim_to_audio", "BOOLEAN", {"default": false}], + "extension": "webm" +} diff --git a/zavodik/nodes/ComfyUI-VideoHelperSuite/videohelpersuite/__pycache__/batched_nodes.cpython-313.pyc b/zavodik/nodes/ComfyUI-VideoHelperSuite/videohelpersuite/__pycache__/batched_nodes.cpython-313.pyc new file mode 100644 index 0000000000000000000000000000000000000000..75e12a58342c9a6d170f5307f7ffe4534b50ef94 Binary files /dev/null and b/zavodik/nodes/ComfyUI-VideoHelperSuite/videohelpersuite/__pycache__/batched_nodes.cpython-313.pyc differ diff --git a/zavodik/nodes/ComfyUI-VideoHelperSuite/videohelpersuite/__pycache__/documentation.cpython-313.pyc b/zavodik/nodes/ComfyUI-VideoHelperSuite/videohelpersuite/__pycache__/documentation.cpython-313.pyc new file mode 100644 index 0000000000000000000000000000000000000000..09db22d747f095b698f250a619d15fcc9910ec92 Binary files /dev/null and b/zavodik/nodes/ComfyUI-VideoHelperSuite/videohelpersuite/__pycache__/documentation.cpython-313.pyc differ diff --git a/zavodik/nodes/ComfyUI-VideoHelperSuite/videohelpersuite/__pycache__/image_latent_nodes.cpython-313.pyc b/zavodik/nodes/ComfyUI-VideoHelperSuite/videohelpersuite/__pycache__/image_latent_nodes.cpython-313.pyc new file mode 100644 index 
0000000000000000000000000000000000000000..46168028be118dc4d9a8d82f50aec747bfe3636f Binary files /dev/null and b/zavodik/nodes/ComfyUI-VideoHelperSuite/videohelpersuite/__pycache__/image_latent_nodes.cpython-313.pyc differ diff --git a/zavodik/nodes/ComfyUI-VideoHelperSuite/videohelpersuite/__pycache__/latent_preview.cpython-313.pyc b/zavodik/nodes/ComfyUI-VideoHelperSuite/videohelpersuite/__pycache__/latent_preview.cpython-313.pyc new file mode 100644 index 0000000000000000000000000000000000000000..606bc2d61878774895fa3f979ac282804be4ccda Binary files /dev/null and b/zavodik/nodes/ComfyUI-VideoHelperSuite/videohelpersuite/__pycache__/latent_preview.cpython-313.pyc differ diff --git a/zavodik/nodes/ComfyUI-VideoHelperSuite/videohelpersuite/__pycache__/load_images_nodes.cpython-313.pyc b/zavodik/nodes/ComfyUI-VideoHelperSuite/videohelpersuite/__pycache__/load_images_nodes.cpython-313.pyc new file mode 100644 index 0000000000000000000000000000000000000000..3b0b1818ba818a301b83276e2ae8199983358fdf Binary files /dev/null and b/zavodik/nodes/ComfyUI-VideoHelperSuite/videohelpersuite/__pycache__/load_images_nodes.cpython-313.pyc differ diff --git a/zavodik/nodes/ComfyUI-VideoHelperSuite/videohelpersuite/__pycache__/load_video_nodes.cpython-313.pyc b/zavodik/nodes/ComfyUI-VideoHelperSuite/videohelpersuite/__pycache__/load_video_nodes.cpython-313.pyc new file mode 100644 index 0000000000000000000000000000000000000000..08f4e7a3f62a3e8053d2d4604cbd909066023618 Binary files /dev/null and b/zavodik/nodes/ComfyUI-VideoHelperSuite/videohelpersuite/__pycache__/load_video_nodes.cpython-313.pyc differ diff --git a/zavodik/nodes/ComfyUI-VideoHelperSuite/videohelpersuite/__pycache__/logger.cpython-313.pyc b/zavodik/nodes/ComfyUI-VideoHelperSuite/videohelpersuite/__pycache__/logger.cpython-313.pyc new file mode 100644 index 0000000000000000000000000000000000000000..72950e69beb1b8bf4b83d18e4a39b71c7dff5f5d Binary files /dev/null and b/zavodik/nodes/ComfyUI-VideoHelperSuite/videohelpersuite/__pycache__/logger.cpython-313.pyc differ diff --git a/zavodik/nodes/ComfyUI-VideoHelperSuite/videohelpersuite/__pycache__/nodes.cpython-313.pyc b/zavodik/nodes/ComfyUI-VideoHelperSuite/videohelpersuite/__pycache__/nodes.cpython-313.pyc new file mode 100644 index 0000000000000000000000000000000000000000..c7b5e1450dea70016d2966ab053aa2c43edd99bc Binary files /dev/null and b/zavodik/nodes/ComfyUI-VideoHelperSuite/videohelpersuite/__pycache__/nodes.cpython-313.pyc differ diff --git a/zavodik/nodes/ComfyUI-VideoHelperSuite/videohelpersuite/__pycache__/server.cpython-313.pyc b/zavodik/nodes/ComfyUI-VideoHelperSuite/videohelpersuite/__pycache__/server.cpython-313.pyc new file mode 100644 index 0000000000000000000000000000000000000000..261426f8f923485f372074ad456ba74f6c37b4ea Binary files /dev/null and b/zavodik/nodes/ComfyUI-VideoHelperSuite/videohelpersuite/__pycache__/server.cpython-313.pyc differ diff --git a/zavodik/nodes/ComfyUI-VideoHelperSuite/videohelpersuite/__pycache__/utils.cpython-313.pyc b/zavodik/nodes/ComfyUI-VideoHelperSuite/videohelpersuite/__pycache__/utils.cpython-313.pyc new file mode 100644 index 0000000000000000000000000000000000000000..d7925a6abaf1ec4a0b6782311498fc6fd7e87024 Binary files /dev/null and b/zavodik/nodes/ComfyUI-VideoHelperSuite/videohelpersuite/__pycache__/utils.cpython-313.pyc differ diff --git a/zavodik/nodes/ComfyUI-VideoHelperSuite/videohelpersuite/batched_nodes.py b/zavodik/nodes/ComfyUI-VideoHelperSuite/videohelpersuite/batched_nodes.py new file mode 100644 index 
0000000000000000000000000000000000000000..57d0e933e45294d0d0f9557c6ba1383051c88113 --- /dev/null +++ b/zavodik/nodes/ComfyUI-VideoHelperSuite/videohelpersuite/batched_nodes.py @@ -0,0 +1,56 @@ +import torch +from nodes import VAEEncode +from comfy.utils import ProgressBar + + +class VAEDecodeBatched: +    @classmethod +    def INPUT_TYPES(s): +        return { +            "required": { +                "samples": ("LATENT", ), +                "vae": ("VAE", ), +                "per_batch": ("INT", {"default": 16, "min": 1}) +            } +        } + +    CATEGORY = "Video Helper Suite 🎥🅥🅗🅢/batched nodes" + +    RETURN_TYPES = ("IMAGE",) +    FUNCTION = "decode" + +    def decode(self, vae, samples, per_batch): +        decoded = [] +        pbar = ProgressBar(samples["samples"].shape[0]) +        for start_idx in range(0, samples["samples"].shape[0], per_batch): +            decoded.append(vae.decode(samples["samples"][start_idx:start_idx+per_batch])) +            pbar.update(per_batch) +        return (torch.cat(decoded, dim=0), ) + + +class VAEEncodeBatched: +    @classmethod +    def INPUT_TYPES(s): +        return { +            "required": { +                "pixels": ("IMAGE", ), "vae": ("VAE", ), +                "per_batch": ("INT", {"default": 16, "min": 1}) +            } +        } + +    CATEGORY = "Video Helper Suite 🎥🅥🅗🅢/batched nodes" + +    RETURN_TYPES = ("LATENT",) +    FUNCTION = "encode" + +    def encode(self, vae, pixels, per_batch): +        t = [] +        pbar = ProgressBar(pixels.shape[0]) +        for start_idx in range(0, pixels.shape[0], per_batch): +            try: +                sub_pixels = vae.vae_encode_crop_pixels(pixels[start_idx:start_idx+per_batch]) +            except AttributeError: +                #this VAE object doesn't expose vae_encode_crop_pixels; fall back to the core node's helper +                sub_pixels = VAEEncode.vae_encode_crop_pixels(pixels[start_idx:start_idx+per_batch]) +            t.append(vae.encode(sub_pixels[:,:,:,:3])) +            pbar.update(per_batch) +        return ({"samples": torch.cat(t, dim=0)}, ) diff --git a/zavodik/nodes/ComfyUI-VideoHelperSuite/videohelpersuite/documentation.py b/zavodik/nodes/ComfyUI-VideoHelperSuite/videohelpersuite/documentation.py new file mode 100644 index 0000000000000000000000000000000000000000..9d974239d632a5cec9394cf6f3d8aedd272f58dc --- /dev/null +++ b/zavodik/nodes/ComfyUI-VideoHelperSuite/videohelpersuite/documentation.py @@ -0,0 +1,616 @@ +from .logger import logger + +#helpers that wrap documentation media/descriptions in HTML for the node tooltips +def image(src): +    return f'<img src="{src}" loading="lazy">' +def video(src): +    return f'<video src="{src}" autoplay muted loop></video>' +def short_desc(desc): +    return f'<div id="VHS_shortdesc" style="font-size: .8em">{desc}</div>' + +def format_each(desc, **kwargs): +    if isinstance(desc, dict): +        res = {} +        for k,v in desc.items(): +            res[format_each(k, **kwargs)] = format_each(v, **kwargs) +        return res +    if isinstance(desc, list): +        res = [] +        for v in desc: +            res.append(format_each(v, **kwargs)) +        return res +    return desc.format(**kwargs) +def format_type(desc, lower, lowers=None, upper=None, uppers=None, cap=None): +    """Utility function for nodes with image/latent/mask variants""" +    if lowers is None: +        lowers = lower + 's' +    if cap is None: +        cap = lower.capitalize() +    if upper is None: +        upper = lower.upper() +    if uppers is None: +        uppers = lowers.upper() +    return format_each(desc, lower=lower, lowers=lowers, upper=upper, uppers=uppers, cap=cap) + +common_descriptions = { +    'merge_strategy': [ +        'Determines what the output resolution will be if input resolutions don\'t match', +        {'match A': 'Always use the resolution for A', +         'match B': 'Always use the resolution for B', +         'match smaller': 'Pick the smaller resolution by area', +         'match larger': 'Pick the larger resolution by area', +        }], +    'scale_method': [ +        'Determines what method to use if scaling is required', +        ], +    'crop_method': 'When sizes don\'t match, should the resized image have its aspect ratio changed, or be cropped to maintain aspect ratio', +    'VHS_PATH': [ +        'This is a VHS_PATH input.
common_descriptions = { + 'merge_strategy': [ + 'Determines what the output resolution will be if input resolutions don\'t match', + {'match A': 'Always use the resolution for A', + 'match B': 'Always use the resolution for B', + 'match smaller': 'Pick the smaller resolution by area', + 'match larger': 'Pick the larger resolution by area', + }], + 'scale_method': [ + 'Determines what method to use if scaling is required', + ], + 'crop_method': 'When sizes don\'t match, should the resized image have its aspect ratio changed, or be cropped to maintain aspect ratio', + 'VHS_PATH': [ + 'This is a VHS_PATH input. When edited, it provides a list of possible valid files or directories', + video('https://github.com/Kosinkadink/ComfyUI-VideoHelperSuite/assets/4284322/729b7185-1fca-41d8-bc8d-a770bb2a5ce6'), + 'The current top-most completion may be selected with Tab', + 'You can navigate up a directory by pressing Ctrl+B (or Ctrl+W if supported by browser)', + 'The filter on suggested file types can be disabled by pressing Ctrl+G.', + 'If converted to an input, this functions as a string', + ], + "GetCount": ['Get {cap} Count 🎥🅥🅗🅢', short_desc('Return the number of {lowers} in an input as an INT'), + {'Inputs': { + '{lowers}': 'The input {lower}', + }, + 'Outputs': { + 'count': 'The number of {lowers} in the input', + }, + }], + "SelectEveryNth": ['Select Every Nth {cap} 🎥🅥🅗🅢', short_desc('Keep only 1 {lower} for every interval'), + {'Inputs': { + '{lowers}': 'The input {lower}', + }, + 'Outputs': { + '{upper}': 'The output {lowers}', + 'count': 'The number of {lowers} in the input', + }, + 'Widgets':{ + 'select_every_nth': 'The interval from which one frame is kept. 1 means no frames are skipped.', + 'skip_first_{lowers}': 'The number of frames that are skipped from the start. This applies before select_every_nth. As a result, multiple copies of the node can each have a different skip_first_frames to divide the {lower} into groups' + }, + }], +} + +descriptions = { + 'VHS_VideoCombine': ['Video Combine 🎥🅥🅗🅢', short_desc('Combine an image sequence into a video'), { + 'Inputs': { + 'images': 'The images to be turned into a video', + 'audio':'(optional) audio to add to the video', + 'meta_batch': '(optional) Connect to a Meta Batch manager to divide extremely long image sequences into sub batches. See the documentation for Meta Batch Manager', + 'vae':['(optional) If provided, the node will take latents as input instead of images. This drastically reduces the required RAM (not VRAM) when working with long (100+ frames) sequences', + "Unlike on Load Video, this isn't always a strict upgrade over using a standalone VAE Decode.", + "If you have multiple Video Combine outputs, then the VAE decode will be performed for each output node increasing execution time", + "If you make any change to output settings on the Video Combine (such as changing the output format), the VAE decode will be performed again as the decoded result is (by design) not cached", + ] + }, + 'Widgets':{ + 'frame_rate': 'The frame rate which will be used for the output video. Consider converting this to an input and connecting this to a Load Video with Video Info(Loaded)->fps. When including audio, failure to properly set this will result in audio desync', + 'loop_count': 'The number of additional times the video should repeat. Can cause performance issues when used with long (100+ frames) sequences', + 'filename_prefix': 'A prefix to add to the name of the output filename. This can include subfolders or format strings.', + 'format': 'The output format to use. Formats starting with \'image\' are saved with PIL, but formats starting with \'video\' utilize the video_formats system. \'video\' options require ffmpeg and selecting one frequently adds additional options to the node.', + 'pingpong': 'Play the video normally, then repeat the video in reverse so that it \'pingpongs\' back and forth. 
This is frequently used to minimize the appearance of skips on very short animations.', + 'save_output': 'Specifies if output files should be saved to the output folder, or the temporary output folder', + 'videopreview': 'Displays a preview for the processed result. If advanced previews is enabled, the output is always converted to a format viewable from the browser. If the video has audio, it will also be previewed when moused over. Additional preview options can be accessed with right click.', + }, + 'Common Format Widgets': { + 'crf': 'Determines how much to prioritize quality over filesize. Numbers vary between formats, but on each format that includes it, the default value provides visually lossless output', + 'pix_fmt': ['The pixel format to use for output. Alternative options will often have higher quality at the cost of increased file size and reduced compatibility with external software.', { + 'yuv420p': 'The most common and default format', + 'yuv420p10le': 'Use 10 bit color depth. This can improve color quality when combined with 16bit input color depth', + 'yuva420p': 'Include transparency in the output video' + }], + 'input_color_depth': 'VHS supports outputting 16bit images. While this produces higher quality output, the difference usually isn\'t visible without postprocessing and it significantly increases file size and processing time.', + 'save_metadata': 'Determines if metadata for the workflow should be included in the output video file' + } + }], + 'VHS_LoadVideo': ['Load Video 🎥🅥🅗🅢', short_desc('Loads a video from the input folder'), + {'Inputs': { + 'meta_batch': '(optional) Connect to a Meta Batch manager to divide extremely long sequences into sub batches. See the documentation for Meta Batch Manager', + 'vae': ['(optional) If provided the node will output latents instead of images. This drastically reduces the required RAM (not VRAM) when working with long (100+ frames) sequences', + 'Using this is strongly encouraged unless connecting to a node that requires a blue image connection such as Apply ControlNet', + ], + }, + 'Outputs': { + 'IMAGE': 'The loaded images', + 'frame_count': 'The length of images just returned', + 'audio': 'The audio from the loaded video', + 'video_info': 'Exposes additional info about the video such as the source frame rate, or the total length', + 'LATENT': 'The loaded images pre-converted to latents. Only available when a vae is connected', + }, + 'Widgets': { + 'video': 'The video file to be loaded. Lists all files with a video extension in the ComfyUI/Input folder', + 'force_rate': 'Drops or duplicates frames so that the produced output has the target frame rate. Many motion models are trained on videos of a specific frame rate and will give better results if input matches that frame rate. If set to 0, all frames are returned. May give unusual results with inputs that have a variable frame rate like animated gifs. Reducing this value can also greatly reduce the execution time and memory requirements.', + 'force_size': 'Previously was used to provide suggested resolutions. Instead, custom_width and custom_height can be disabled by setting to 0.', + 'custom_width': 'Allows for an arbitrary width to be entered, cropping to maintain aspect ratio if both are set', + 'custom_height': 'Allows for an arbitrary height to be entered, cropping to maintain aspect ratio if both are set', + 'frame_load_cap': 'The maximum number of frames to load. If 0, all frames are loaded.', + 'skip_first_frames': 'A number of frames which are discarded before producing output.', + 'select_every_nth': 'Similar to frame rate. Keeps only the first of every n frames and discards the rest. Has better compatibility with variable frame rate inputs such as gifs. When combined with force_rate, select_every_nth applies after force_rate so the resulting output has a frame rate equivalent to force_rate/select_every_nth. select_every_nth does not apply to skip_first_frames', + 'format': 'Updates other widgets so that only values supported by the given format can be entered and provides recommended defaults.', + 'choose video to upload': 'An upload button is provided to upload local files to the input folder', + 'videopreview': 'Displays a preview for the selected video input. If advanced previews is enabled, this preview will reflect the frame_load_cap, force_rate, skip_first_frames, and select_every_nth values chosen. If the video has audio, it will also be previewed when moused over. Additional preview options can be accessed with right click.', + } + }],
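+ # (Worked example for the frame options documented above, added for clarity:
+ # with force_rate=8, skip_first_frames=2 and select_every_nth=3, the source is
+ # first resampled to 8 fps, the first 2 resampled frames are dropped, then
+ # frames 2, 5, 8, ... are kept, for an effective output rate of 8 / 3 ≈ 2.67 fps.)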
 'VHS_LoadVideoFFmpeg': ['Load Video FFmpeg 🎥🅥🅗🅢', short_desc('Loads a video from the input folder using ffmpeg instead of opencv'), + 'Provides faster execution speed, transparency support, and allows specifying start time in seconds', + {'Inputs': { + 'meta_batch': '(optional) Connect to a Meta Batch manager to divide extremely long sequences into sub batches. See the documentation for Meta Batch Manager', + 'vae': ['(optional) If provided the node will output latents instead of images. This drastically reduces the required RAM (not VRAM) when working with long (100+ frames) sequences', + 'Using this is strongly encouraged unless connecting to a node that requires a blue image connection such as Apply ControlNet', + ], + }, + 'Outputs': { + 'IMAGE': 'The loaded images', + 'mask': 'Transparency data from the loaded video', + 'audio': 'The audio from the loaded video', + 'video_info': 'Exposes additional info about the video such as the source frame rate, or the total length', + 'LATENT': 'The loaded images pre-converted to latents. Only available when a vae is connected', + }, + 'Widgets': { + 'video': 'The video file to be loaded. Lists all files with a video extension in the ComfyUI/Input folder', + 'force_rate': 'Drops or duplicates frames so that the produced output has the target frame rate. Many motion models are trained on videos of a specific frame rate and will give better results if input matches that frame rate. If set to 0, all frames are returned. May give unusual results with inputs that have a variable frame rate like animated gifs. Reducing this value can also greatly reduce the execution time and memory requirements.', + 'force_size': 'Previously was used to provide suggested resolutions. Instead, custom_width and custom_height can be disabled by setting to 0.', + 'custom_width': 'Allows for an arbitrary width to be entered, cropping to maintain aspect ratio if both are set', + 'custom_height': 'Allows for an arbitrary height to be entered, cropping to maintain aspect ratio if both are set', + 'frame_load_cap': 'The maximum number of frames to load. If 0, all frames are loaded.', + 'start_time': 'A timestamp, in seconds from the start of the video, to start loading frames from. 
', + 'format': 'Updates other widgets so that only values supported by the given format can be entered and provides recommended defaults.', + 'choose video to upload': 'An upload button is provided to upload local files to the input folder', + 'videopreview': 'Displays a preview for the selected video input. If advanced previews is enabled, this preview will reflect the frame_load_cap, force_rate, skip_first_frames, and select_every_nth values chosen. If the video has audio, it will also be previewed when moused over. Additional preview options can be accessed with right click.', + } + }], + 'VHS_LoadVideoPath': ['Load Video (Path) 🎥🅥🅗🅢', short_desc('Loads a video from an arbitrary path'), + {'Inputs': { + 'meta_batch': '(optional) Connect to a Meta Batch manager to divide extremely long sequences into sub batches. See the documentation for Meta Batch Manager', + 'vae': ['(optional) If provided the node will output latents instead of images. This drastically reduces the required RAM (not VRAM) when working with long (100+ frames) sequences', + 'Using this is strongly encouraged unless connecting to a node that requires a blue image connection such as Apply ControlNet', + ], + }, + 'Outputs': { + 'IMAGE': 'The loaded images', + 'frame_count': 'The length of images just returned', + 'audio': 'The audio from the loaded video', + 'video_info': 'Exposes additional info about the video such as the source frame rate, or the total length', + 'LATENT': 'The loaded images pre-converted to latents. Only available when a vae is connected', + }, + 'Widgets': { + 'video': ['The video file to be loaded.', 'You can also select an image to load it as a single frame'] + common_descriptions['VHS_PATH'], + 'force_rate': 'Drops or duplicates frames so that the produced output has the target frame rate. Many motion models are trained on videos of a specific frame rate and will give better results if input matches that frame rate. If set to 0, all frames are returned. May give unusual results with inputs that have a variable frame rate like animated gifs. Reducing this value can also greatly reduce the execution time and memory requirements.', + 'force_size': 'Previously was used to provide suggested resolutions. Instead, custom_width and custom_height can be disabled by setting to 0.', + 'custom_width': 'Allows for an arbitrary width to be entered, cropping to maintain aspect ratio if both are set', + 'custom_height': 'Allows for an arbitrary height to be entered, cropping to maintain aspect ratio if both are set', + 'frame_load_cap': 'The maximum number of frames to load. If 0, all frames are loaded.', + 'skip_first_frames': 'A number of frames which are discarded before producing output.', + 'select_every_nth': 'Similar to frame rate. Keeps only the first of every n frames and discards the rest. Has better compatibility with variable frame rate inputs such as gifs. When combined with force_rate, select_every_nth applies after force_rate so the resulting output has a frame rate equivalent to force_rate/select_every_nth. select_every_nth does not apply to skip_first_frames', + 'format': 'Updates other widgets so that only values supported by the given format can be entered and provides recommended defaults.', + 'videopreview': 'Displays a preview for the selected video input. Will only be shown if Advanced Previews is enabled. This preview will reflect the frame_load_cap, force_rate, skip_first_frames, and select_every_nth values chosen. If the video has audio, it will also be previewed when moused over. 
Additional preview options can be accessed with right click.', + } + }], + 'VHS_LoadVideoFFmpegPath': ['Load Video FFmpeg (Path) 🎥🅥🅗🅢', short_desc('Loads a video from an arbitrary path using ffmpeg instead of opencv'), + 'Provides faster execution speed, transparency support, and allows specifying start time in seconds', + {'Inputs': { + 'meta_batch': '(optional) Connect to a Meta Batch manager to divide extremely long sequences into sub batches. See the documentation for Meta Batch Manager', + 'vae': ['(optional) If provided the node will output latents instead of images. This drastically reduces the required RAM (not VRAM) when working with long (100+ frames) sequences', + 'Using this is strongly encouraged unless connecting to a node that requires a blue image connection such as Apply ControlNet', + ], + }, + 'Outputs': { + 'IMAGE': 'The loaded images', + 'mask': 'Transparency data from the loaded video', + 'audio': 'The audio from the loaded video', + 'video_info': 'Exposes additional info about the video such as the source frame rate, or the total length', + 'LATENT': 'The loaded images pre-converted to latents. Only available when a vae is connected', + }, + 'Widgets': { + 'video': ['The video file to be loaded.', 'You can also select an image to load it as a single frame'] + common_descriptions['VHS_PATH'], + 'force_rate': 'Drops or duplicates frames so that the produced output has the target frame rate. Many motion models are trained on videos of a specific frame rate and will give better results if input matches that frame rate. If set to 0, all frames are returned. May give unusual results with inputs that have a variable frame rate like animated gifs. Reducing this value can also greatly reduce the execution time and memory requirements.', + 'force_size': 'Previously was used to provide suggested resolutions. Instead, custom_width and custom_height can be disabled by setting to 0.', + 'custom_width': 'Allows for an arbitrary width to be entered, cropping to maintain aspect ratio if both are set', + 'custom_height': 'Allows for an arbitrary height to be entered, cropping to maintain aspect ratio if both are set', + 'frame_load_cap': 'The maximum number of frames to load. If 0, all frames are loaded.', + 'skip_first_frames': 'A number of frames which are discarded before producing output.', + 'select_every_nth': 'Similar to frame rate. Keeps only the first of every n frames and discards the rest. Has better compatibility with variable frame rate inputs such as gifs. When combined with force_rate, select_every_nth applies after force_rate so the resulting output has a frame rate equivalent to force_rate/select_every_nth. select_every_nth does not apply to skip_first_frames', + 'format': 'Updates other widgets so that only values supported by the given format can be entered and provides recommended defaults.', + 'videopreview': 'Displays a preview for the selected video input. Will only be shown if Advanced Previews is enabled. This preview will reflect the frame_load_cap, force_rate, skip_first_frames, and select_every_nth values chosen. If the video has audio, it will also be previewed when moused over. Additional preview options can be accessed with right click.', + } + }], + 'VHS_LoadImages': ['Load Images 🎥🅥🅗🅢', short_desc('Loads a sequence of images from a subdirectory of the input folder'), + {'Inputs': { + 'meta_batch': '(optional) Connect to a Meta Batch manager to divide extremely long sequences into sub batches. 
See the documentation for Meta Batch Manager', + }, + 'Outputs': { + 'IMAGE': 'The loaded images', + 'MASK': 'The alpha channel of the loaded images.', + 'frame_count': 'The length of images just returned', + }, + 'Widgets': { + 'directory': 'The directory images will be loaded from. Filtered to process jpg, png, ppm, bmp, tif, and webp files', + 'image_load_cap': 'The maximum number of images to load. If 0, all images are loaded.', + 'skip_first_images': 'A number of images which are discarded before producing output.', + 'select_every_nth': 'Keeps only the first of every n images and discards the rest.', + 'choose folder to upload': 'An upload button is provided to upload a local folder containing images to the input folder', + 'videopreview': 'Displays a preview for the selected video input. Will only be shown if Advanced Previews is enabled. This preview will reflect the image_load_cap, skip_first_images, and select_every_nth values chosen. Additional preview options can be accessed with right click.', + } + }], + 'VHS_LoadImagesPath': ['Load Images (Path) 🎥🅥🅗🅢', short_desc('Loads a sequence of images from an arbitrary path'), + {'Inputs': { + 'meta_batch': '(optional) Connect to a Meta Batch manager to divide extremely long sequences into sub batches. See the documentation for Meta Batch Manager', + }, + 'Outputs': { + 'IMAGE': 'The loaded images', + 'MASK': 'The alpha channel of the loaded images.', + 'frame_count': 'The length of images just returned', + }, + 'Widgets': { + 'directory': ['The directory images will be loaded from. Filtered to process jpg, png, ppm, bmp, tif, and webp files'] + common_descriptions['VHS_PATH'], + 'image_load_cap': 'The maximum number of images to load. If 0, all images are loaded.', + 'skip_first_images': 'A number of images which are discarded before producing output.', + 'select_every_nth': 'Keeps only the first of every n frames and discards the rest.', + 'videopreview': 'Displays a preview for the selected video input. Will only be shown if Advanced Previews is enabled. This preview will reflect the image_load_cap, skip_first_images, and select_every_nth values chosen. Additional preview options can be accessed with right click.', + } + }], + 'VHS_LoadImagePath': ['Load Image (Path) 🎥🅥🅗🅢', short_desc('Load a single image from a given path'), + {'Inputs': { + 'vae': '(optional) If provided the node will output latents instead of images.', + }, + 'Outputs': { + 'IMAGE': 'The loaded images', + 'MASK': 'The alpha channel of the loaded images.', + }, + 'Widgets': { + 'image': ['The image file to be loaded.'] + common_descriptions['VHS_PATH'], + 'force_size': ['Allows for conveniently scaling the input without requiring an additional node. Provides options to maintain aspect ratio or conveniently target common training formats for Animate Diff', {'custom_width': 'Allows for an arbitrary width to be entered, cropping to maintain aspect ratio if both are set', + 'custom_height': 'Allows for an arbitrary height to be entered, cropping to maintain aspect ratio if both are set'}], + 'videopreview': 'Displays a preview for the selected video input. Will only be shown if Advanced Previews is enabled. This preview will reflect the image_load_cap, skip_first_images, and select_every_nth values chosen. 
Additional preview options can be accessed with right click.', + } + }], + "VHS_LoadAudio": ['Load Audio (Path) 🎥🅥🅗🅢', short_desc('Loads an audio file from an arbitrary path'), + {'Outputs': { + 'audio': 'The loaded audio', + }, + 'Widgets': { + 'audio_file': ['The audio file to be loaded.'] + common_descriptions['VHS_PATH'], + 'seek_seconds': 'An offset from the start of the sound file that the audio should start from', + } + }], + "VHS_LoadAudioUpload": ['Load Audio (Upload) 🎥🅥🅗🅢', short_desc('Loads an audio file from the input directory'), + "Very similar in functionality to the built-in LoadAudio. It was originally added before VHS swapped to use Comfy's internal AUDIO format, but provides the additional options for start time and duration", + {'Outputs': { + 'audio': 'The loaded audio', + }, + 'Widgets': { + 'audio': 'The audio file to be loaded.', + 'start_time': 'An offset from the start of the sound file that the audio should start from', + 'duration': 'A maximum limit for the audio. Disabled if 0', + 'choose audio to upload': 'An upload button is provided to upload an audio file to the input folder', + } + }], + "VHS_AudioToVHSAudio": ['Audio to legacy VHS_AUDIO 🎥🅥🅗🅢', short_desc('utility function for compatibility with external nodes'), + "VHS used to use an internal VHS_AUDIO format for routing audio between inputs and outputs. This format was intended to only be used internally and was designed with a focus on performance over ease of use. Since ComfyUI now has an internal AUDIO format, VHS now uses this format. However, some custom node packs were made that are external to both ComfyUI and VHS that use VHS_AUDIO. This node was added so that those external nodes can still function", + {'Inputs': { + 'audio': 'An input in the standardized AUDIO format', + }, + 'Outputs': { + 'vhs_audio': 'An output in the legacy VHS_AUDIO format for use with external nodes', + }, + }], + "VHS_VHSAudioToAudio": ['Legacy VHS_AUDIO to Audio 🎥🅥🅗🅢', short_desc('utility function for compatibility with external nodes'), + "VHS used to use an internal VHS_AUDIO format for routing audio between inputs and outputs. This format was intended to only be used internally and was designed with a focus on performance over ease of use. Since ComfyUI now has an internal AUDIO format, VHS now uses this format. However, some custom node packs were made that are external to both ComfyUI and VHS that use VHS_AUDIO. This node was added so that those external nodes can still function", + {'Inputs': { + 'vhs_audio': 'An input in the legacy VHS_AUDIO format produced by an external node', + }, + 'Outputs': { + 'audio': 'An output in the standardized AUDIO format', + }, + }], + "VHS_PruneOutputs": ['Prune Outputs 🎥🅥🅗🅢', short_desc('Automates deletion of undesired outputs from a Video Combine node.'), + 'Video Combine produces a number of file outputs in addition to the final output. Some of these, such as a video file without audio included, are implementation limitations and are not feasible to solve. 
As an alternative, the Prune Outputs node is added to automate the deletion of these file outputs if they are not desired', + {'Inputs': { + 'filenames': 'A connection from a Video Combine node to indicate which outputs should be pruned', + }, + 'Widgets': { + 'options': ['Which files should be deleted', + {'Intermediate': 'Delete any files that were required for intermediate processing but are not the final output, like the no-audio output file when audio is included', + 'Intermediate and Utility': 'Delete all produced files that aren\'t the final output, including the first frame png', + }]} + }], + "VHS_BatchManager": ['Meta Batch Manager 🎥🅥🅗🅢', short_desc('Split the processing of a very long video into sets of smaller Meta Batches'), + "The Meta Batch Manager allows for extremely long input videos to be processed when all other methods for fitting the content in RAM fail. It does not affect VRAM usage.", + "It must be connected to at least one Input (a Load Video or Load Images) AND at least one Video Combine", + image("https://github.com/Kosinkadink/ComfyUI-VideoHelperSuite/assets/4284322/7cb3fb7e-59d8-4cb2-a09f-9c6698de8b1f"), + "It functions by holding both the inputs and outputs open between executions, and automatically requeues the workflow until one of the inputs is unable to provide additional images.", + "Because each sub execution only contains a subset of the total frames, each sub execution creates a hard window which temporal smoothing cannot be applied across. This results in jumps in the output.", + {'Outputs': { + 'meta_batch': 'Add all connected nodes to this Meta Batch', + }, + 'Widgets': { + 'frames_per_batch': 'How many frames to process for each sub execution. If loading as image, each frame will use about 50MB of RAM (not VRAM), and this can safely be set in the 100-1000 range, depending on available memory. When loading and combining from latent space (no blue image noodles exist), this value can be much higher, around the 2,000 to 20,000 range', + } + }],
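+ # (Worked arithmetic for the frames_per_batch guidance above, added for
+ # clarity: at roughly 50MB of RAM per frame loaded as an image, 16GB of free
+ # RAM supports about 16000 / 50 = 320 frames per sub-batch, which is why the
+ # suggested image-space range is 100-1000 depending on available memory.)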
 "VHS_VideoInfo": ['Video Info 🎥🅥🅗🅢', short_desc('Splits information on a video into numerous outputs'), + {'Inputs': { + 'video_info': 'A connection to a Load Video node', + }, + 'Outputs': { + 'source_fps🟨': 'The frame rate of the video', + 'source_frame_count🟨': 'How many total frames the video contains before accounting for frame rate or select_every_nth', + 'source_duration🟨': 'The length of images just returned in seconds', + 'source_width🟨': 'The width', + 'source_height🟨': 'The height', + 'loaded_fps🟦': 'The frame rate after accounting for force_rate and select_every_nth. This output is of particular use as it can be connected to the converted frame_rate input of a Video Combine node to ensure audio remains synchronized.', + 'loaded_frame_count🟦': 'The number of frames returned by the current execution. Identical to the frame_count returned by the node itself', + 'loaded_duration🟦': 'The duration in seconds of returned images after accounting for frame_load_cap', + 'loaded_width🟦': 'The width of the video after scaling. These coordinates are in image space even if loading to latent space', + 'loaded_height🟦': 'The height of the video after scaling. These coordinates are in image space even if loading to latent space', + }, + }], + "VHS_VideoInfoSource": ['Video Info Source 🎥🅥🅗🅢', short_desc('Splits information on a video into numerous outputs describing the file itself without accounting for load options'), + {'Inputs': { + 'video_info': 'A connection to a Load Video node', + }, + 'Outputs': { + 'source_fps🟨': 'The frame rate of the video', + 'source_frame_count🟨': 'How many total frames the video contains before accounting for frame rate or select_every_nth', + 'source_duration🟨': 'The length of images just returned in seconds', + 'source_width🟨': 'The original width', + 'source_height🟨': 'The original height', + } + }], + "VHS_VideoInfoLoaded": ['Video Info Loaded 🎥🅥🅗🅢', short_desc('Splits information on a video into numerous outputs describing the file itself after accounting for load options'), + {'Inputs': { + 'video_info': 'A connection to a Load Video node', + }, + 'Outputs': { + 'loaded_fps🟦': 'The frame rate after accounting for force_rate and select_every_nth. This output is of particular use as it can be connected to the converted frame_rate input of a Video Combine node to ensure audio remains synchronized.', + 'loaded_frame_count🟦': 'The number of frames returned by the current execution. Identical to the frame_count returned by the node itself', + 'loaded_duration🟦': 'The duration in seconds of returned images after accounting for frame_load_cap', + 'loaded_width🟦': 'The width of the video after scaling. This is the dimension of the corresponding image even if loading as a latent directly', + 'loaded_height🟦': 'The height of the video after scaling. This is the dimension of the corresponding image even if loading as a latent directly', + } + }], + "VHS_SelectFilename": ['Select Filename 🎥🅥🅗🅢', short_desc('Select a single filename from the VHS_FILENAMES output by a Video Combine and return it as a string'), + 'Take care when combining this node with Prune Outputs. The VHS_FILENAMES object is immutable and will always contain the full list of output files, but execution order is undefined behavior (currently, Prune Outputs will generally execute first) and SelectFilename may return a path to a file that no longer exists.', + {'Inputs': { + 'filenames': 'A VHS_FILENAMES from a Video Combine node', + }, + 'Outputs': { + 'filename': 'A string representation of the full output path for the chosen file', + }, + 'Widgets': { + 'index': 'The index of which file should be selected. The default, -1, chooses the most complete output', + }, + }], + # Batched Nodes + "VHS_VAEEncodeBatched": ['VAE Encode Batched 🎥🅥🅗🅢', short_desc('Encode images as latents with a manually specified batch size.'), + "Some people have run into VRAM issues when encoding or decoding large batches of images. As a workaround, this node lets you manually set a batch size when encoding images.", + "Unless these issues have been encountered, it is simpler to use the native VAE Encode or to encode directly from a Load Video", + {'Inputs': { + 'pixels': 'The images to be encoded.', + 'vae': 'The VAE to use when encoding.', + }, + 'Outputs': { + 'LATENT': 'The encoded latents.', + }, + 'Widgets': { + 'per_batch': 'The maximum number of images to encode in each batch.', + }, + }], + "VHS_VAEDecodeBatched": ['VAE Decode Batched 🎥🅥🅗🅢', short_desc('Decode latents to images with a manually specified batch size'), + "Some people have run into VRAM issues when encoding or decoding large batches of images. 
As a workaround, this node lets you manually set a batch size when decoding latents.", + "Unless these issues have been encountered, it is simpler to use the native VAE Decode or to decode from a Video Combine directly", + {'Inputs': { + 'samples': 'The latents to be decoded.', + 'vae': 'The VAE to use when decoding.', + }, + 'Outputs': { + 'IMAGE': 'The decoded images.', + }, + 'Widgets': { + 'per_batch': 'The maximum number of images to decode in each batch.', + }, + }], + # Latent and Image nodes + "VHS_SplitLatents": ['Split Latents 🎥🅥🅗🅢', short_desc('Split a set of latents into two groups'), + {'Inputs': { + 'latents': 'The latents to be split.', + }, + 'Outputs': { + 'LATENT_A': 'The first group of latents', + 'A_count': 'The number of latents in group A. This will be equal to split_index unless the latents input has length less than split_index', + 'LATENT_B': 'The second group of latents', + 'B_count': 'The number of latents in group B' + }, + 'Widgets': { + 'split_index': 'The index of the first latent that will be in the second output group.', + }, + + }], + "VHS_SplitImages": ['Split Images 🎥🅥🅗🅢', short_desc('Split a set of images into two groups'), + {'Inputs': { + 'images': 'The images to be split.', + }, + 'Outputs': { + 'IMAGE_A': 'The first group of images', + 'A_count': 'The number of images in group A. This will be equal to split_index unless the images input has length less than split_index', + 'IMAGE_B': 'The second group of images', + 'B_count': 'The number of images in group B' + }, + 'Widgets': { + 'split_index': 'The index of the first image that will be in the second output group.', + }, + + }], + "VHS_SplitMasks": ['Split Masks 🎥🅥🅗🅢', short_desc('Split a set of masks into two groups'), + {'Inputs': { + 'mask': 'The masks to be split.', + }, + 'Outputs': { + 'MASK_A': 'The first group of masks', + 'A_count': 'The number of masks in group A. 
This will be equal to split_index unless the mask input has length less than split_index', + 'MASK_B': 'The second group of masks', + 'B_count': 'The number of masks in group B' + }, + 'Widgets': { + 'split_index': 'The index of the first mask that will be in the second output group.', + }, + + }], + "VHS_MergeLatents": ['Merge Latents 🎥🅥🅗🅢', short_desc('Combine two groups of latents into a single group of latents'), + {'Inputs': { + 'latents_A': 'The first group of latents', + 'latents_B': 'The second group of latents', + }, + 'Outputs': { + 'LATENT': 'The combined group of latents', + 'count': 'The length of the combined group', + }, + 'Widgets': { + 'merge_strategy': common_descriptions['merge_strategy'], + 'scale_method': common_descriptions['scale_method'], + 'crop': common_descriptions['crop_method'], + }, + + }], + "VHS_MergeImages": ['Merge Images 🎥🅥🅗🅢', short_desc('Combine two groups of images into a single group of images'), + {'Inputs': { + 'images_A': 'The first group of images', + 'images_B': 'The second group of images', + }, + 'Outputs': { + 'IMAGE': 'The combined group of images', + 'count': 'The length of the combined group', + }, + 'Widgets': { + 'merge_strategy': common_descriptions['merge_strategy'], + 'scale_method': common_descriptions['scale_method'], + 'crop': common_descriptions['crop_method'], + }, + + }], + "VHS_MergeMasks": ['Merge Masks 🎥🅥🅗🅢', short_desc('Combine two groups of masks into a single group of masks'), + {'Inputs': { + 'mask_A': 'The first group of masks', + 'mask_B': 'The second group of masks', + }, + 'Outputs': { + 'MASK': 'The combined group of masks', + 'count': 'The length of the combined group', + }, + 'Widgets': { + 'merge_strategy': common_descriptions['merge_strategy'], + 'scale_method': common_descriptions['scale_method'], + 'crop': common_descriptions['crop_method'], + }, + + }], + "VHS_GetLatentCount": format_type(common_descriptions['GetCount'], 'latent'), + "VHS_GetImageCount": format_type(common_descriptions['GetCount'], 'image'), + "VHS_GetMaskCount": format_type(common_descriptions['GetCount'], 'mask'), + "VHS_DuplicateLatents": ['Repeat Latents 🎥🅥🅗🅢', short_desc('Append copies of a latent to itself so it repeats'), + {'Inputs': { + 'latents': 'The latents to be repeated', + }, + 'Outputs': { + 'LATENT': 'The latent with repeats', + 'count': 'The number of latents in the output. Equal to the length of the input latent * multiply_by', + }, + 'Widgets': { + 'multiply_by': 'Controls the number of times the latent should repeat. 1, the default, means no change.', + }, + }], + "VHS_DuplicateImages": ['Repeat Images 🎥🅥🅗🅢', short_desc('Append copies of an image to itself so it repeats'), + {'Inputs': { + 'IMAGES': 'The image to be repeated', + }, + 'Outputs': { + 'IMAGE': 'The image with repeats', + 'count': 'The number of images in the output. Equal to the length of the input image * multiply_by', + }, + 'Widgets': { + 'multiply_by': 'Controls the number of times the image should repeat. 1, the default, means no change.', + }, + }], + "VHS_DuplicateMasks": ['Repeat Masks 🎥🅥🅗🅢', short_desc('Append copies of a mask to itself so it repeats'), + {'Inputs': { + 'masks': 'The masks to be repeated', + }, + 'Outputs': { + 'MASK': 'The mask with repeats', + 'count': 'The number of masks in the output. Equal to the length of the input mask * multiply_by', + }, + 'Widgets': { + 'multiply_by': 'Controls the number of times the mask should repeat. 
1, the default, means no change.', + }, + }], + "VHS_SelectEveryNthLatent": format_type(common_descriptions['SelectEveryNth'], 'latent'), + "VHS_SelectEveryNthImage": format_type(common_descriptions['SelectEveryNth'], 'image'), + #TODO: fix discrepancy of input being mask instead of masks? + "VHS_SelectEveryNthMask": format_type(common_descriptions['SelectEveryNth'], 'mask', lowers='mask'), + #TODO: port documentation for select nodes to new system + #"VHS_SelectLatents": None, + #"VHS_SelectImages": None, + #"VHS_SelectMasks": None, + "VHS_Unbatch": ['Unbatch 🎥🅥🅗🅢', short_desc('Unbatch a list of items into a single concatenated item'), + "Useful for when you want a single video output from a complex workflow", + "Has no relation to the Meta Batch system of VHS", + {'Inputs': { + 'batched': 'Any input which may or may not be batched', + }, + 'Outputs': { + 'unbatched': 'A single output element. Torch tensors are concatenated across dim 0, all other types are added which functions as concatenation for strings and arrays, but may give undesired results for other types', + }, + }], + "VHS_SelectLatest": ['Select Latest 🎥🅥🅗🅢', short_desc('Experimental virtual node to select the most recently modified file from a given folder'), + "Assists in the creation of workflows where outputs from one execution are used elsewhere in subsequent executions.", + {'Inputs': { + 'filename_prefix': 'A path which can consist of a combination of folders and a prefix which candidate files must match', + 'filename_postfix': 'A string which the selected file must end with. Useful for limiting to a target extension.', + }, + 'Outputs': { + 'Filename': 'A string representing a file path to the most recently modified file.', + }, + }], +} + +def as_html(entry, depth=0): + if isinstance(entry, dict): + size = 0.8 if depth < 2 else 1 + html = '' + for k in entry: + if k == "collapsed": + continue + collapse_single = k.endswith("_collapsed") + if collapse_single: + name = k[:-len("_collapsed")] + else: + name = k + collapse_flag = ' VHS_precollapse' if entry.get("collapsed", False) or collapse_single else '' + html += f'<div vhs_title="{name}" class="VHS_collapse{collapse_flag}" style="font-size: {size}em">[-]{name}: {as_html(entry[k], depth=depth+1)}</div>' + return html + if isinstance(entry, list): + if depth == 0: + depth += 1 + size = .8 + else: + size = 1 + html = f'<div style="font-size: {size}em">{as_html(entry[0], depth=depth)}</div>' + for i in entry[1:]: + html += f'<div>{as_html(i, depth=depth)}</div>' + return html + return str(entry) + +def format_descriptions(nodes): + for k in descriptions: + if k.endswith("_collapsed"): + k = k[:-len("_collapsed")] + nodes[k].DESCRIPTION = as_html(descriptions[k]) + undocumented_nodes = [] + for k in nodes: + if not hasattr(nodes[k], "DESCRIPTION"): + undocumented_nodes.append(k) + if len(undocumented_nodes) > 0: + logger.warning('Some nodes have not been documented %s', undocumented_nodes) + diff --git a/zavodik/nodes/ComfyUI-VideoHelperSuite/videohelpersuite/image_latent_nodes.py b/zavodik/nodes/ComfyUI-VideoHelperSuite/videohelpersuite/image_latent_nodes.py new file mode 100644 index 0000000000000000000000000000000000000000..6cdf39cd5b54411f370504046cb262f3e89e098f --- /dev/null +++ b/zavodik/nodes/ComfyUI-VideoHelperSuite/videohelpersuite/image_latent_nodes.py @@ -0,0 +1,554 @@ +from torch import Tensor +import torch + +import comfy.utils + +from .utils import BIGMIN, BIGMAX, select_indexes_from_str, convert_str_to_indexes, select_indexes + + +class MergeStrategies: + MATCH_A = "match A" + MATCH_B = "match B" + MATCH_SMALLER = "match smaller" + MATCH_LARGER = "match larger" + + list_all = [MATCH_A, MATCH_B, MATCH_SMALLER, MATCH_LARGER]
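+# Hedged sketch (added for clarity, not part of the upstream file): the
+# template-selection rule that the Merge* nodes below implement inline,
+# expressed as a pure function answering "use A as the size template?".
+def _use_A_as_template_sketch(merge_strategy: str, A_size: int, B_size: int) -> bool:
+    if merge_strategy == MergeStrategies.MATCH_B:
+        return False
+    if merge_strategy == MergeStrategies.MATCH_SMALLER:
+        return A_size <= B_size
+    if merge_strategy == MergeStrategies.MATCH_LARGER:
+        return A_size > B_size
+    return True  # MATCH_A (and anything unrecognized) falls back to A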
class ScaleMethods: + NEAREST_EXACT = "nearest-exact" + BILINEAR = "bilinear" + AREA = "area" + BICUBIC = "bicubic" + BISLERP = "bislerp" + + list_all = [NEAREST_EXACT, BILINEAR, AREA, BICUBIC, BISLERP] + + +class CropMethods: + DISABLED = "disabled" + CENTER = "center" + + list_all = [DISABLED, CENTER] + + +class SplitLatents: + @classmethod + def INPUT_TYPES(s): + return { + "required": { + "latents": ("LATENT",), + "split_index": ("INT", {"default": 0, "step": 1, "min": BIGMIN, "max": BIGMAX}), + }, + } + + CATEGORY = "Video Helper Suite 🎥🅥🅗🅢/latent" + + RETURN_TYPES = ("LATENT", "INT", "LATENT", "INT") + RETURN_NAMES = ("LATENT_A", "A_count", "LATENT_B", "B_count") + FUNCTION = "split_latents" + + def split_latents(self, latents: dict[str, Tensor], split_index: int): + latents_len = len(latents["samples"]) + group_a = latents.copy() + group_b = latents.copy() + for key, val in latents.items(): + if type(val) == Tensor and len(val) == latents_len: + group_a[key] = latents[key][:split_index] + group_b[key] = latents[key][split_index:] + return (group_a, group_a["samples"].size(0), group_b, group_b["samples"].size(0)) + + +class SplitImages: + @classmethod + def INPUT_TYPES(s): + return { + "required": { + "images": ("IMAGE",), + "split_index": ("INT", {"default": 0, "step": 1, "min": BIGMIN, "max": BIGMAX}), + }, + } + + CATEGORY = "Video Helper Suite 🎥🅥🅗🅢/image" + + RETURN_TYPES = ("IMAGE", "INT", "IMAGE", "INT") + RETURN_NAMES = ("IMAGE_A", "A_count", "IMAGE_B", "B_count") + FUNCTION = "split_images" + + def split_images(self, images: Tensor, split_index: int): + group_a = images[:split_index] + group_b = images[split_index:] + return (group_a, group_a.size(0), group_b, group_b.size(0)) + + +class SplitMasks: + @classmethod + def INPUT_TYPES(s): + return { + "required": { + "mask": ("MASK",), + "split_index": ("INT", {"default": 0, "step": 1, "min": BIGMIN, "max": BIGMAX}), + }, + } + + CATEGORY = "Video Helper Suite 🎥🅥🅗🅢/mask" + + RETURN_TYPES = ("MASK", "INT", "MASK", "INT") + RETURN_NAMES = ("MASK_A", "A_count", "MASK_B", "B_count") + FUNCTION = "split_masks" + + def split_masks(self, mask: Tensor, split_index: int): + group_a = mask[:split_index] + group_b = mask[split_index:] + return (group_a, group_a.size(0), group_b, group_b.size(0)) + + +class MergeLatents: + @classmethod + def INPUT_TYPES(s): + return { + "required": { + "latents_A": ("LATENT",), + "latents_B": ("LATENT",), + "merge_strategy": (MergeStrategies.list_all,), + "scale_method": (ScaleMethods.list_all,), + "crop": (CropMethods.list_all,), + } + } + + CATEGORY = "Video Helper Suite 🎥🅥🅗🅢/latent" + + RETURN_TYPES = ("LATENT", "INT",) + RETURN_NAMES = ("LATENT", "count",) + FUNCTION = "merge" + + def merge(self, latents_A: dict, latents_B: dict, merge_strategy: str, scale_method: str, crop: str): + latents = [] + latents_A = latents_A.copy()["samples"] + latents_B = latents_B.copy()["samples"] + + # TODO: handle other properties on latents besides just "samples" + # if not same dimensions, do scaling + if latents_A.shape[3] != latents_B.shape[3] or latents_A.shape[2] != latents_B.shape[2]: + A_size = latents_A.shape[3] * latents_A.shape[2] + B_size = latents_B.shape[3] * latents_B.shape[2] + # determine which to use + use_A_as_template = True + if merge_strategy == MergeStrategies.MATCH_A: + pass + elif merge_strategy == MergeStrategies.MATCH_B: + use_A_as_template = False + elif merge_strategy in (MergeStrategies.MATCH_SMALLER, MergeStrategies.MATCH_LARGER): + if A_size <= B_size: +
use_A_as_template = True if merge_strategy == MergeStrategies.MATCH_SMALLER else False + else: + use_A_as_template = False if merge_strategy == MergeStrategies.MATCH_SMALLER else True + # apply scaling + if use_A_as_template: + latents_B = comfy.utils.common_upscale(latents_B, latents_A.shape[3], latents_A.shape[2], scale_method, crop) + else: + latents_A = comfy.utils.common_upscale(latents_A, latents_B.shape[3], latents_B.shape[2], scale_method, crop) + + latents.append(latents_A) + latents.append(latents_B) + + merged = {"samples": torch.cat(latents, dim=0)} + return (merged, len(merged["samples"]),) + + +class MergeImages: + @classmethod + def INPUT_TYPES(s): + return { + "required": { + "images_A": ("IMAGE",), + "images_B": ("IMAGE",), + "merge_strategy": (MergeStrategies.list_all,), + "scale_method": (ScaleMethods.list_all,), + "crop": (CropMethods.list_all,), + } + } + + CATEGORY = "Video Helper Suite 🎥🅥🅗🅢/image" + + RETURN_TYPES = ("IMAGE", "INT",) + RETURN_NAMES = ("IMAGE", "count",) + FUNCTION = "merge" + + def merge(self, images_A: Tensor, images_B: Tensor, merge_strategy: str, scale_method: str, crop: str): + images = [] + # if not same dimensions, do scaling + if images_A.shape[3] != images_B.shape[3] or images_A.shape[2] != images_B.shape[2]: + images_A = images_A.movedim(-1,1) + images_B = images_B.movedim(-1,1) + + A_size = images_A.shape[3] * images_A.shape[2] + B_size = images_B.shape[3] * images_B.shape[2] + # determine which to use + use_A_as_template = True + if merge_strategy == MergeStrategies.MATCH_A: + pass + elif merge_strategy == MergeStrategies.MATCH_B: + use_A_as_template = False + elif merge_strategy in (MergeStrategies.MATCH_SMALLER, MergeStrategies.MATCH_LARGER): + if A_size <= B_size: + use_A_as_template = True if merge_strategy == MergeStrategies.MATCH_SMALLER else False + else: + use_A_as_template = False if merge_strategy == MergeStrategies.MATCH_SMALLER else True + # apply scaling + if use_A_as_template: + images_B = comfy.utils.common_upscale(images_B, images_A.shape[3], images_A.shape[2], scale_method, crop) + else: + images_A = comfy.utils.common_upscale(images_A, images_B.shape[3], images_B.shape[2], scale_method, crop) + images_A = images_A.movedim(1,-1) + images_B = images_B.movedim(1,-1) + + images.append(images_A) + images.append(images_B) + all_images = torch.cat(images, dim=0) + return (all_images, all_images.size(0),) + + +class MergeMasks: + @classmethod + def INPUT_TYPES(s): + return { + "required": { + "mask_A": ("MASK",), + "mask_B": ("MASK",), + "merge_strategy": (MergeStrategies.list_all,), + "scale_method": (ScaleMethods.list_all,), + "crop": (CropMethods.list_all,), + } + } + + CATEGORY = "Video Helper Suite 🎥🅥🅗🅢/mask" + + RETURN_TYPES = ("MASK", "INT",) + RETURN_NAMES = ("MASK", "count",) + FUNCTION = "merge" + + def merge(self, mask_A: Tensor, mask_B: Tensor, merge_strategy: str, scale_method: str, crop: str): + masks = [] + # if not same dimensions, do scaling + if mask_A.shape[2] != mask_B.shape[2] or mask_A.shape[1] != mask_B.shape[1]: + A_size = mask_A.shape[2] * mask_A.shape[1] + B_size = mask_B.shape[2] * mask_B.shape[1] + # determine which to use + use_A_as_template = True + if merge_strategy == MergeStrategies.MATCH_A: + pass + elif merge_strategy == MergeStrategies.MATCH_B: + use_A_as_template = False + elif merge_strategy in (MergeStrategies.MATCH_SMALLER, MergeStrategies.MATCH_LARGER): + if A_size <= B_size: + use_A_as_template = True if merge_strategy == MergeStrategies.MATCH_SMALLER else False + else: + use_A_as_template = False if merge_strategy == MergeStrategies.MATCH_SMALLER else True + # add dimension where image channels would be expected to work with common_upscale + mask_A = torch.unsqueeze(mask_A, 1) + mask_B = torch.unsqueeze(mask_B, 1) + # apply scaling + if use_A_as_template: + mask_B = 
comfy.utils.common_upscale(mask_B, mask_A.shape[3], mask_A.shape[2], scale_method, crop) + else: + mask_A = comfy.utils.common_upscale(mask_A, mask_B.shape[3], mask_B.shape[2], scale_method, crop) + # undo dimension increase + mask_A = torch.squeeze(mask_A, 1) + mask_B = torch.squeeze(mask_B, 1) + + masks.append(mask_A) + masks.append(mask_B) + all_masks = torch.cat(masks, dim=0) + return (all_masks, all_masks.size(0),) + + +class SelectEveryNthLatent: + @classmethod + def INPUT_TYPES(s): + return { + "required": { + "latents": ("LATENT",), + "select_every_nth": ("INT", {"default": 1, "min": 1, "max": BIGMAX, "step": 1}), + "skip_first_latents": ("INT", {"default": 0, "min": 0, "max": BIGMAX, "step": 1}), + }, + } + + CATEGORY = "Video Helper Suite 🎥🅥🅗🅢/latent" + + RETURN_TYPES = ("LATENT", "INT",) + RETURN_NAMES = ("LATENT", "count",) + FUNCTION = "select_latents" + + def select_latents(self, latents: dict[str, Tensor], select_every_nth: int, skip_first_latents: int): + latents = latents.copy() + latents_len = len(latents["samples"]) + for key, val in latents.items(): + if type(val) == Tensor and len(val) == latents_len: + latents[key] = val[skip_first_latents::select_every_nth] + return (latents, latents["samples"].size(0)) + + +class SelectEveryNthImage: + @classmethod + def INPUT_TYPES(s): + return { + "required": { + "images": ("IMAGE",), + "select_every_nth": ("INT", {"default": 1, "min": 1, "max": BIGMAX, "step": 1}), + "skip_first_images": ("INT", {"default": 0, "min": 0, "max": BIGMAX, "step": 1}), + + }, + } + + CATEGORY = "Video Helper Suite 🎥🅥🅗🅢/image" + + RETURN_TYPES = ("IMAGE", "INT",) + RETURN_NAMES = ("IMAGE", "count",) + FUNCTION = "select_images" + + def select_images(self, images: Tensor, select_every_nth: int, skip_first_images: int): + sub_images = images[skip_first_images::select_every_nth] + return (sub_images, sub_images.size(0)) + + +class SelectEveryNthMask: + @classmethod + def INPUT_TYPES(s): + return { + "required": { + "mask": ("MASK",), + "select_every_nth": ("INT", {"default": 1, "min": 1, "max": BIGMAX, "step": 1}), + "skip_first_masks": ("INT", {"default": 0, "min": 0, "max": BIGMAX, "step": 1}), + }, + } + + CATEGORY = "Video Helper Suite 🎥🅥🅗🅢/mask" + + RETURN_TYPES = ("MASK", "INT",) + RETURN_NAMES = ("MASK", "count",) + FUNCTION = "select_masks" + + def select_masks(self, mask: Tensor, select_every_nth: int, skip_first_masks: int): + sub_mask = mask[skip_first_masks::select_every_nth] + return (sub_mask, sub_mask.size(0)) + + +class GetLatentCount: + @classmethod + def INPUT_TYPES(s): + return { + "required": { + "latents": ("LATENT",), + } + } + + CATEGORY = "Video Helper Suite 🎥🅥🅗🅢/latent" + + RETURN_TYPES = ("INT",) + RETURN_NAMES = ("count",) + FUNCTION = "count_input" + + def count_input(self, latents: dict): + return (latents["samples"].size(0),) + + +class GetImageCount: + @classmethod + def INPUT_TYPES(s): + return { + "required": { + "images": ("IMAGE",), + } + } + + CATEGORY = "Video Helper Suite 🎥🅥🅗🅢/image" + + RETURN_TYPES = ("INT",) + RETURN_NAMES = ("count",) + FUNCTION = "count_input" + + def count_input(self, images: Tensor): + return (images.size(0),) + + +class GetMaskCount: + @classmethod + def INPUT_TYPES(s): + return { + "required": { + "mask": ("MASK",), + } + } + + CATEGORY = "Video Helper Suite 🎥🅥🅗🅢/mask" + + RETURN_TYPES = ("INT",) + RETURN_NAMES = ("count",) + FUNCTION = "count_input" + + def count_input(self, mask: Tensor): + return (mask.size(0),)
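+# Hedged aside (illustration only, not part of the upstream file): the Repeat*
+# nodes below build multiply_by copies with a list plus torch.cat; for a plain
+# tensor this is equivalent to a single repeat along the batch dimension.
+def _repeat_batch_sketch(t: torch.Tensor, multiply_by: int) -> torch.Tensor:
+    # e.g. images of shape (B, H, W, C) become (B * multiply_by, H, W, C)
+    return t.repeat(multiply_by, *([1] * (t.dim() - 1)))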
"required": { + "latents": ("LATENT",), + "multiply_by": ("INT", {"default": 1, "min": 1, "max": BIGMAX, "step": 1}) + } + } + + CATEGORY = "Video Helper Suite 🎥🅥🅗🅢/latent" + + RETURN_TYPES = ("LATENT", "INT",) + RETURN_NAMES = ("LATENT", "count",) + FUNCTION = "duplicate_input" + + def duplicate_input(self, latents: dict[str, Tensor], multiply_by: int): + latents = latents.copy() + latents_len = len(latents["samples"]) + for key, val in latents.items(): + if type(val) == Tensor and len(val) == latents_len: + full_latents = [] + for _ in range(0, multiply_by): + full_latents.append(latents[key]) + latents[key] = torch.cat(full_latents, dim=0) + return (latents, latents["samples"].size(0),) + + +class RepeatImages: + @classmethod + def INPUT_TYPES(s): + return { + "required": { + "images": ("IMAGE",), + "multiply_by": ("INT", {"default": 1, "min": 1, "max": BIGMAX, "step": 1}) + } + } + + CATEGORY = "Video Helper Suite 🎥🅥🅗🅢/image" + + RETURN_TYPES = ("IMAGE", "INT",) + RETURN_NAMES = ("IMAGE", "count",) + FUNCTION = "duplicate_input" + + def duplicate_input(self, images: Tensor, multiply_by: int): + full_images = [] + for n in range(0, multiply_by): + full_images.append(images) + new_images = torch.cat(full_images, dim=0) + return (new_images, new_images.size(0),) + + +class RepeatMasks: + @classmethod + def INPUT_TYPES(s): + return { + "required": { + "mask": ("MASK",), + "multiply_by": ("INT", {"default": 1, "min": 1, "max": BIGMAX, "step": 1}) + } + } + + CATEGORY = "Video Helper Suite 🎥🅥🅗🅢/mask" + + RETURN_TYPES = ("MASK", "INT",) + RETURN_NAMES = ("MASK", "count",) + FUNCTION = "duplicate_input" + + def duplicate_input(self, mask: Tensor, multiply_by: int): + full_masks = [] + for n in range(0, multiply_by): + full_masks.append(mask) + new_mask = torch.cat(full_masks, dim=0) + return (new_mask, new_mask.size(0),) + + +select_description = """Use comma-separated indexes to select items in the given order. +Supports negative indexes, python-style ranges (end index excluded), +as well as range step. 
select_description = """Use comma-separated indexes to select items in the given order. +Supports negative indexes, python-style ranges (end index excluded), +as well as range step. + +Acceptable entries (assuming 16 items provided, so idxs 0 to 15 exist): +0 -> Returns [0] +-1 -> Returns [15] +0, 1, 13 -> Returns [0, 1, 13] +0:5, 13 -> Returns [0, 1, 2, 3, 4, 13] +0:-1 -> Returns [0, 1, 2, ..., 13, 14] +0:5:-1 -> Returns [4, 3, 2, 1, 0] +0:5:2 -> Returns [0, 2, 4] +::-1 -> Returns [15, 14, 13, ..., 2, 1, 0] +""" +class SelectLatents: + @classmethod + def INPUT_TYPES(s): + return { + "required": { + "latent": ("LATENT",), + "indexes": ("STRING", {"default": "0"}), + "err_if_missing": ("BOOLEAN", {"default": True}), + "err_if_empty": ("BOOLEAN", {"default": True}), + }, + } + + DESCRIPTION = select_description + CATEGORY = "Video Helper Suite 🎥🅥🅗🅢/latent" + + RETURN_TYPES = ("LATENT",) + FUNCTION = "select" + + def select(self, latent: dict[str, Tensor], indexes: str, err_if_missing: bool, err_if_empty: bool): + # latents are a dict and may contain different stuff (like noise_mask), so need to account for it all + latent = latent.copy() + latents_len = len(latent["samples"]) + real_idxs = convert_str_to_indexes(indexes, latents_len, allow_missing=not err_if_missing) + if err_if_empty and len(real_idxs) == 0: + raise Exception(f"Nothing was selected based on indexes found in '{indexes}'.") + for key, val in latent.items(): + if type(val) == Tensor and len(val) == latents_len: + latent[key] = select_indexes(val, real_idxs) + return (latent,) + + +class SelectImages: + @classmethod + def INPUT_TYPES(s): + return { + "required": { + "image": ("IMAGE",), + "indexes": ("STRING", {"default": "0"}), + "err_if_missing": ("BOOLEAN", {"default": True}), + "err_if_empty": ("BOOLEAN", {"default": True}), + }, + } + + DESCRIPTION = select_description + CATEGORY = "Video Helper Suite 🎥🅥🅗🅢/image" + + RETURN_TYPES = ("IMAGE",) + FUNCTION = "select" + + def select(self, image: Tensor, indexes: str, err_if_missing: bool, err_if_empty: bool): + to_return = select_indexes_from_str(input_obj=image, indexes=indexes, + err_if_missing=err_if_missing, err_if_empty=err_if_empty) + return (to_return,) + + +class SelectMasks: + @classmethod + def INPUT_TYPES(s): + return { + "required": { + "mask": ("MASK",), + "indexes": ("STRING", {"default": "0"}), + "err_if_missing": ("BOOLEAN", {"default": True}), + "err_if_empty": ("BOOLEAN", {"default": True}), + }, + } + + DESCRIPTION = select_description + CATEGORY = "Video Helper Suite 🎥🅥🅗🅢/mask" + + RETURN_TYPES = ("MASK",) + FUNCTION = "select" + + def select(self, mask: Tensor, indexes: str, err_if_missing: bool, err_if_empty: bool): + return (select_indexes_from_str(input_obj=mask, indexes=indexes, + err_if_missing=err_if_missing, err_if_empty=err_if_empty),) diff --git a/zavodik/nodes/ComfyUI-VideoHelperSuite/videohelpersuite/latent_preview.py b/zavodik/nodes/ComfyUI-VideoHelperSuite/videohelpersuite/latent_preview.py new file mode 100644 index 0000000000000000000000000000000000000000..176450a610e2f198e44c083422a2c503dcce4ced --- /dev/null +++ b/zavodik/nodes/ComfyUI-VideoHelperSuite/videohelpersuite/latent_preview.py @@ -0,0 +1,114 @@ +from PIL import Image +import time +import io +import struct +from threading import Thread +import torch.nn.functional as F +import torch + +import latent_preview +import server +serv = server.PromptServer.instance + +from .utils import hook + +rates_table = {'Mochi': 24//6, 'LTXV': 24//8, 'HunyuanVideo': 24//4, + 'Cosmos1CV8x8x8': 24//8, 'Wan21': 16//4, 'Wan22': 24//4}
class WrappedPreviewer(latent_preview.LatentPreviewer): + def __init__(self, previewer, rate=8): + self.first_preview = True + self.last_time = 0 + self.c_index = 0 + self.rate = rate + if hasattr(previewer, 'taesd'): + self.taesd = previewer.taesd + elif hasattr(previewer, 'latent_rgb_factors'): + self.latent_rgb_factors = previewer.latent_rgb_factors + self.latent_rgb_factors_bias = previewer.latent_rgb_factors_bias + self.latent_rgb_factors_reshape = getattr(previewer, 'latent_rgb_factors_reshape', None) + else: + raise Exception('Unsupported preview type for VHS animated previews') + + def decode_latent_to_preview_image(self, preview_format, x0): + if x0.ndim == 5: + #Keep batch major + x0 = x0.movedim(2,1) + x0 = x0.reshape((-1,)+x0.shape[-3:]) + num_images = x0.size(0) + new_time = time.time() + num_previews = int((new_time - self.last_time) * self.rate) + self.last_time = self.last_time + num_previews/self.rate + if num_previews > num_images: + num_previews = num_images + elif num_previews <= 0: + return None + if self.first_preview: + self.first_preview = False + serv.send_sync('VHS_latentpreview', {'length':num_images, 'rate': self.rate, 'id': serv.last_node_id}) + self.last_time = new_time + 1/self.rate + if self.c_index + num_previews > num_images: + x0 = x0.roll(-self.c_index, 0)[:num_previews] + else: + x0 = x0[self.c_index:self.c_index + num_previews] + Thread(target=self.process_previews, args=(x0, self.c_index, + num_images)).run() + self.c_index = (self.c_index + num_previews) % num_images + return None + def process_previews(self, image_tensor, ind, leng): + image_tensor = self.decode_latent_to_preview(image_tensor) + if image_tensor.size(1) > 512 or image_tensor.size(2) > 512: + image_tensor = image_tensor.movedim(-1,0) + if image_tensor.size(2) < image_tensor.size(3): + height = (512 * image_tensor.size(2)) // image_tensor.size(3) + image_tensor = F.interpolate(image_tensor, (height,512), mode='bilinear') + else: + width = (512 * image_tensor.size(3)) // image_tensor.size(2) + image_tensor = F.interpolate(image_tensor, (512, width), mode='bilinear') + image_tensor = image_tensor.movedim(0,-1) + previews_ubyte = (((image_tensor + 1.0) / 2.0).clamp(0, 1) # change scale from -1..1 to 0..1 + .mul(0xFF) # to 0..255 + ).to(device="cpu", dtype=torch.uint8) + for preview in previews_ubyte: + i = Image.fromarray(preview.numpy()) + message = io.BytesIO() + message.write((1).to_bytes(length=4, byteorder='big')*2) + message.write(ind.to_bytes(length=4, byteorder='big')) + message.write(struct.pack('16p', serv.last_node_id.encode('ascii'))) + i.save(message, format="JPEG", quality=95, compress_level=1) + #NOTE: send sync already uses call_soon_threadsafe + serv.send_sync(server.BinaryEventTypes.PREVIEW_IMAGE, + message.getvalue(), serv.client_id) + ind = (ind + 1) % leng + def decode_latent_to_preview(self, x0): + if hasattr(self, 'taesd'): + x_sample = self.taesd.decode(x0).movedim(1, 3) + return x_sample + else: + if self.latent_rgb_factors_reshape is not None: + x0 = self.latent_rgb_factors_reshape(x0) + self.latent_rgb_factors = self.latent_rgb_factors.to(dtype=x0.dtype, device=x0.device) + if self.latent_rgb_factors_bias is not None: + self.latent_rgb_factors_bias = self.latent_rgb_factors_bias.to(dtype=x0.dtype, device=x0.device) + latent_image = F.linear(x0.movedim(1, -1), self.latent_rgb_factors, + bias=self.latent_rgb_factors_bias) + return latent_image
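+# Hedged sketch of the `hook` helper imported from .utils (an assumption about
+# its behavior, inferred from the __wrapped__ usage just below): it swaps
+# module.attr for the decorated function while keeping the original reachable.
+def _hook_sketch(module, attr):
+    def decorator(fn):
+        fn.__wrapped__ = getattr(module, attr)  # original kept as __wrapped__
+        setattr(module, attr, fn)               # module now serves the wrapper
+        return fn
+    return decorator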
*args, **kwargs) + try: + extra_info = next(serv.prompt_queue.currently_running.values().__iter__()) \ + [3]['extra_pnginfo']['workflow']['extra'] + prev_setting = extra_info.get('VHS_latentpreview', False) + if extra_info.get('VHS_latentpreviewrate', 0) != 0: + rate_setting = extra_info['VHS_latentpreviewrate'] + else: + rate_setting = rates_table.get(latent_format.__class__.__name__, 8) + except: + #For safety since there's lots of keys, any of which can fail + prev_setting = False + if not prev_setting or not hasattr(previewer, "decode_latent_to_preview"): + return previewer + return WrappedPreviewer(previewer, rate_setting) diff --git a/zavodik/nodes/ComfyUI-VideoHelperSuite/videohelpersuite/load_images_nodes.py b/zavodik/nodes/ComfyUI-VideoHelperSuite/videohelpersuite/load_images_nodes.py new file mode 100644 index 0000000000000000000000000000000000000000..63a6e881206252dd60916cdbb2c6258f26e6f055 --- /dev/null +++ b/zavodik/nodes/ComfyUI-VideoHelperSuite/videohelpersuite/load_images_nodes.py @@ -0,0 +1,206 @@ +import os +import hashlib +import numpy as np +import torch +from PIL import Image, ImageOps +import itertools + +import folder_paths +from comfy.k_diffusion.utils import FolderOfImages +from comfy.utils import common_upscale, ProgressBar +from .logger import logger +from .utils import BIGMAX, calculate_file_hash, get_sorted_dir_files_from_directory, validate_path, strip_path + + +def is_changed_load_images(directory: str, image_load_cap: int = 0, skip_first_images: int = 0, select_every_nth: int = 1, **kwargs): + if not os.path.isdir(directory): + return False + + dir_files = get_sorted_dir_files_from_directory(directory, skip_first_images, select_every_nth, FolderOfImages.IMG_EXTENSIONS) + if image_load_cap != 0: + dir_files = dir_files[:image_load_cap] + + m = hashlib.sha256() + for filepath in dir_files: + m.update(calculate_file_hash(filepath).encode()) # strings must be encoded before hashing + return m.digest().hex() + + +def validate_load_images(directory: str): + if not os.path.isdir(directory): + return f"Directory '{directory}' cannot be found." + dir_files = os.listdir(directory) + if len(dir_files) == 0: + return f"No files in directory '{directory}'." 
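+    # Editor's note: returning an error string rather than raising follows
+    # ComfyUI's VALIDATE_INPUTS convention, where a returned string is shown
+    # to the user as the validation error and True marks the input as valid.
+    # A minimal sketch of consuming this helper directly (hypothetical path):
+    #
+    #   result = validate_load_images("/path/that/does/not/exist")
+    #   if result is not True:
+    #       print(result)  # "Directory '/path/that/does/not/exist' cannot be found."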
+
+    return True
+
+def images_generator(directory: str, image_load_cap: int = 0, skip_first_images: int = 0, select_every_nth: int = 1, meta_batch=None, unique_id=None):
+    if not os.path.isdir(directory):
+        raise FileNotFoundError(f"Directory '{directory}' cannot be found.")
+    dir_files = get_sorted_dir_files_from_directory(directory, skip_first_images, select_every_nth, FolderOfImages.IMG_EXTENSIONS)
+
+    if len(dir_files) == 0:
+        raise FileNotFoundError(f"No files in directory '{directory}'.")
+    if image_load_cap > 0:
+        dir_files = dir_files[:image_load_cap]
+    sizes = {}
+    has_alpha = False
+    for image_path in dir_files:
+        i = Image.open(image_path)
+        #exif_transpose can only ever rotate, but rotating can swap width/height
+        i = ImageOps.exif_transpose(i)
+        has_alpha |= 'A' in i.getbands()
+        count = sizes.get(i.size, 0)
+        sizes[i.size] = count + 1
+    size = max(sizes.items(), key=lambda x: x[1])[0]
+    yield size[0], size[1], has_alpha
+    if meta_batch is not None:
+        yield min(image_load_cap, len(dir_files)) or len(dir_files)
+
+    iformat = "RGBA" if has_alpha else "RGB"
+    def load_image(file_path):
+        i = Image.open(file_path)
+        i = ImageOps.exif_transpose(i)
+        i = i.convert(iformat)
+        i = np.array(i, dtype=np.float32)
+        #This nonsense provides a nearly 50% speedup on my system
+        torch.from_numpy(i).div_(255)
+        if i.shape[0] != size[1] or i.shape[1] != size[0]:
+            i = torch.from_numpy(i).movedim(-1, 0).unsqueeze(0)
+            i = common_upscale(i, size[0], size[1], "lanczos", "center")
+            i = i.squeeze(0).movedim(0, -1).numpy()
+        if has_alpha:
+            i[:,:,-1] = 1 - i[:,:,-1]
+        return i
+
+    total_images = len(dir_files)
+    processed_images = 0
+    pbar = ProgressBar(total_images)
+    images = map(load_image, dir_files)
+    try:
+        prev_image = next(images)
+        while True:
+            next_image = next(images)
+            yield prev_image
+            processed_images += 1
+            pbar.update_absolute(processed_images, total_images)
+            prev_image = next_image
+    except StopIteration:
+        pass
+    if meta_batch is not None:
+        meta_batch.inputs.pop(unique_id)
+        meta_batch.has_closed_inputs = True
+    if prev_image is not None:
+        yield prev_image
+
+
+def load_images(directory: str, image_load_cap: int = 0, skip_first_images: int = 0, select_every_nth: int = 1, meta_batch=None, unique_id=None):
+    if meta_batch is None or unique_id not in meta_batch.inputs:
+        gen = images_generator(directory, image_load_cap, skip_first_images, select_every_nth, meta_batch, unique_id)
+        (width, height, has_alpha) = next(gen)
+        if meta_batch is not None:
+            meta_batch.inputs[unique_id] = (gen, width, height, has_alpha)
+            meta_batch.total_frames = min(meta_batch.total_frames, next(gen))
+    else:
+        gen, width, height, has_alpha = meta_batch.inputs[unique_id]
+
+    if meta_batch is not None:
+        gen = itertools.islice(gen, meta_batch.frames_per_batch)
+    images = torch.from_numpy(np.fromiter(gen, np.dtype((np.float32, (height, width, 3 + has_alpha)))))
+    if has_alpha:
+        #tensors are not contiguous.
Rewrite will be required if this is an issue + masks = images[:,:,:,3] + images = images[:,:,:,:3] + else: + masks = torch.zeros((images.size(0), 64, 64), dtype=torch.float32, device="cpu") + if len(images) == 0: + raise FileNotFoundError(f"No images could be loaded from directory '{directory}'.") + return images, masks, images.size(0) + +class LoadImagesFromDirectoryUpload: + @classmethod + def INPUT_TYPES(s): + input_dir = folder_paths.get_input_directory() + directories = [] + for item in os.listdir(input_dir): + if not os.path.isfile(os.path.join(input_dir, item)) and item != "clipspace": + directories.append(item) + return { + "required": { + "directory": (directories,), + }, + "optional": { + "image_load_cap": ("INT", {"default": 0, "min": 0, "max": BIGMAX, "step": 1}), + "skip_first_images": ("INT", {"default": 0, "min": 0, "max": BIGMAX, "step": 1}), + "select_every_nth": ("INT", {"default": 1, "min": 1, "max": BIGMAX, "step": 1}), + "meta_batch": ("VHS_BatchManager",), + }, + "hidden": { + "unique_id": "UNIQUE_ID" + }, + } + + RETURN_TYPES = ("IMAGE", "MASK", "INT") + RETURN_NAMES = ("IMAGE", "MASK", "frame_count") + FUNCTION = "load_images" + + CATEGORY = "Video Helper Suite 🎥🅥🅗🅢" + + def load_images(self, directory: str, **kwargs): + directory = folder_paths.get_annotated_filepath(strip_path(directory)) + return load_images(directory, **kwargs) + + @classmethod + def IS_CHANGED(s, directory: str, **kwargs): + directory = folder_paths.get_annotated_filepath(strip_path(directory)) + return is_changed_load_images(directory, **kwargs) + + @classmethod + def VALIDATE_INPUTS(s, directory: str, **kwargs): + directory = folder_paths.get_annotated_filepath(strip_path(directory)) + return validate_load_images(directory) + + +class LoadImagesFromDirectoryPath: + @classmethod + def INPUT_TYPES(s): + return { + "required": { + "directory": ("STRING", {"placeholder": "X://path/to/images", "vhs_path_extensions": []}), + }, + "optional": { + "image_load_cap": ("INT", {"default": 0, "min": 0, "max": BIGMAX, "step": 1}), + "skip_first_images": ("INT", {"default": 0, "min": 0, "max": BIGMAX, "step": 1}), + "select_every_nth": ("INT", {"default": 1, "min": 1, "max": BIGMAX, "step": 1}), + "meta_batch": ("VHS_BatchManager",), + }, + "hidden": { + "unique_id": "UNIQUE_ID" + }, + } + + RETURN_TYPES = ("IMAGE", "MASK", "INT") + RETURN_NAMES = ("IMAGE", "MASK", "frame_count") + FUNCTION = "load_images" + + CATEGORY = "Video Helper Suite 🎥🅥🅗🅢" + + def load_images(self, directory: str, **kwargs): + directory = strip_path(directory) + if directory is None or validate_load_images(directory) != True: + raise Exception("directory is not valid: " + directory) + + return load_images(directory, **kwargs) + + @classmethod + def IS_CHANGED(s, directory: str, **kwargs): + if directory is None: + return "input" + return is_changed_load_images(directory, **kwargs) + + @classmethod + def VALIDATE_INPUTS(s, directory: str, **kwargs): + if directory is None: + return True + return validate_load_images(strip_path(directory)) diff --git a/zavodik/nodes/ComfyUI-VideoHelperSuite/videohelpersuite/load_video_nodes.py b/zavodik/nodes/ComfyUI-VideoHelperSuite/videohelpersuite/load_video_nodes.py new file mode 100644 index 0000000000000000000000000000000000000000..87c729275a44cf02c0be0aed5d41796ac2c5b149 --- /dev/null +++ b/zavodik/nodes/ComfyUI-VideoHelperSuite/videohelpersuite/load_video_nodes.py @@ -0,0 +1,671 @@ +import os +import itertools +import numpy as np +import torch +from PIL import Image, ImageOps +import cv2 
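+# Editor's note: cv2 backs the default frame decoder (cv_frame_generator
+# below) and psutil is used to estimate how many frames fit in available RAM.
+# The loaders in this file follow the same protocol as load_images_nodes.py:
+# the first value a generator yields is a metadata tuple, and every later
+# yield is a single frame. A minimal consumption sketch (hypothetical file):
+#
+#   gen = cv_frame_generator("clip.mp4", force_rate=0, frame_load_cap=0,
+#                            skip_first_frames=0, select_every_nth=1)
+#   (width, height, fps, duration, total_frames,
+#       target_frame_time, yieldable_frames) = next(gen)
+#   frames = list(gen)  # each frame is a float32 HxWx3 array in [0, 1]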
+import psutil +import subprocess +import re +import time + +import folder_paths +from comfy.utils import common_upscale, ProgressBar +import nodes +from comfy.k_diffusion.utils import FolderOfImages +from .logger import logger +from .utils import BIGMAX, DIMMAX, calculate_file_hash, get_sorted_dir_files_from_directory,\ + lazy_get_audio, hash_path, validate_path, strip_path, try_download_video, \ + is_url, imageOrLatent, ffmpeg_path, ENCODE_ARGS, floatOrInt + + +video_extensions = ['webm', 'mp4', 'mkv', 'gif', 'mov'] + +VHSLoadFormats = { + 'None': {}, + 'AnimateDiff': {'target_rate': 8, 'dim': (8,0,512,512)}, + 'Mochi': {'target_rate': 24, 'dim': (16,0,848,480), 'frames':(6,1)}, + 'LTXV': {'target_rate': 24, 'dim': (32,0,768,512), 'frames':(8,1)}, + 'Hunyuan': {'target_rate': 24, 'dim': (16,0,848,480), 'frames':(4,1)}, + 'Cosmos': {'target_rate': 24, 'dim': (16,0,1280,704), 'frames':(8,1)}, + 'Wan': {'target_rate': 16, 'dim': (8,0,832,480), 'frames':(4,1)}, +} +""" +External plugins may add additional formats to nodes.VHSLoadFormats +In addition to shorthand options, direct widget names will map a given dict to options. +Adding a third arguement to a frames tuple can enable strict checks on number +of loaded frames, i.e (8,1,True) +""" +if not hasattr(nodes, 'VHSLoadFormats'): + nodes.VHSLoadFormats = {} + +def get_load_formats(): + #TODO: check if {**extra_config.VHSLoafFormats, **VHSLoadFormats} has minimum version + formats = {} + formats.update(nodes.VHSLoadFormats) + formats.update(VHSLoadFormats) + return (list(formats.keys()), + {'default': 'AnimateDiff', 'formats': formats}) +def get_format(format): + if format in VHSLoadFormats: + return VHSLoadFormats[format] + return nodes.VHSLoadFormats.get(format, {}) + +def is_gif(filename) -> bool: + file_parts = filename.split('.') + return len(file_parts) > 1 and file_parts[-1] == "gif" + + +def target_size(width, height, custom_width, custom_height, downscale_ratio=8) -> tuple[int, int]: + if downscale_ratio is None: + downscale_ratio = 8 + if custom_width == 0 and custom_height == 0: + pass + elif custom_height == 0: + height *= custom_width/width + width = custom_width + elif custom_width == 0: + width *= custom_height/height + height = custom_height + else: + width = custom_width + height = custom_height + width = int(width/downscale_ratio + 0.5) * downscale_ratio + height = int(height/downscale_ratio + 0.5) * downscale_ratio + return (width, height) + +def cv_frame_generator(video, force_rate, frame_load_cap, skip_first_frames, + select_every_nth, meta_batch=None, unique_id=None): + video_cap = cv2.VideoCapture(video) + if not video_cap.isOpened() or not video_cap.grab(): + raise ValueError(f"{video} could not be loaded with cv.") + + # extract video metadata + fps = video_cap.get(cv2.CAP_PROP_FPS) + width = int(video_cap.get(cv2.CAP_PROP_FRAME_WIDTH)) + height = int(video_cap.get(cv2.CAP_PROP_FRAME_HEIGHT)) + total_frames = int(video_cap.get(cv2.CAP_PROP_FRAME_COUNT)) + duration = total_frames / fps + + width = 0 + + if width <=0 or height <=0: + _, frame = video_cap.retrieve() + height, width, _ = frame.shape + + # set video_cap to look at start_index frame + total_frame_count = 0 + total_frames_evaluated = -1 + frames_added = 0 + base_frame_time = 1 / fps + prev_frame = None + + if force_rate == 0: + target_frame_time = base_frame_time + else: + target_frame_time = 1/force_rate + + if total_frames > 0: + if force_rate != 0: + yieldable_frames = int(total_frames / fps * force_rate) + else: + yieldable_frames = total_frames + if 
select_every_nth: + yieldable_frames //= select_every_nth + if frame_load_cap != 0: + yieldable_frames = min(frame_load_cap, yieldable_frames) + else: + yieldable_frames = 0 + yield (width, height, fps, duration, total_frames, target_frame_time, yieldable_frames) + pbar = ProgressBar(yieldable_frames) + time_offset=target_frame_time + while video_cap.isOpened(): + if time_offset < target_frame_time: + is_returned = video_cap.grab() + # if didn't return frame, video has ended + if not is_returned: + break + time_offset += base_frame_time + if time_offset < target_frame_time: + continue + time_offset -= target_frame_time + # if not at start_index, skip doing anything with frame + total_frame_count += 1 + if total_frame_count <= skip_first_frames: + continue + else: + total_frames_evaluated += 1 + + # if should not be selected, skip doing anything with frame + if total_frames_evaluated%select_every_nth != 0: + continue + + # opencv loads images in BGR format (yuck), so need to convert to RGB for ComfyUI use + # follow up: can videos ever have an alpha channel? + # To my testing: No. opencv has no support for alpha + unused, frame = video_cap.retrieve() + frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB) + # convert frame to comfyui's expected format + # TODO: frame contains no exif information. Check if opencv2 has already applied + frame = np.array(frame, dtype=np.float32) + torch.from_numpy(frame).div_(255) + if prev_frame is not None: + inp = yield prev_frame + if inp is not None: + #ensure the finally block is called + return + prev_frame = frame + frames_added += 1 + if pbar is not None: + pbar.update_absolute(frames_added, yieldable_frames) + # if cap exists and we've reached it, stop processing frames + if frame_load_cap > 0 and frames_added >= frame_load_cap: + break + if meta_batch is not None: + meta_batch.inputs.pop(unique_id) + meta_batch.has_closed_inputs = True + if prev_frame is not None: + yield prev_frame + +def ffmpeg_frame_generator(video, force_rate, frame_load_cap, start_time, + custom_width, custom_height, downscale_ratio=8, + meta_batch=None, unique_id=None): + args_input = ["-i", video] + args_dummy = [ffmpeg_path] + args_input +['-c', 'copy', '-frames:v', '1', "-f", "null", "-"] + size_base = None + fps_base = None + try: + dummy_res = subprocess.run(args_dummy, stdout=subprocess.DEVNULL, + stderr=subprocess.PIPE, check=True) + except subprocess.CalledProcessError as e: + raise Exception("An error occurred in the ffmpeg subprocess:\n" \ + + e.stderr.decode(*ENCODE_ARGS)) + lines = dummy_res.stderr.decode(*ENCODE_ARGS) + if "Video: vp9 " in lines: + args_input = ["-c:v", "libvpx-vp9"] + args_input + args_dummy = [ffmpeg_path] + args_input +['-c', 'copy', '-frames:v', '1', "-f", "null", "-"] + try: + dummy_res = subprocess.run(args_dummy, stdout=subprocess.DEVNULL, + stderr=subprocess.PIPE, check=True) + except subprocess.CalledProcessError as e: + raise Exception("An error occurred in the ffmpeg subprocess:\n" \ + + e.stderr.decode(*ENCODE_ARGS)) + lines = dummy_res.stderr.decode(*ENCODE_ARGS) + + for line in lines.split('\n'): + match = re.search("^ *Stream .* Video.*, ([1-9]|\\d{2,})x(\\d+)", line) + if match is not None: + size_base = [int(match.group(1)), int(match.group(2))] + fps_match = re.search(", ([\\d\\.]+) fps", line) + if fps_match: + fps_base = float(fps_match.group(1)) + else: + fps_base = 1 + alpha = re.search("(yuva|rgba|bgra|gbra)", line) is not None + break + else: + raise Exception("Failed to parse video/image information. 
FFMPEG output:\n" + lines)
+
+    durs_match = re.search("Duration: (\\d+:\\d+:\\d+\\.\\d+),", lines)
+    if durs_match:
+        durs = durs_match.group(1).split(':')
+        duration = int(durs[0])*3600 + int(durs[1])*60 + float(durs[2])
+    else:
+        duration = 0
+
+    if start_time > 0:
+        if start_time > 4:
+            post_seek = ['-ss', '4']
+            args_input = ['-ss', str(start_time - 4)] + args_input
+        else:
+            post_seek = ['-ss', str(start_time)]
+    else:
+        post_seek = []
+    args_all_frames = [ffmpeg_path, "-v", "error", "-an"] + \
+            args_input + ["-pix_fmt", "rgba64le"] + post_seek
+
+    vfilters = []
+    if force_rate != 0:
+        vfilters.append("fps=fps="+str(force_rate))
+    if custom_width != 0 or custom_height != 0:
+        size = target_size(size_base[0], size_base[1], custom_width,
+                           custom_height, downscale_ratio=downscale_ratio)
+        ar = float(size[0])/float(size[1])
+        if abs(size_base[0]*ar-size_base[1]) >= 1:
+            #Aspect ratio is changed. Crop to new aspect ratio before scale
+            vfilters.append(f"crop=if(gt({ar}\\,a)\\,iw\\,ih*{ar}):if(gt({ar}\\,a)\\,iw/{ar}\\,ih)")
+        size_arg = ':'.join(map(str,size))
+        vfilters.append(f"scale={size_arg}")
+    else:
+        size = size_base
+    if len(vfilters) > 0:
+        args_all_frames += ["-vf", ",".join(vfilters)]
+    yieldable_frames = (force_rate or fps_base)*duration
+    if frame_load_cap > 0:
+        args_all_frames += ["-frames:v", str(frame_load_cap)]
+        yieldable_frames = min(yieldable_frames, frame_load_cap)
+    yield (size_base[0], size_base[1], fps_base, duration, fps_base * duration,
+           1/(force_rate or fps_base), yieldable_frames, size[0], size[1], alpha)
+
+    args_all_frames += ["-f", "rawvideo", "-"]
+    pbar = ProgressBar(yieldable_frames)
+    try:
+        with subprocess.Popen(args_all_frames, stdout=subprocess.PIPE) as proc:
+            #Manually buffer enough bytes for an image
+            bpi = size[0] * size[1] * 8
+            current_bytes = bytearray(bpi)
+            current_offset=0
+            prev_frame = None
+            while True:
+                bytes_read = proc.stdout.read(bpi - current_offset)
+                if bytes_read is None:#sleep to wait for more data
+                    time.sleep(.1)
+                    continue
+                if len(bytes_read) == 0:#EOF
+                    break
+                #write into the buffer at the current offset so partial pipe
+                #reads accumulate instead of overwriting from the start
+                current_bytes[current_offset:current_offset + len(bytes_read)] = bytes_read
+                current_offset+=len(bytes_read)
+                if current_offset == bpi:
+                    if prev_frame is not None:
+                        yield prev_frame
+                        pbar.update(1)
+                    prev_frame = np.frombuffer(current_bytes, dtype=np.dtype(np.uint16).newbyteorder("<")).reshape(size[1], size[0], 4) / (2**16-1)
+                    if not alpha:
+                        prev_frame = prev_frame[:, :, :-1]
+                    current_offset = 0
+    except BrokenPipeError as e:
+        raise Exception("An error occurred in the ffmpeg subprocess:\n" \
+                + proc.stderr.read().decode(*ENCODE_ARGS))
+    if meta_batch is not None:
+        meta_batch.inputs.pop(unique_id)
+        meta_batch.has_closed_inputs = True
+    if prev_frame is not None:
+        yield prev_frame
+
+#Python 3.12 adds an itertools.batched, but it's easily replicated for legacy support
+def batched(it, n):
+    while batch := tuple(itertools.islice(it, n)):
+        yield batch
+def batched_vae_encode(images, vae, frames_per_batch):
+    for batch in batched(images, frames_per_batch):
+        image_batch = torch.from_numpy(np.array(batch))
+        yield from vae.encode(image_batch).numpy()
+def resized_cv_frame_gen(custom_width, custom_height, downscale_ratio, **kwargs):
+    gen = cv_frame_generator(**kwargs)
+    info = next(gen)
+    width, height = info[0], info[1]
+    frames_per_batch = (1920 * 1080 * 16) // (width * height) or 1
+    if kwargs.get('meta_batch', None) is not None:
+        frames_per_batch = min(frames_per_batch, kwargs['meta_batch'].frames_per_batch)
+    if custom_width != 0 or custom_height != 0 or downscale_ratio is not None:
+        new_size = target_size(width, height, custom_width, custom_height, downscale_ratio)
+        yield (*info, new_size[0], new_size[1], False)
+        if new_size[0] != width or new_size[1] != height:
+            def rescale(frame):
+                s = torch.from_numpy(np.fromiter(frame, np.dtype((np.float32, (height, width, 3)))))
+                s = s.movedim(-1,1)
+                s = common_upscale(s, new_size[0], new_size[1], "lanczos", "center")
+                return s.movedim(1,-1).numpy()
+            yield from itertools.chain.from_iterable(map(rescale, batched(gen, frames_per_batch)))
+            return
+    else:
+        yield (*info, info[0], info[1], False)
+    yield from gen
+
+def load_video(meta_batch=None, unique_id=None, memory_limit_mb=None, vae=None,
+               generator=resized_cv_frame_gen, format='None', **kwargs):
+    if 'force_size' in kwargs:
+        kwargs.pop('force_size')
+        logger.warn("force_size has been removed. Did you reload the webpage after updating?")
+    format = get_format(format)
+    kwargs['video'] = strip_path(kwargs['video'])
+    if vae is not None:
+        downscale_ratio = getattr(vae, "downscale_ratio", 8)
+    else:
+        downscale_ratio = format.get('dim', (1,))[0]
+    if meta_batch is None or unique_id not in meta_batch.inputs:
+        gen = generator(meta_batch=meta_batch, unique_id=unique_id, downscale_ratio=downscale_ratio, **kwargs)
+        (width, height, fps, duration, total_frames, target_frame_time, yieldable_frames, new_width, new_height, alpha) = next(gen)
+
+        if meta_batch is not None:
+            meta_batch.inputs[unique_id] = (gen, width, height, fps, duration, total_frames, target_frame_time, yieldable_frames, new_width, new_height, alpha)
+            if yieldable_frames:
+                meta_batch.total_frames = min(meta_batch.total_frames, yieldable_frames)
+
+    else:
+        (gen, width, height, fps, duration, total_frames, target_frame_time, yieldable_frames, new_width, new_height, alpha) = meta_batch.inputs[unique_id]
+
+    memory_limit = None
+    if memory_limit_mb is not None:
+        #convert the user-supplied limit from MB to bytes
+        memory_limit = memory_limit_mb * 2 ** 20
+    else:
+        #TODO: verify if garbage collection should be performed here.
+        #leaves ~128 MB unreserved for safety
+        try:
+            memory_limit = (psutil.virtual_memory().available + psutil.swap_memory().free) - 2 ** 27
+        except:
+            logger.warn("Failed to calculate available memory. Memory load limit has been disabled")
+            memory_limit = BIGMAX
+    if vae is not None:
+        #space required to load as f32, exist as latent with wiggle room, decode to f32
+        max_loadable_frames = int(memory_limit//(width*height*3*(4+4+1/10)))
+    else:
+        #TODO: use better estimate for when vae is not None
+        #Consider completely ignoring for load_latent case?
+        max_loadable_frames = int(memory_limit//(width*height*3*(.1)))
+    if meta_batch is not None:
+        if 'frames' in format:
+            if meta_batch.frames_per_batch % format['frames'][0] != format['frames'][1]:
+                error = (meta_batch.frames_per_batch - format['frames'][1]) % format['frames'][0]
+                suggested = meta_batch.frames_per_batch - error
+                if error > format['frames'][0] / 2:
+                    suggested += format['frames'][0]
+                raise RuntimeError(f"The chosen frames per batch is incompatible with the selected format.
Try {suggested}") + if meta_batch.frames_per_batch > max_loadable_frames: + raise RuntimeError(f"Meta Batch set to {meta_batch.frames_per_batch} frames but only {max_loadable_frames} can fit in memory") + gen = itertools.islice(gen, meta_batch.frames_per_batch) + else: + original_gen = gen + gen = itertools.islice(gen, max_loadable_frames) + frames_per_batch = (1920 * 1080 * 16) // (width * height) or 1 + if vae is not None: + gen = batched_vae_encode(gen, vae, frames_per_batch) + vw,vh = new_width//downscale_ratio, new_height//downscale_ratio + channels = getattr(vae, 'latent_channels', 4) + images = torch.from_numpy(np.fromiter(gen, np.dtype((np.float32, (channels,vh,vw))))) + else: + #Some minor wizardry to eliminate a copy and reduce max memory by a factor of ~2 + images = torch.from_numpy(np.fromiter(gen, np.dtype((np.float32, (new_height, new_width, 4 if alpha else 3))))) + if meta_batch is None and memory_limit is not None: + try: + next(original_gen) + raise RuntimeError(f"Memory limit hit after loading {len(images)} frames. Stopping execution.") + except StopIteration: + pass + if len(images) == 0: + raise RuntimeError("No frames generated") + if 'frames' in format and len(images) % format['frames'][0] != format['frames'][1]: + err_msg = f"The number of frames loaded {len(images)}, does not match the requirements of the currently selected format." + if len(format['frames']) > 2 and format['frames'][2]: + raise RuntimeError(err_msg) + div, mod = format['frames'][:2] + frames = (len(images) - mod) // div * div + mod + images = images[:frames] + #Commenting out log message since it's displayed in UI. consider further + #logger.warn(err_msg + f" Output has been truncated to {len(images)} frames.") + if 'start_time' in kwargs: + start_time = kwargs['start_time'] + else: + start_time = kwargs['skip_first_frames'] * target_frame_time + target_frame_time *= kwargs.get('select_every_nth', 1) + #Setup lambda for lazy audio capture + audio = lazy_get_audio(kwargs['video'], start_time, kwargs['frame_load_cap']*target_frame_time) + #Adjust target_frame_time for select_every_nth + video_info = { + "source_fps": fps, + "source_frame_count": total_frames, + "source_duration": duration, + "source_width": width, + "source_height": height, + "loaded_fps": 1/target_frame_time, + "loaded_frame_count": len(images), + "loaded_duration": len(images) * target_frame_time, + "loaded_width": new_width, + "loaded_height": new_height, + } + if vae is None: + return (images, len(images), audio, video_info) + else: + return ({"samples": images}, len(images), audio, video_info) + + + +class LoadVideoUpload: + @classmethod + def INPUT_TYPES(s): + input_dir = folder_paths.get_input_directory() + files = [] + for f in os.listdir(input_dir): + if os.path.isfile(os.path.join(input_dir, f)): + file_parts = f.split('.') + if len(file_parts) > 1 and (file_parts[-1].lower() in video_extensions): + files.append(f) + return {"required": { + "video": (sorted(files),), + "force_rate": (floatOrInt, {"default": 0, "min": 0, "max": 60, "step": 1, "disable": 0}), + "custom_width": ("INT", {"default": 0, "min": 0, "max": DIMMAX, 'disable': 0}), + "custom_height": ("INT", {"default": 0, "min": 0, "max": DIMMAX, 'disable': 0}), + "frame_load_cap": ("INT", {"default": 0, "min": 0, "max": BIGMAX, "step": 1, "disable": 0}), + "skip_first_frames": ("INT", {"default": 0, "min": 0, "max": BIGMAX, "step": 1}), + "select_every_nth": ("INT", {"default": 1, "min": 1, "max": BIGMAX, "step": 1}), + }, + "optional": { + "meta_batch": 
("VHS_BatchManager",), + "vae": ("VAE",), + "format": get_load_formats(), + }, + "hidden": { + "force_size": "STRING", + "unique_id": "UNIQUE_ID" + }, + } + + CATEGORY = "Video Helper Suite 🎥🅥🅗🅢" + + RETURN_TYPES = (imageOrLatent, "INT", "AUDIO", "VHS_VIDEOINFO") + RETURN_NAMES = ("IMAGE", "frame_count", "audio", "video_info") + + FUNCTION = "load_video" + + def load_video(self, **kwargs): + kwargs['video'] = folder_paths.get_annotated_filepath(strip_path(kwargs['video'])) + return load_video(**kwargs) + + @classmethod + def IS_CHANGED(s, video, **kwargs): + image_path = folder_paths.get_annotated_filepath(video) + return calculate_file_hash(image_path) + + @classmethod + def VALIDATE_INPUTS(s, video): + if not folder_paths.exists_annotated_filepath(video): + return "Invalid video file: {}".format(video) + return True + + +class LoadVideoPath: + @classmethod + def INPUT_TYPES(s): + return { + "required": { + "video": ("STRING", {"placeholder": "X://insert/path/here.mp4", "vhs_path_extensions": video_extensions}), + "force_rate": (floatOrInt, {"default": 0, "min": 0, "max": 60, "step": 1, "disable": 0}), + "custom_width": ("INT", {"default": 0, "min": 0, "max": DIMMAX, 'disable': 0}), + "custom_height": ("INT", {"default": 0, "min": 0, "max": DIMMAX, 'disable': 0}), + "frame_load_cap": ("INT", {"default": 0, "min": 0, "max": BIGMAX, "step": 1, "disable": 0}), + "skip_first_frames": ("INT", {"default": 0, "min": 0, "max": BIGMAX, "step": 1}), + "select_every_nth": ("INT", {"default": 1, "min": 1, "max": BIGMAX, "step": 1}), + }, + "optional": { + "meta_batch": ("VHS_BatchManager",), + "vae": ("VAE",), + "format": get_load_formats(), + }, + "hidden": { + "force_size": "STRING", + "unique_id": "UNIQUE_ID" + }, + } + + CATEGORY = "Video Helper Suite 🎥🅥🅗🅢" + + RETURN_TYPES = (imageOrLatent, "INT", "AUDIO", "VHS_VIDEOINFO") + RETURN_NAMES = ("IMAGE", "frame_count", "audio", "video_info") + + FUNCTION = "load_video" + + def load_video(self, **kwargs): + if kwargs['video'] is None or validate_path(kwargs['video']) != True: + raise Exception("video is not a valid path: " + kwargs['video']) + if is_url(kwargs['video']): + kwargs['video'] = try_download_video(kwargs['video']) or kwargs['video'] + return load_video(**kwargs) + + @classmethod + def IS_CHANGED(s, video, **kwargs): + return hash_path(video) + + @classmethod + def VALIDATE_INPUTS(s, video): + return validate_path(video, allow_none=True) + +class LoadVideoFFmpegUpload: + @classmethod + def INPUT_TYPES(s): + input_dir = folder_paths.get_input_directory() + files = [] + for f in os.listdir(input_dir): + if os.path.isfile(os.path.join(input_dir, f)): + file_parts = f.split('.') + if len(file_parts) > 1 and (file_parts[-1].lower() in video_extensions): + files.append(f) + return {"required": { + "video": (sorted(files),), + "force_rate": (floatOrInt, {"default": 0, "min": 0, "max": 60, "step": 1, "disable": 0}), + "custom_width": ("INT", {"default": 0, "min": 0, "max": DIMMAX, 'disable': 0}), + "custom_height": ("INT", {"default": 0, "min": 0, "max": DIMMAX, 'disable': 0}), + "frame_load_cap": ("INT", {"default": 0, "min": 0, "max": BIGMAX, "step": 1, "disable": 0}), + "start_time": ("FLOAT", {"default": 0, "min": 0, "max": BIGMAX, "step": .001, "widgetType": "VHSTIMESTAMP"}), + }, + "optional": { + "meta_batch": ("VHS_BatchManager",), + "vae": ("VAE",), + "format": get_load_formats(), + }, + "hidden": { + "force_size": "STRING", + "unique_id": "UNIQUE_ID" + + }, + } + + CATEGORY = "Video Helper Suite 🎥🅥🅗🅢" + + RETURN_TYPES = (imageOrLatent, 
"MASK", "AUDIO", "VHS_VIDEOINFO") + RETURN_NAMES = ("IMAGE", "mask", "audio", "video_info") + + FUNCTION = "load_video" + + def load_video(self, **kwargs): + kwargs['video'] = folder_paths.get_annotated_filepath(strip_path(kwargs['video'])) + image, _, audio, video_info = load_video(**kwargs, generator=ffmpeg_frame_generator) + if image.size(3) == 4: + return (image[:,:,:,:3], 1-image[:,:,:,3], audio, video_info) + return (image, torch.zeros(image.size(0), 64, 64, device="cpu"), audio, video_info) + + @classmethod + def IS_CHANGED(s, video, **kwargs): + image_path = folder_paths.get_annotated_filepath(video) + return calculate_file_hash(image_path) + + @classmethod + def VALIDATE_INPUTS(s, video): + if not folder_paths.exists_annotated_filepath(video): + return "Invalid video file: {}".format(video) + return True + + +class LoadVideoFFmpegPath: + @classmethod + def INPUT_TYPES(s): + return { + "required": { + "video": ("STRING", {"placeholder": "X://insert/path/here.mp4", "vhs_path_extensions": video_extensions}), + "force_rate": (floatOrInt, {"default": 0, "min": 0, "max": 60, "step": 1, "disable": 0}), + "custom_width": ("INT", {"default": 0, "min": 0, "max": DIMMAX, 'disable': 0}), + "custom_height": ("INT", {"default": 0, "min": 0, "max": DIMMAX, 'disable': 0}), + "frame_load_cap": ("INT", {"default": 0, "min": 0, "max": BIGMAX, "step": 1, "disable": 0}), + "start_time": ("FLOAT", {"default": 0, "min": 0, "max": BIGMAX, "step": .001, "widgetType": "VHSTIMESTAMP"}), + }, + "optional": { + "meta_batch": ("VHS_BatchManager",), + "vae": ("VAE",), + "format": get_load_formats(), + }, + "hidden": { + "force_size": "STRING", + "unique_id": "UNIQUE_ID" + }, + } + + CATEGORY = "Video Helper Suite 🎥🅥🅗🅢" + + RETURN_TYPES = (imageOrLatent, "MASK", "AUDIO", "VHS_VIDEOINFO") + RETURN_NAMES = ("IMAGE", "mask", "audio", "video_info") + + FUNCTION = "load_video" + + def load_video(self, **kwargs): + if kwargs['video'] is None or validate_path(kwargs['video']) != True: + raise Exception("video is not a valid path: " + kwargs['video']) + if is_url(kwargs['video']): + kwargs['video'] = try_download_video(kwargs['video']) or kwargs['video'] + image, _, audio, video_info = load_video(**kwargs, generator=ffmpeg_frame_generator) + if isinstance(image, dict): + return (image, None, audio, video_info) + if image.size(3) == 4: + return (image[:,:,:,:3], 1-image[:,:,:,3], audio, video_info) + return (image, torch.zeros(image.size(0), 64, 64, device="cpu"), audio, video_info) + + @classmethod + def IS_CHANGED(s, video, **kwargs): + return hash_path(video) + + @classmethod + def VALIDATE_INPUTS(s, video): + return validate_path(video, allow_none=True) + +class LoadImagePath: + @classmethod + def INPUT_TYPES(s): + return { + "required": { + "image": ("STRING", {"placeholder": "X://insert/path/here.png", "vhs_path_extensions": list(FolderOfImages.IMG_EXTENSIONS)}), + "custom_width": ("INT", {"default": 0, "min": 0, "max": DIMMAX, "step": 8, 'disable': 0}), + "custom_height": ("INT", {"default": 0, "min": 0, "max": DIMMAX, "step": 8, 'disable': 0}), + }, + "optional": { + "vae": ("VAE",), + }, + "hidden": { + "force_size": "STRING", + }, + } + + CATEGORY = "Video Helper Suite 🎥🅥🅗🅢" + + RETURN_TYPES = (imageOrLatent, "MASK") + RETURN_NAMES = ("IMAGE", "mask") + + FUNCTION = "load_image" + + def load_image(self, **kwargs): + if kwargs['image'] is None or validate_path(kwargs['image']) != True: + raise Exception("image is not a valid path: " + kwargs['image']) + kwargs.update({'video': kwargs['image'], 'force_rate': 0, 
'frame_load_cap': 0, + 'start_time': 0}) + kwargs.pop('image') + image, _, _, _ = load_video(**kwargs, generator=ffmpeg_frame_generator) + if isinstance(image, dict): + return (image, None) + if image.size(3) == 4: + return (image[:,:,:,:3], 1-image[:,:,:,3]) + return (image, torch.zeros(image.size(0), 64, 64, device="cpu")) + + @classmethod + def IS_CHANGED(s, image, **kwargs): + return hash_path(image) + + @classmethod + def VALIDATE_INPUTS(s, image): + return validate_path(image, allow_none=True) diff --git a/zavodik/nodes/ComfyUI-VideoHelperSuite/videohelpersuite/logger.py b/zavodik/nodes/ComfyUI-VideoHelperSuite/videohelpersuite/logger.py new file mode 100644 index 0000000000000000000000000000000000000000..579b55aba8c9a2299b8170ec41b6d805e4cdfb97 --- /dev/null +++ b/zavodik/nodes/ComfyUI-VideoHelperSuite/videohelpersuite/logger.py @@ -0,0 +1,36 @@ +import sys +import copy +import logging + + +class ColoredFormatter(logging.Formatter): + COLORS = { + "DEBUG": "\033[0;36m", # CYAN + "INFO": "\033[0;32m", # GREEN + "WARNING": "\033[0;33m", # YELLOW + "ERROR": "\033[0;31m", # RED + "CRITICAL": "\033[0;37;41m", # WHITE ON RED + "RESET": "\033[0m", # RESET COLOR + } + + def format(self, record): + colored_record = copy.copy(record) + levelname = colored_record.levelname + seq = self.COLORS.get(levelname, self.COLORS["RESET"]) + colored_record.levelname = f"{seq}{levelname}{self.COLORS['RESET']}" + return super().format(colored_record) + + +# Create a new logger +logger = logging.getLogger("VideoHelperSuite") +logger.propagate = False + +# Add handler if we don't have one. +if not logger.handlers: + handler = logging.StreamHandler(sys.stdout) + handler.setFormatter(ColoredFormatter("[%(name)s] - %(levelname)s - %(message)s")) + logger.addHandler(handler) + +# Configure logger +loglevel = logging.INFO +logger.setLevel(loglevel) diff --git a/zavodik/nodes/ComfyUI-VideoHelperSuite/videohelpersuite/nodes.py b/zavodik/nodes/ComfyUI-VideoHelperSuite/videohelpersuite/nodes.py new file mode 100644 index 0000000000000000000000000000000000000000..9fb8164106c3971e03a328edd97531ce9e92b395 --- /dev/null +++ b/zavodik/nodes/ComfyUI-VideoHelperSuite/videohelpersuite/nodes.py @@ -0,0 +1,1120 @@ +import os +import sys +import json +import subprocess +import numpy as np +import re +import datetime +from typing import List +import torch +from PIL import Image, ExifTags +from PIL.PngImagePlugin import PngInfo +from pathlib import Path +from string import Template +import itertools +import functools + +import folder_paths +from .logger import logger +from .image_latent_nodes import * +from .load_video_nodes import LoadVideoUpload, LoadVideoPath, LoadVideoFFmpegUpload, LoadVideoFFmpegPath, LoadImagePath +from .load_images_nodes import LoadImagesFromDirectoryUpload, LoadImagesFromDirectoryPath +from .batched_nodes import VAEEncodeBatched, VAEDecodeBatched +from .utils import ffmpeg_path, get_audio, hash_path, validate_path, requeue_workflow, \ + gifski_path, calculate_file_hash, strip_path, try_download_video, is_url, \ + imageOrLatent, BIGMAX, merge_filter_args, ENCODE_ARGS, floatOrInt, cached, \ + ContainsAll +from comfy.utils import ProgressBar + +if 'VHS_video_formats' not in folder_paths.folder_names_and_paths: + folder_paths.folder_names_and_paths["VHS_video_formats"] = ((),{".json"}) +if len(folder_paths.folder_names_and_paths['VHS_video_formats'][1]) == 0: + folder_paths.folder_names_and_paths["VHS_video_formats"][1].add(".json") +audio_extensions = ['mp3', 'mp4', 'wav', 'ogg'] + +def flatten_list(l): + 
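    """Flatten one level of nesting, e.g. [1, [2, 3], 4] -> [1, 2, 3, 4]."""
+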
    ret = []
+    for e in l:
+        if isinstance(e, list):
+            ret.extend(e)
+        else:
+            ret.append(e)
+    return ret
+
+def iterate_format(video_format, for_widgets=True):
+    """Provides an iterator over widgets, or arguments"""
+    def indirector(cont, index):
+        if isinstance(cont[index], list) and (not for_widgets
+                or len(cont[index]) > 1 and not isinstance(cont[index][1], dict)):
+            inp = yield cont[index]
+            if inp is not None:
+                cont[index] = inp
+                yield
+    for k in video_format:
+        if k == "extra_widgets":
+            if for_widgets:
+                yield from video_format["extra_widgets"]
+        elif k.endswith("_pass"):
+            for i in range(len(video_format[k])):
+                yield from indirector(video_format[k], i)
+            if not for_widgets:
+                video_format[k] = flatten_list(video_format[k])
+        else:
+            yield from indirector(video_format, k)
+
+base_formats_dir = os.path.join(os.path.dirname(os.path.abspath(__file__)), "..", "video_formats")
+@cached(5)
+def get_video_formats():
+    format_files = {}
+    for format_name in folder_paths.get_filename_list("VHS_video_formats"):
+        format_files[format_name] = folder_paths.get_full_path("VHS_video_formats", format_name)
+    for item in os.scandir(base_formats_dir):
+        if not item.is_file() or not item.name.endswith('.json'):
+            continue
+        format_files[item.name[:-5]] = item.path
+    formats = []
+    format_widgets = {}
+    for format_name, path in format_files.items():
+        with open(path, 'r') as stream:
+            video_format = json.load(stream)
+        if "gifski_pass" in video_format and gifski_path is None:
+            #Skip format
+            continue
+        widgets = list(iterate_format(video_format))
+        formats.append("video/" + format_name)
+        if (len(widgets) > 0):
+            format_widgets["video/"+ format_name] = widgets
+    return formats, format_widgets
+
+def apply_format_widgets(format_name, kwargs):
+    if os.path.exists(os.path.join(base_formats_dir, format_name + ".json")):
+        video_format_path = os.path.join(base_formats_dir, format_name + ".json")
+    else:
+        video_format_path = folder_paths.get_full_path("VHS_video_formats", format_name)
+    with open(video_format_path, 'r') as stream:
+        video_format = json.load(stream)
+    for w in iterate_format(video_format):
+        if w[0] not in kwargs:
+            if len(w) > 2 and 'default' in w[2]:
+                default = w[2]['default']
+            else:
+                if type(w[1]) is list:
+                    default = w[1][0]
+                else:
+                    #NOTE: This doesn't respect max/min, but should be good enough as a fallback to a fallback to a fallback
+                    default = {"BOOLEAN": False, "INT": 0, "FLOAT": 0, "STRING": ""}[w[1]]
+            kwargs[w[0]] = default
+            logger.warn(f"Missing input for {w[0]} has been set to {default}")
+    wit = iterate_format(video_format, False)
+    for w in wit:
+        while isinstance(w, list):
+            if len(w) == 1:
+                #TODO: mapping=kwargs should be safer, but results in key errors, investigate why
+                w = [Template(x).substitute(**kwargs) for x in w[0]]
+                break
+            elif isinstance(w[1], dict):
+                w = w[1][str(kwargs[w[0]])]
+            elif len(w) > 3:
+                w = Template(w[3]).substitute(val=kwargs[w[0]])
+            else:
+                w = str(kwargs[w[0]])
+        wit.send(w)
+    return video_format
+
+def tensor_to_int(tensor, bits):
+    tensor = tensor.cpu().numpy() * (2**bits-1) + 0.5
+    return np.clip(tensor, 0, (2**bits-1))
+def tensor_to_shorts(tensor):
+    return tensor_to_int(tensor, 16).astype(np.uint16)
+def tensor_to_bytes(tensor):
+    return tensor_to_int(tensor, 8).astype(np.uint8)
+
+def ffmpeg_process(args, video_format, video_metadata, file_path, env):
+
+    res = None
+    frame_data = yield
+    total_frames_output = 0
+    if video_format.get('save_metadata', 'False') != 'False':
+        os.makedirs(folder_paths.get_temp_directory(),
exist_ok=True) + metadata_path = os.path.join(folder_paths.get_temp_directory(), "metadata.txt") + #metadata from file should escape = ; # \ and newline + def escape_ffmpeg_metadata(key, value): + value = str(value) + value = value.replace("\\","\\\\") + value = value.replace(";","\\;") + value = value.replace("#","\\#") + value = value.replace("=","\\=") + value = value.replace("\n","\\\n") + return f"{key}={value}" + + with open(metadata_path, "w") as f: + f.write(";FFMETADATA1\n") + if "prompt" in video_metadata: + f.write(escape_ffmpeg_metadata("prompt", json.dumps(video_metadata["prompt"])) + "\n") + if "workflow" in video_metadata: + f.write(escape_ffmpeg_metadata("workflow", json.dumps(video_metadata["workflow"])) + "\n") + for k, v in video_metadata.items(): + if k not in ["prompt", "workflow"]: + f.write(escape_ffmpeg_metadata(k, json.dumps(v)) + "\n") + + m_args = args[:1] + ["-i", metadata_path] + args[1:] + ["-metadata", "creation_time=now", "-movflags", "use_metadata_tags"] + with subprocess.Popen(m_args + [file_path], stderr=subprocess.PIPE, + stdin=subprocess.PIPE, env=env) as proc: + try: + while frame_data is not None: + proc.stdin.write(frame_data) + #TODO: skip flush for increased speed + frame_data = yield + total_frames_output+=1 + proc.stdin.flush() + proc.stdin.close() + res = proc.stderr.read() + except BrokenPipeError as e: + err = proc.stderr.read() + #Check if output file exists. If it does, the re-execution + #will also fail. This obscures the cause of the error + #and seems to never occur concurrent to the metadata issue + if os.path.exists(file_path): + raise Exception("An error occurred in the ffmpeg subprocess:\n" \ + + err.decode(*ENCODE_ARGS)) + #Res was not set + print(err.decode(*ENCODE_ARGS), end="", file=sys.stderr) + logger.warn("An error occurred when saving with metadata") + if res != b'': + with subprocess.Popen(args + [file_path], stderr=subprocess.PIPE, + stdin=subprocess.PIPE, env=env) as proc: + try: + while frame_data is not None: + proc.stdin.write(frame_data) + frame_data = yield + total_frames_output+=1 + proc.stdin.flush() + proc.stdin.close() + res = proc.stderr.read() + except BrokenPipeError as e: + res = proc.stderr.read() + raise Exception("An error occurred in the ffmpeg subprocess:\n" \ + + res.decode(*ENCODE_ARGS)) + yield total_frames_output + if len(res) > 0: + print(res.decode(*ENCODE_ARGS), end="", file=sys.stderr) + +def gifski_process(args, dimensions, frame_rate, video_format, file_path, env): + frame_data = yield + with subprocess.Popen(args + video_format['main_pass'] + ['-f', 'yuv4mpegpipe', '-'], + stderr=subprocess.PIPE, stdin=subprocess.PIPE, + stdout=subprocess.PIPE, env=env) as procff: + with subprocess.Popen([gifski_path] + video_format['gifski_pass'] + + ['-W', f'{dimensions[0]}', '-H', f'{dimensions[1]}'] + + ['-r', f'{frame_rate}'] + + ['-q', '-o', file_path, '-'], stderr=subprocess.PIPE, + stdin=procff.stdout, stdout=subprocess.PIPE, + env=env) as procgs: + try: + while frame_data is not None: + procff.stdin.write(frame_data) + frame_data = yield + procff.stdin.flush() + procff.stdin.close() + resff = procff.stderr.read() + resgs = procgs.stderr.read() + outgs = procgs.stdout.read() + except BrokenPipeError as e: + procff.stdin.close() + resff = procff.stderr.read() + resgs = procgs.stderr.read() + raise Exception("An error occurred while creating gifski output\n" \ + + "Make sure you are using gifski --version >=1.32.0\nffmpeg: " \ + + resff.decode(*ENCODE_ARGS) + '\ngifski: ' + resgs.decode(*ENCODE_ARGS)) + if 
len(resff) > 0: + print(resff.decode(*ENCODE_ARGS), end="", file=sys.stderr) + if len(resgs) > 0: + print(resgs.decode(*ENCODE_ARGS), end="", file=sys.stderr) + #should always be empty as the quiet flag is passed + if len(outgs) > 0: + print(outgs.decode(*ENCODE_ARGS)) + +def to_pingpong(inp): + if not hasattr(inp, "__getitem__"): + inp = list(inp) + yield from inp + for i in range(len(inp)-2,0,-1): + yield inp[i] + +class VideoCombine: + @classmethod + def INPUT_TYPES(s): + ffmpeg_formats, format_widgets = get_video_formats() + format_widgets["image/webp"] = [['lossless', "BOOLEAN", {'default': True}]] + return { + "required": { + "images": (imageOrLatent,), + "frame_rate": ( + floatOrInt, + {"default": 8, "min": 1, "step": 1}, + ), + "loop_count": ("INT", {"default": 0, "min": 0, "max": 100, "step": 1}), + "filename_prefix": ("STRING", {"default": "AnimateDiff"}), + "format": (["image/gif", "image/webp"] + ffmpeg_formats, {'formats': format_widgets}), + "pingpong": ("BOOLEAN", {"default": False}), + "save_output": ("BOOLEAN", {"default": True}), + }, + "optional": { + "audio": ("AUDIO",), + "meta_batch": ("VHS_BatchManager",), + "vae": ("VAE",), + }, + "hidden": ContainsAll({ + "prompt": "PROMPT", + "extra_pnginfo": "EXTRA_PNGINFO", + "unique_id": "UNIQUE_ID" + }), + } + + RETURN_TYPES = ("VHS_FILENAMES",) + RETURN_NAMES = ("Filenames",) + OUTPUT_NODE = True + CATEGORY = "Video Helper Suite 🎥🅥🅗🅢" + FUNCTION = "combine_video" + + def combine_video( + self, + frame_rate: int, + loop_count: int, + images=None, + latents=None, + filename_prefix="AnimateDiff", + format="image/gif", + pingpong=False, + save_output=True, + prompt=None, + extra_pnginfo=None, + audio=None, + unique_id=None, + manual_format_widgets=None, + meta_batch=None, + vae=None, + **kwargs + ): + if latents is not None: + images = latents + if images is None: + return ((save_output, []),) + if vae is not None: + if isinstance(images, dict): + images = images['samples'] + else: + vae = None + + if isinstance(images, torch.Tensor) and images.size(0) == 0: + return ((save_output, []),) + num_frames = len(images) + pbar = ProgressBar(num_frames) + if vae is not None: + downscale_ratio = getattr(vae, "downscale_ratio", 8) + width = images.size(-1)*downscale_ratio + height = images.size(-2)*downscale_ratio + frames_per_batch = (1920 * 1080 * 16) // (width * height) or 1 + #Python 3.12 adds an itertools.batched, but it's easily replicated for legacy support + def batched(it, n): + while batch := tuple(itertools.islice(it, n)): + yield batch + def batched_encode(images, vae, frames_per_batch): + for batch in batched(iter(images), frames_per_batch): + image_batch = torch.from_numpy(np.array(batch)) + yield from vae.decode(image_batch) + images = batched_encode(images, vae, frames_per_batch) + first_image = next(images) + #repush first_image + images = itertools.chain([first_image], images) + #A single image has 3 dimensions. 
Discard higher dimensions + while len(first_image.shape) > 3: + first_image = first_image[0] + else: + first_image = images[0] + images = iter(images) + # get output information + output_dir = ( + folder_paths.get_output_directory() + if save_output + else folder_paths.get_temp_directory() + ) + ( + full_output_folder, + filename, + _, + subfolder, + _, + ) = folder_paths.get_save_image_path(filename_prefix, output_dir) + output_files = [] + + metadata = PngInfo() + video_metadata = {} + if prompt is not None: + metadata.add_text("prompt", json.dumps(prompt)) + video_metadata["prompt"] = json.dumps(prompt) + if extra_pnginfo is not None: + for x in extra_pnginfo: + metadata.add_text(x, json.dumps(extra_pnginfo[x])) + video_metadata[x] = extra_pnginfo[x] + extra_options = extra_pnginfo.get('workflow', {}).get('extra', {}) + else: + extra_options = {} + metadata.add_text("CreationTime", datetime.datetime.now().isoformat(" ")[:19]) + + if meta_batch is not None and unique_id in meta_batch.outputs: + (counter, output_process) = meta_batch.outputs[unique_id] + else: + # comfy counter workaround + max_counter = 0 + + # Loop through the existing files + matcher = re.compile(f"{re.escape(filename)}_(\\d+)\\D*\\..+", re.IGNORECASE) + for existing_file in os.listdir(full_output_folder): + # Check if the file matches the expected format + match = matcher.fullmatch(existing_file) + if match: + # Extract the numeric portion of the filename + file_counter = int(match.group(1)) + # Update the maximum counter value if necessary + if file_counter > max_counter: + max_counter = file_counter + + # Increment the counter by 1 to get the next available value + counter = max_counter + 1 + output_process = None + + # save first frame as png to keep metadata + first_image_file = f"{filename}_{counter:05}.png" + file_path = os.path.join(full_output_folder, first_image_file) + if extra_options.get('VHS_MetadataImage', True) != False: + Image.fromarray(tensor_to_bytes(first_image)).save( + file_path, + pnginfo=metadata, + compress_level=4, + ) + output_files.append(file_path) + + format_type, format_ext = format.split("/") + if format_type == "image": + if meta_batch is not None: + raise Exception("Pillow('image/') formats are not compatible with batched output") + image_kwargs = {} + if format_ext == "gif": + image_kwargs['disposal'] = 2 + if format_ext == "webp": + #Save timestamp information + exif = Image.Exif() + exif[ExifTags.IFD.Exif] = {36867: datetime.datetime.now().isoformat(" ")[:19]} + image_kwargs['exif'] = exif + image_kwargs['lossless'] = kwargs.get("lossless", True) + file = f"{filename}_{counter:05}.{format_ext}" + file_path = os.path.join(full_output_folder, file) + if pingpong: + images = to_pingpong(images) + def frames_gen(images): + for i in images: + pbar.update(1) + yield Image.fromarray(tensor_to_bytes(i)) + frames = frames_gen(images) + # Use pillow directly to save an animated image + next(frames).save( + file_path, + format=format_ext.upper(), + save_all=True, + append_images=frames, + duration=round(1000 / frame_rate), + loop=loop_count, + compress_level=4, + **image_kwargs + ) + output_files.append(file_path) + else: + # Use ffmpeg to save a video + if ffmpeg_path is None: + raise ProcessLookupError(f"ffmpeg is required for video outputs and could not be found.\nIn order to use video outputs, you must either:\n- Install imageio-ffmpeg with pip,\n- Place a ffmpeg executable in {os.path.abspath('')}, or\n- Install ffmpeg and add it to the system path.") + + if manual_format_widgets is not 
None: + logger.warn("Format args can now be passed directly. The manual_format_widgets argument is now deprecated") + kwargs.update(manual_format_widgets) + + has_alpha = first_image.shape[-1] == 4 + kwargs["has_alpha"] = has_alpha + video_format = apply_format_widgets(format_ext, kwargs) + dim_alignment = video_format.get("dim_alignment", 2) + if (first_image.shape[1] % dim_alignment) or (first_image.shape[0] % dim_alignment): + #output frames must be padded + to_pad = (-first_image.shape[1] % dim_alignment, + -first_image.shape[0] % dim_alignment) + padding = (to_pad[0]//2, to_pad[0] - to_pad[0]//2, + to_pad[1]//2, to_pad[1] - to_pad[1]//2) + padfunc = torch.nn.ReplicationPad2d(padding) + def pad(image): + image = image.permute((2,0,1))#HWC to CHW + padded = padfunc(image.to(dtype=torch.float32)) + return padded.permute((1,2,0)) + images = map(pad, images) + dimensions = (-first_image.shape[1] % dim_alignment + first_image.shape[1], + -first_image.shape[0] % dim_alignment + first_image.shape[0]) + logger.warn("Output images were not of valid resolution and have had padding applied") + else: + dimensions = (first_image.shape[1], first_image.shape[0]) + if pingpong: + if meta_batch is not None: + logger.error("pingpong is incompatible with batched output") + images = to_pingpong(images) + if num_frames > 2: + num_frames += num_frames -2 + pbar.total = num_frames + if loop_count > 0: + loop_args = ["-vf", "loop=loop=" + str(loop_count)+":size=" + str(num_frames)] + else: + loop_args = [] + if video_format.get('input_color_depth', '8bit') == '16bit': + images = map(tensor_to_shorts, images) + if has_alpha: + i_pix_fmt = 'rgba64' + else: + i_pix_fmt = 'rgb48' + else: + images = map(tensor_to_bytes, images) + if has_alpha: + i_pix_fmt = 'rgba' + else: + i_pix_fmt = 'rgb24' + file = f"{filename}_{counter:05}.{video_format['extension']}" + file_path = os.path.join(full_output_folder, file) + bitrate_arg = [] + bitrate = video_format.get('bitrate') + if bitrate is not None: + bitrate_arg = ["-b:v", str(bitrate) + "M" if video_format.get('megabit') == 'True' else str(bitrate) + "K"] + args = [ffmpeg_path, "-v", "error", "-f", "rawvideo", "-pix_fmt", i_pix_fmt, + # The image data is in an undefined generic RGB color space, which in practice means sRGB. + # sRGB has the same primaries and matrix as BT.709, but a different transfer function (gamma), + # called by the sRGB standard name IEC 61966-2-1. However, video hosting platforms like YouTube + # standardize on full BT.709 and will convert the colors accordingly. This last minute change + # in colors can be confusing to users. We can counter it by lying about the transfer function + # on a per format basis, i.e. for video we will lie to FFmpeg that it is already BT.709. Also, + # because the input data is in RGB (not YUV) it is more efficient (fewer scale filter invocations) + # to specify the input color space as RGB and then later, if the format actually wants YUV, + # to convert it to BT.709 YUV via FFmpeg's -vf "scale=out_color_matrix=bt709". + "-color_range", "pc", "-colorspace", "rgb", "-color_primaries", "bt709", + "-color_trc", video_format.get("fake_trc", "iec61966-2-1"), + "-s", f"{dimensions[0]}x{dimensions[1]}", "-r", str(frame_rate), "-i", "-"] \ + + loop_args + + images = map(lambda x: x.tobytes(), images) + env=os.environ.copy() + if "environment" in video_format: + env.update(video_format["environment"]) + + if "pre_pass" in video_format: + if meta_batch is not None: + #Performing a prepass requires keeping access to all frames. 
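+                    #Editor's note: a pre_pass is an extra ffmpeg invocation
+                    #that must see every frame before the real encode runs,
+                    #e.g. a hypothetical palette-generation pass for gifs:
+                    #  ffmpeg -f rawvideo ... -i - -vf palettegen palette.png
+                    #which is why all frames are joined into a single bytes
+                    #object below before the pre_pass subprocess is launched.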
+ #Potential solutions include keeping just output frames in + #memory or using 3 passes with intermediate file, but + #very long gifs probably shouldn't be encouraged + raise Exception("Formats which require a pre_pass are incompatible with Batch Manager.") + images = [b''.join(images)] + os.makedirs(folder_paths.get_temp_directory(), exist_ok=True) + in_args_len = args.index("-i") + 2 # The index after ["-i", "-"] + pre_pass_args = args[:in_args_len] + video_format['pre_pass'] + merge_filter_args(pre_pass_args) + try: + subprocess.run(pre_pass_args, input=images[0], env=env, + capture_output=True, check=True) + except subprocess.CalledProcessError as e: + raise Exception("An error occurred in the ffmpeg prepass:\n" \ + + e.stderr.decode(*ENCODE_ARGS)) + if "inputs_main_pass" in video_format: + in_args_len = args.index("-i") + 2 # The index after ["-i", "-"] + args = args[:in_args_len] + video_format['inputs_main_pass'] + args[in_args_len:] + + if output_process is None: + if 'gifski_pass' in video_format: + format = 'image/gif' + output_process = gifski_process(args, dimensions, frame_rate, video_format, file_path, env) + audio = None + else: + args += video_format['main_pass'] + bitrate_arg + merge_filter_args(args) + output_process = ffmpeg_process(args, video_format, video_metadata, file_path, env) + #Proceed to first yield + output_process.send(None) + if meta_batch is not None: + meta_batch.outputs[unique_id] = (counter, output_process) + + for image in images: + pbar.update(1) + output_process.send(image) + if meta_batch is not None: + requeue_workflow((meta_batch.unique_id, not meta_batch.has_closed_inputs)) + if meta_batch is None or meta_batch.has_closed_inputs: + #Close pipe and wait for termination. + try: + total_frames_output = output_process.send(None) + output_process.send(None) + except StopIteration: + pass + if meta_batch is not None: + meta_batch.outputs.pop(unique_id) + if len(meta_batch.outputs) == 0: + meta_batch.reset() + else: + #batch is unfinished + #TODO: Check if empty output breaks other custom nodes + return {"ui": {"unfinished_batch": [True]}, "result": ((save_output, []),)} + + output_files.append(file_path) + + + a_waveform = None + if audio is not None: + try: + #safely check if audio produced by VHS_LoadVideo actually exists + a_waveform = audio['waveform'] + except: + pass + if a_waveform is not None: + # Create audio file if input was provided + output_file_with_audio = f"{filename}_{counter:05}-audio.{video_format['extension']}" + output_file_with_audio_path = os.path.join(full_output_folder, output_file_with_audio) + if "audio_pass" not in video_format: + logger.warn("Selected video format does not have explicit audio support") + video_format["audio_pass"] = ["-c:a", "libopus"] + + + # FFmpeg command with audio re-encoding + #TODO: expose audio quality options if format widgets makes it in + #Reconsider forcing apad/shortest + channels = audio['waveform'].size(1) + min_audio_dur = total_frames_output / frame_rate + 1 + if video_format.get('trim_to_audio', 'False') != 'False': + apad = [] + else: + apad = ["-af", "apad=whole_dur="+str(min_audio_dur)] + mux_args = [ffmpeg_path, "-v", "error", "-n", "-i", file_path, + "-ar", str(audio['sample_rate']), "-ac", str(channels), + "-f", "f32le", "-i", "-", "-c:v", "copy"] \ + + video_format["audio_pass"] \ + + apad + ["-shortest", output_file_with_audio_path] + + audio_data = audio['waveform'].squeeze(0).transpose(0,1) \ + .numpy().tobytes() + merge_filter_args(mux_args, '-af') + try: + res = 
subprocess.run(mux_args, input=audio_data,
+                                     env=env, capture_output=True, check=True)
+            except subprocess.CalledProcessError as e:
+                raise Exception("An error occurred in the ffmpeg subprocess:\n" \
+                        + e.stderr.decode(*ENCODE_ARGS))
+            if res.stderr:
+                print(res.stderr.decode(*ENCODE_ARGS), end="", file=sys.stderr)
+            output_files.append(output_file_with_audio_path)
+            #Return this file with audio to the webui.
+            #It will be muted unless opened or saved with right click
+            file = output_file_with_audio
+        if extra_options.get('VHS_KeepIntermediate', True) == False:
+            for intermediate in output_files[1:-1]:
+                if os.path.exists(intermediate):
+                    os.remove(intermediate)
+        preview = {
+                "filename": file,
+                "subfolder": subfolder,
+                "type": "output" if save_output else "temp",
+                "format": format,
+                "frame_rate": frame_rate,
+                "workflow": first_image_file,
+                "fullpath": output_files[-1],
+            }
+        if num_frames == 1 and 'png' in format and '%03d' in file:
+            preview['format'] = 'image/png'
+            preview['filename'] = file.replace('%03d', '001')
+        return {"ui": {"gifs": [preview]}, "result": ((save_output, output_files),)}
+
+class LoadAudio:
+    @classmethod
+    def INPUT_TYPES(s):
+        #Hide ffmpeg formats if ffmpeg isn't available
+        return {
+            "required": {
+                "audio_file": ("STRING", {"default": "input/", "vhs_path_extensions": ['wav','mp3','ogg','m4a','flac']}),
+                },
+            "optional" : {
+                "seek_seconds": ("FLOAT", {"default": 0, "min": 0, "widgetType": "VHSTIMESTAMP"}),
+                "duration": ("FLOAT" , {"default": 0, "min": 0, "max": 10000000, "step": 0.01, "widgetType": "VHSTIMESTAMP"}),
+                }
+        }
+
+    RETURN_TYPES = ("AUDIO", "FLOAT")
+    RETURN_NAMES = ("audio", "duration")
+    CATEGORY = "Video Helper Suite 🎥🅥🅗🅢/audio"
+    FUNCTION = "load_audio"
+    def load_audio(self, audio_file, seek_seconds=0, duration=0):
+        audio_file = strip_path(audio_file)
+        if audio_file is None or validate_path(audio_file) != True:
+            raise Exception("audio_file is not a valid path: " + audio_file)
+        if is_url(audio_file):
+            audio_file = try_download_video(audio_file) or audio_file
+        #Eagerly fetch the audio since the user must be using it if the
+        #node executes, unlike Load Video
+        audio = get_audio(audio_file, start_time=seek_seconds, duration=duration)
+        loaded_duration = audio['waveform'].size(2)/audio['sample_rate']
+        return (audio, loaded_duration)
+
+    @classmethod
+    def IS_CHANGED(s, audio_file, **kwargs):
+        return hash_path(audio_file)
+
+    @classmethod
+    def VALIDATE_INPUTS(s, audio_file, **kwargs):
+        return validate_path(audio_file, allow_none=True)
+
+class LoadAudioUpload:
+    @classmethod
+    def INPUT_TYPES(s):
+        input_dir = folder_paths.get_input_directory()
+        files = []
+        for f in os.listdir(input_dir):
+            if os.path.isfile(os.path.join(input_dir, f)):
+                file_parts = f.split('.')
+                if len(file_parts) > 1 and (file_parts[-1] in audio_extensions):
+                    files.append(f)
+        return {"required": {
+                    "audio": (sorted(files),),},
+                "optional": {
+                    "start_time": ("FLOAT" , {"default": 0, "min": 0, "max": 10000000, "step": 0.01, "widgetType": "VHSTIMESTAMP"}),
+                    "duration": ("FLOAT" , {"default": 0, "min": 0, "max": 10000000, "step": 0.01, "widgetType": "VHSTIMESTAMP"}),
+                },
+        }
+
+    CATEGORY = "Video Helper Suite 🎥🅥🅗🅢/audio"
+
+    RETURN_TYPES = ("AUDIO", "FLOAT")
+    RETURN_NAMES = ("audio", "duration")
+    FUNCTION = "load_audio"
+
+    def load_audio(self, start_time=0, duration=0, **kwargs):
+        audio_file = folder_paths.get_annotated_filepath(strip_path(kwargs['audio']))
+        if audio_file is None or validate_path(audio_file) != True:
+            raise Exception("audio_file is not a valid path: " + audio_file)
+
+        audio = get_audio(audio_file, start_time, duration)
+        loaded_duration = audio['waveform'].size(2)/audio['sample_rate']
+        return (audio, loaded_duration)
+
+    @classmethod
+    def IS_CHANGED(s, audio, **kwargs):
+        audio_file = folder_paths.get_annotated_filepath(strip_path(audio))
+        return hash_path(audio_file)
+
+    @classmethod
+    def VALIDATE_INPUTS(s, audio, **kwargs):
+        audio_file = folder_paths.get_annotated_filepath(strip_path(audio))
+        return validate_path(audio_file, allow_none=True)
+class AudioToVHSAudio:
+    """Legacy method for external nodes that utilized VHS_AUDIO,
+    VHS_AUDIO is deprecated as a format and should no longer be used"""
+    @classmethod
+    def INPUT_TYPES(s):
+        return {"required": {"audio": ("AUDIO",)}}
+    CATEGORY = "Video Helper Suite 🎥🅥🅗🅢/audio"
+
+    RETURN_TYPES = ("VHS_AUDIO", )
+    RETURN_NAMES = ("vhs_audio",)
+    FUNCTION = "convert_audio"
+
+    def convert_audio(self, audio):
+        ar = str(audio['sample_rate'])
+        ac = str(audio['waveform'].size(1))
+        mux_args = [ffmpeg_path, "-f", "f32le", "-ar", ar, "-ac", ac,
+                    "-i", "-", "-f", "wav", "-"]
+
+        audio_data = audio['waveform'].squeeze(0).transpose(0,1) \
+                .numpy().tobytes()
+        try:
+            res = subprocess.run(mux_args, input=audio_data,
+                                 capture_output=True, check=True)
+        except subprocess.CalledProcessError as e:
+            raise Exception("An error occurred in the ffmpeg subprocess:\n" \
+                    + e.stderr.decode(*ENCODE_ARGS))
+        if res.stderr:
+            print(res.stderr.decode(*ENCODE_ARGS), end="", file=sys.stderr)
+        return (lambda: res.stdout,)
+
+class VHSAudioToAudio:
+    """Legacy method for external nodes that utilized VHS_AUDIO,
+    VHS_AUDIO is deprecated as a format and should no longer be used"""
+    @classmethod
+    def INPUT_TYPES(s):
+        return {"required": {"vhs_audio": ("VHS_AUDIO",)}}
+    CATEGORY = "Video Helper Suite 🎥🅥🅗🅢/audio"
+
+    RETURN_TYPES = ("AUDIO", )
+    RETURN_NAMES = ("audio",)
+    FUNCTION = "convert_audio"
+
+    def convert_audio(self, vhs_audio):
+        if not vhs_audio or not vhs_audio():
+            raise Exception("audio input is not valid")
+        args = [ffmpeg_path, "-i", '-']
+        try:
+            res = subprocess.run(args + ["-f", "f32le", "-"], input=vhs_audio(),
+                                 capture_output=True, check=True)
+            audio = torch.frombuffer(bytearray(res.stdout), dtype=torch.float32)
+        except subprocess.CalledProcessError as e:
+            raise Exception("An error occurred in the ffmpeg subprocess:\n" \
+                    + e.stderr.decode(*ENCODE_ARGS))
+        match = re.search(', (\\d+) Hz, (\\w+), ',res.stderr.decode(*ENCODE_ARGS))
+        if match:
+            ar = int(match.group(1))
+            #NOTE: Just throwing an error for other channel types right now
+            #Will deal with issues if they come
+            ac = {"mono": 1, "stereo": 2}[match.group(2)]
+        else:
+            ar = 44100
+            ac = 2
+        audio = audio.reshape((-1,ac)).transpose(0,1).unsqueeze(0)
+        return ({'waveform': audio, 'sample_rate': ar},)
+
+class PruneOutputs:
+    @classmethod
+    def INPUT_TYPES(s):
+        return {
+            "required": {
+                "filenames": ("VHS_FILENAMES",),
+                "options": (["Intermediate", "Intermediate and Utility"],)
+            }
+        }
+
+    RETURN_TYPES = ()
+    OUTPUT_NODE = True
+    CATEGORY = "Video Helper Suite 🎥🅥🅗🅢"
+    FUNCTION = "prune_outputs"
+
+    def prune_outputs(self, filenames, options):
+        if len(filenames[1]) == 0:
+            return ()
+        assert(len(filenames[1]) <= 3 and len(filenames[1]) >= 2)
+        delete_list = []
+        if options in ["Intermediate", "Intermediate and Utility", "All"]:
+            delete_list += filenames[1][1:-1]
+        if options in ["Intermediate and Utility", "All"]:
+            delete_list.append(filenames[1][0])
+        if options in ["All"]:
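+            #"All" is accepted here but is not currently offered by the
+            #options widget above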
delete_list.append(filenames[1][-1]) + + output_dirs = [folder_paths.get_output_directory(), + folder_paths.get_temp_directory()] + for file in delete_list: + #Check that path is actually an output directory + if (os.path.commonpath([output_dirs[0], file]) != output_dirs[0]) \ + and (os.path.commonpath([output_dirs[1], file]) != output_dirs[1]): + raise Exception("Tried to prune output from invalid directory: " + file) + if os.path.exists(file): + os.remove(file) + return () + +class BatchManager: + def __init__(self, frames_per_batch=-1): + self.frames_per_batch = frames_per_batch + self.inputs = {} + self.outputs = {} + self.unique_id = None + self.has_closed_inputs = False + self.total_frames = float('inf') + def reset(self): + self.close_inputs() + for key in self.outputs: + if getattr(self.outputs[key][-1], "gi_suspended", False): + try: + self.outputs[key][-1].send(None) + except StopIteration: + pass + self.__init__(self.frames_per_batch) + def has_open_inputs(self): + return len(self.inputs) > 0 + def close_inputs(self): + for key in self.inputs: + if getattr(self.inputs[key][-1], "gi_suspended", False): + try: + self.inputs[key][-1].send(1) + except StopIteration: + pass + self.inputs = {} + + @classmethod + def INPUT_TYPES(s): + return { + "required": { + "frames_per_batch": ("INT", {"default": 16, "min": 1, "max": BIGMAX, "step": 1}) + }, + "hidden": { + "prompt": "PROMPT", + "unique_id": "UNIQUE_ID" + }, + } + + RETURN_TYPES = ("VHS_BatchManager",) + RETURN_NAMES = ("meta_batch",) + CATEGORY = "Video Helper Suite 🎥🅥🅗🅢" + FUNCTION = "update_batch" + + def update_batch(self, frames_per_batch, prompt=None, unique_id=None): + if unique_id is not None and prompt is not None: + requeue = prompt[unique_id]['inputs'].get('requeue', 0) + else: + requeue = 0 + if requeue == 0: + self.reset() + self.frames_per_batch = frames_per_batch + self.unique_id = unique_id + else: + num_batches = (self.total_frames+self.frames_per_batch-1)//frames_per_batch + print(f'Meta-Batch {requeue}/{num_batches}') + #onExecuted seems to not be called unless some message is sent + return (self,) + + +class VideoInfo: + @classmethod + def INPUT_TYPES(s): + return { + "required": { + "video_info": ("VHS_VIDEOINFO",), + } + } + + CATEGORY = "Video Helper Suite 🎥🅥🅗🅢" + + RETURN_TYPES = ("FLOAT","INT", "FLOAT", "INT", "INT", "FLOAT","INT", "FLOAT", "INT", "INT") + RETURN_NAMES = ( + "source_fps🟨", + "source_frame_count🟨", + "source_duration🟨", + "source_width🟨", + "source_height🟨", + "loaded_fps🟦", + "loaded_frame_count🟦", + "loaded_duration🟦", + "loaded_width🟦", + "loaded_height🟦", + ) + FUNCTION = "get_video_info" + + def get_video_info(self, video_info): + keys = ["fps", "frame_count", "duration", "width", "height"] + + source_info = [] + loaded_info = [] + + for key in keys: + source_info.append(video_info[f"source_{key}"]) + loaded_info.append(video_info[f"loaded_{key}"]) + + return (*source_info, *loaded_info) + + +class VideoInfoSource: + @classmethod + def INPUT_TYPES(s): + return { + "required": { + "video_info": ("VHS_VIDEOINFO",), + } + } + + CATEGORY = "Video Helper Suite 🎥🅥🅗🅢" + + RETURN_TYPES = ("FLOAT","INT", "FLOAT", "INT", "INT",) + RETURN_NAMES = ( + "fps🟨", + "frame_count🟨", + "duration🟨", + "width🟨", + "height🟨", + ) + FUNCTION = "get_video_info" + + def get_video_info(self, video_info): + keys = ["fps", "frame_count", "duration", "width", "height"] + + source_info = [] + + for key in keys: + source_info.append(video_info[f"source_{key}"]) + + return (*source_info,) + + +class VideoInfoLoaded: + 
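+    """Exposes only the loaded_* half of a VHS_VIDEOINFO as individual outputs"""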
@classmethod + def INPUT_TYPES(s): + return { + "required": { + "video_info": ("VHS_VIDEOINFO",), + } + } + + CATEGORY = "Video Helper Suite 🎥🅥🅗🅢" + + RETURN_TYPES = ("FLOAT","INT", "FLOAT", "INT", "INT",) + RETURN_NAMES = ( + "fps🟦", + "frame_count🟦", + "duration🟦", + "width🟦", + "height🟦", + ) + FUNCTION = "get_video_info" + + def get_video_info(self, video_info): + keys = ["fps", "frame_count", "duration", "width", "height"] + + loaded_info = [] + + for key in keys: + loaded_info.append(video_info[f"loaded_{key}"]) + + return (*loaded_info,) + +class SelectFilename: + @classmethod + def INPUT_TYPES(s): + return {"required": {"filenames": ("VHS_FILENAMES",), "index": ("INT", {"default": -1, "step": 1, "min": -1})}} + RETURN_TYPES = ("STRING",) + RETURN_NAMES =("Filename",) + CATEGORY = "Video Helper Suite 🎥🅥🅗🅢" + FUNCTION = "select_filename" + + def select_filename(self, filenames, index): + return (filenames[1][index],) +class Unbatch: + class Any(str): + def __ne__(self, other): + return False + @classmethod + def INPUT_TYPES(s): + return {"required": {"batched": ("*",)}} + RETURN_TYPES = (Any('*'),) + INPUT_IS_LIST = True + RETURN_NAMES =("unbatched",) + CATEGORY = "Video Helper Suite 🎥🅥🅗🅢" + FUNCTION = "unbatch" + def unbatch(self, batched): + if isinstance(batched[0], torch.Tensor): + return (torch.cat(batched),) + if isinstance(batched[0], dict): + out = batched[0].copy() + if 'samples' in out: + out['samples'] = torch.cat([x['samples'] for x in batched]) + if 'waveform' in out: + out['waveform'] = torch.cat([x['waveform'] for x in batched]) + out.pop('batch_index', None) + return (out,) + return (functools.reduce(lambda x,y: x+y, batched),) + @classmethod + def VALIDATE_INPUTS(cls, input_types): + return True +class SelectLatest: + @classmethod + def INPUT_TYPES(s): + return {"required": {"filename_prefix": ("STRING", {'default': 'output/AnimateDiff', 'vhs_path_extensions': []}), + "filename_postfix": ("STRING", {"placeholder": ".webm"})}} + RETURN_TYPES = ("STRING",) + RETURN_NAMES =("Filename",) + CATEGORY = "Video Helper Suite 🎥🅥🅗🅢" + FUNCTION = "select_latest" + EXPERIMENTAL = True + + def select_latest(self, filename_prefix, filename_postfix): + assert False, "Not Reachable" + +NODE_CLASS_MAPPINGS = { + "VHS_VideoCombine": VideoCombine, + "VHS_LoadVideo": LoadVideoUpload, + "VHS_LoadVideoPath": LoadVideoPath, + "VHS_LoadVideoFFmpeg": LoadVideoFFmpegUpload, + "VHS_LoadVideoFFmpegPath": LoadVideoFFmpegPath, + "VHS_LoadImagePath": LoadImagePath, + "VHS_LoadImages": LoadImagesFromDirectoryUpload, + "VHS_LoadImagesPath": LoadImagesFromDirectoryPath, + "VHS_LoadAudio": LoadAudio, + "VHS_LoadAudioUpload": LoadAudioUpload, + "VHS_AudioToVHSAudio": AudioToVHSAudio, + "VHS_VHSAudioToAudio": VHSAudioToAudio, + "VHS_PruneOutputs": PruneOutputs, + "VHS_BatchManager": BatchManager, + "VHS_VideoInfo": VideoInfo, + "VHS_VideoInfoSource": VideoInfoSource, + "VHS_VideoInfoLoaded": VideoInfoLoaded, + "VHS_SelectFilename": SelectFilename, + # Batched Nodes + "VHS_VAEEncodeBatched": VAEEncodeBatched, + "VHS_VAEDecodeBatched": VAEDecodeBatched, + # Latent and Image nodes + "VHS_SplitLatents": SplitLatents, + "VHS_SplitImages": SplitImages, + "VHS_SplitMasks": SplitMasks, + "VHS_MergeLatents": MergeLatents, + "VHS_MergeImages": MergeImages, + "VHS_MergeMasks": MergeMasks, + "VHS_GetLatentCount": GetLatentCount, + "VHS_GetImageCount": GetImageCount, + "VHS_GetMaskCount": GetMaskCount, + "VHS_DuplicateLatents": RepeatLatents, + "VHS_DuplicateImages": RepeatImages, + "VHS_DuplicateMasks": RepeatMasks, 
+ "VHS_SelectEveryNthLatent": SelectEveryNthLatent, + "VHS_SelectEveryNthImage": SelectEveryNthImage, + "VHS_SelectEveryNthMask": SelectEveryNthMask, + "VHS_SelectLatents": SelectLatents, + "VHS_SelectImages": SelectImages, + "VHS_SelectMasks": SelectMasks, + "VHS_Unbatch": Unbatch, + "VHS_SelectLatest": SelectLatest, +} +NODE_DISPLAY_NAME_MAPPINGS = { + "VHS_VideoCombine": "Video Combine 🎥🅥🅗🅢", + "VHS_LoadVideo": "Load Video (Upload) 🎥🅥🅗🅢", + "VHS_LoadVideoPath": "Load Video (Path) 🎥🅥🅗🅢", + "VHS_LoadVideoFFmpeg": "Load Video FFmpeg (Upload) 🎥🅥🅗🅢", + "VHS_LoadVideoFFmpegPath": "Load Video FFmpeg (Path) 🎥🅥🅗🅢", + "VHS_LoadImagePath": "Load Image (Path) 🎥🅥🅗🅢", + "VHS_LoadImages": "Load Images (Upload) 🎥🅥🅗🅢", + "VHS_LoadImagesPath": "Load Images (Path) 🎥🅥🅗🅢", + "VHS_LoadAudio": "Load Audio (Path)🎥🅥🅗🅢", + "VHS_LoadAudioUpload": "Load Audio (Upload)🎥🅥🅗🅢", + "VHS_AudioToVHSAudio": "Audio to legacy VHS_AUDIO🎥🅥🅗🅢", + "VHS_VHSAudioToAudio": "Legacy VHS_AUDIO to Audio🎥🅥🅗🅢", + "VHS_PruneOutputs": "Prune Outputs 🎥🅥🅗🅢", + "VHS_BatchManager": "Meta Batch Manager 🎥🅥🅗🅢", + "VHS_VideoInfo": "Video Info 🎥🅥🅗🅢", + "VHS_VideoInfoSource": "Video Info (Source) 🎥🅥🅗🅢", + "VHS_VideoInfoLoaded": "Video Info (Loaded) 🎥🅥🅗🅢", + "VHS_SelectFilename": "Select Filename 🎥🅥🅗🅢", + # Batched Nodes + "VHS_VAEEncodeBatched": "VAE Encode Batched 🎥🅥🅗🅢", + "VHS_VAEDecodeBatched": "VAE Decode Batched 🎥🅥🅗🅢", + # Latent and Image nodes + "VHS_SplitLatents": "Split Latents 🎥🅥🅗🅢", + "VHS_SplitImages": "Split Images 🎥🅥🅗🅢", + "VHS_SplitMasks": "Split Masks 🎥🅥🅗🅢", + "VHS_MergeLatents": "Merge Latents 🎥🅥🅗🅢", + "VHS_MergeImages": "Merge Images 🎥🅥🅗🅢", + "VHS_MergeMasks": "Merge Masks 🎥🅥🅗🅢", + "VHS_GetLatentCount": "Get Latent Count 🎥🅥🅗🅢", + "VHS_GetImageCount": "Get Image Count 🎥🅥🅗🅢", + "VHS_GetMaskCount": "Get Mask Count 🎥🅥🅗🅢", + "VHS_DuplicateLatents": "Repeat Latents 🎥🅥🅗🅢", + "VHS_DuplicateImages": "Repeat Images 🎥🅥🅗🅢", + "VHS_DuplicateMasks": "Repeat Masks 🎥🅥🅗🅢", + "VHS_SelectEveryNthLatent": "Select Every Nth Latent 🎥🅥🅗🅢", + "VHS_SelectEveryNthImage": "Select Every Nth Image 🎥🅥🅗🅢", + "VHS_SelectEveryNthMask": "Select Every Nth Mask 🎥🅥🅗🅢", + "VHS_SelectLatents": "Select Latents 🎥🅥🅗🅢", + "VHS_SelectImages": "Select Images 🎥🅥🅗🅢", + "VHS_SelectMasks": "Select Masks 🎥🅥🅗🅢", + "VHS_Unbatch": "Unbatch 🎥🅥🅗🅢", + "VHS_SelectLatest": "Select Latest 🎥🅥🅗🅢", +} diff --git a/zavodik/nodes/ComfyUI-VideoHelperSuite/videohelpersuite/server.py b/zavodik/nodes/ComfyUI-VideoHelperSuite/videohelpersuite/server.py new file mode 100644 index 0000000000000000000000000000000000000000..5fed225a50c3294fb07d66445c3acae8c3b86081 --- /dev/null +++ b/zavodik/nodes/ComfyUI-VideoHelperSuite/videohelpersuite/server.py @@ -0,0 +1,294 @@ +import server +import folder_paths +import os +import subprocess +import re + +import asyncio +import av + +from .utils import is_url, get_sorted_dir_files_from_directory, ffmpeg_path, \ + validate_sequence, is_safe_path, strip_path, try_download_video, ENCODE_ARGS +from comfy.k_diffusion.utils import FolderOfImages + + +web = server.web + +@server.PromptServer.instance.routes.get("/vhs/viewvideo") +@server.PromptServer.instance.routes.get("/viewvideo") +async def view_video(request): + query = request.rel_url.query + path_res = await resolve_path(query) + if isinstance(path_res, web.Response): + return path_res + file, filename, output_dir = path_res + + if ffmpeg_path is None: + #Don't just return file, that provides arbitrary read access to any file + if is_safe_path(output_dir, strict=True): + return web.FileResponse(path=file) + + 
frame_rate = query.get('frame_rate', 8) + if query.get('format', 'video') == "folder": + os.makedirs(folder_paths.get_temp_directory(), exist_ok=True) + concat_file = os.path.join(folder_paths.get_temp_directory(), "image_sequence_preview.txt") + skip_first_images = int(query.get('skip_first_images', 0)) + select_every_nth = int(query.get('select_every_nth', 1)) or 1 + valid_images = get_sorted_dir_files_from_directory(file, skip_first_images, select_every_nth, FolderOfImages.IMG_EXTENSIONS) + if len(valid_images) == 0: + return web.Response(status=204) + with open(concat_file, "w") as f: + f.write("ffconcat version 1.0\n") + for path in valid_images: + f.write("file '" + os.path.abspath(path) + "'\n") + f.write("duration 0.125\n") + in_args = ["-safe", "0", "-i", concat_file] + else: + in_args = ["-i", file] + if '%' in file: + in_args = ['-framerate', str(frame_rate)] + in_args + #Do prepass to pull info + #breaks skip_first frames if this default is ever actually needed + base_fps = 30 + try: + proc = await asyncio.create_subprocess_exec(ffmpeg_path, *in_args, '-t', + '0','-f', 'null','-', stdout=subprocess.PIPE, + stderr=subprocess.PIPE, stdin=subprocess.DEVNULL) + _, res_stderr = await proc.communicate() + + match = re.search(': Video: (\\w+) .+, (\\d+) fps,', res_stderr.decode(*ENCODE_ARGS)) + if match: + base_fps = float(match.group(2)) + if match.group(1) == 'vp9': + #force libvpx for transparency + in_args = ['-c:v', 'libvpx-vp9'] + in_args + except subprocess.CalledProcessError as e: + print("An error occurred in the ffmpeg prepass:\n" \ + + e.stderr.decode(*ENCODE_ARGS)) + return web.Response(status=500) + vfilters = [] + target_rate = float(query.get('force_rate', 0)) or base_fps + modified_rate = target_rate / (float(query.get('select_every_nth',1)) or 1) + start_time = 0 + if 'start_time' in query: + start_time = float(query['start_time']) + elif float(query.get('skip_first_frames', 0)) > 0: + start_time = float(query.get('skip_first_frames'))/target_rate + if start_time > 1/modified_rate: + start_time += 1/modified_rate + if start_time > 0: + if start_time > 4: + post_seek = ['-ss', '4'] + pre_seek = ['-ss', str(start_time - 4)] + else: + post_seek = ['-ss', str(start_time)] + pre_seek = [] + else: + pre_seek = [] + post_seek = [] + + args = [ffmpeg_path, "-v", "error"] + pre_seek + in_args + post_seek + if target_rate != 0: + args += ['-r', str(modified_rate)] + if query.get('force_size','Disabled') != "Disabled": + size = query['force_size'].split('x') + if size[0] == '?' or size[1] == '?': + size[0] = "-2" if size[0] == '?' else f"'min({size[0]},iw)'" + size[1] = "-2" if size[1] == '?' else f"'min({size[1]},ih)'" + else: + #Aspect ratio is likely changed. 
A more complex command is required + #to crop the output to the new aspect ratio + ar = float(size[0])/float(size[1]) + vfilters.append(f"crop=if(gt({ar}\\,a)\\,iw\\,ih*{ar}):if(gt({ar}\\,a)\\,iw/{ar}\\,ih)") + size = ':'.join(size) + vfilters.append(f"scale={size}") + if len(vfilters) > 0: + args += ["-vf", ",".join(vfilters)] + if float(query.get('frame_load_cap', 0)) > 0: + args += ["-frames:v", query['frame_load_cap'].split('.')[0]] + #TODO:reconsider adding high frame cap/setting default frame cap on node + if query.get('deadline', 'realtime') == 'good': + deadline = 'good' + else: + deadline = 'realtime' + + args += ['-c:v', 'libvpx-vp9','-deadline', deadline, '-cpu-used', '8', '-f', 'webm', '-'] + + try: + proc = await asyncio.create_subprocess_exec(*args, stdout=subprocess.PIPE, + stdin=subprocess.DEVNULL) + try: + resp = web.StreamResponse() + resp.content_type = 'video/webm' + resp.headers["Content-Disposition"] = f"filename=\"{filename}\"" + await resp.prepare(request) + while len(bytes_read := await proc.stdout.read(2**20)) != 0: + await resp.write(bytes_read) + #Of dubious value given frequency of kill calls, but more correct + await proc.wait() + except (ConnectionResetError, ConnectionError) as e: + proc.kill() + except BrokenPipeError as e: + pass + return resp +@server.PromptServer.instance.routes.get("/vhs/viewaudio") +async def view_audio(request): + query = request.rel_url.query + path_res = await resolve_path(query) + if isinstance(path_res, web.Response): + return path_res + file, filename, output_dir = path_res + if ffmpeg_path is None: + #Don't just return file, that provides arbitrary read access to any file + if is_safe_path(output_dir, strict=True): + return web.FileResponse(path=file) + + in_args = ["-i", file] + start_time = 0 + if 'start_time' in query: + start_time = float(query['start_time']) + args = [ffmpeg_path, "-v", "error", '-vn'] + in_args + ['-ss', str(start_time)] + if float(query.get('duration', 0)) > 0: + args += ['-t', str(query['duration'])] + if query.get('deadline', 'realtime') == 'good': + deadline = 'good' + else: + deadline = 'realtime' + + args += ['-c:a', 'libopus','-deadline', deadline, '-cpu-used', '8', '-f', 'webm', '-'] + try: + proc = await asyncio.create_subprocess_exec(*args, stdout=subprocess.PIPE, + stdin=subprocess.DEVNULL) + try: + resp = web.StreamResponse() + resp.content_type = 'audio/webm' + resp.headers["Content-Disposition"] = f"filename=\"{filename}\"" + await resp.prepare(request) + while len(bytes_read := await proc.stdout.read(2**20)) != 0: + await resp.write(bytes_read) + #Of dubious value given frequency of kill calls, but more correct + await proc.wait() + except (ConnectionResetError, ConnectionError) as e: + proc.kill() + except BrokenPipeError as e: + pass + return resp + +query_cache = {} +@server.PromptServer.instance.routes.get("/vhs/queryvideo") +async def query_video(request): + query = request.rel_url.query + filepath = await resolve_path(query) + #TODO: cache lookup + if isinstance(filepath, web.Response): + return filepath + filepath = filepath[0] + if filepath.endswith(".webp"): + # ffmpeg doesn't support decoding animated WebP https://trac.ffmpeg.org/ticket/4907 + return web.json_response({}) + if filepath in query_cache and query_cache[filepath][0] == os.stat(filepath).st_mtime: + source = query_cache[filepath][1] + else: + source = {} + try: + with av.open(filepath) as cont: + stream = cont.streams.video[0] + source['fps'] = float(stream.average_rate) + source['duration'] = float(cont.duration / 
av.time_base)
+
+                if stream.codec_context.name == 'vp9':
+                    cc = av.Codec('libvpx-vp9', 'r').create()
+                else:
+                    cc = stream
+                def fit():
+                    for packet in cont.demux(video=0):
+                        yield from cc.decode(packet)
+                frame = next(fit())
+
+                source['size'] = [frame.width, frame.height]
+                source['alpha'] = 'a' in frame.format.name
+                source['frames'] = stream.metadata.get('NUMBER_OF_FRAMES', round(source['duration'] * source['fps']))
+                query_cache[filepath] = (os.stat(filepath).st_mtime, source)
+        except Exception:
+            pass
+    if not 'frames' in source:
+        return web.json_response({})
+    loaded = {}
+    loaded['duration'] = source['duration']
+    loaded['duration'] -= float(query.get('start_time',0))
+    loaded['fps'] = float(query.get('force_rate', 0)) or source.get('fps',1)
+    loaded['duration'] -= int(query.get('skip_first_frames', 0)) / loaded['fps']
+    loaded['fps'] /= int(query.get('select_every_nth', 1)) or 1
+    loaded['frames'] = round(loaded['duration'] * loaded['fps'])
+    return web.json_response({'source': source, 'loaded': loaded})
+
+async def resolve_path(query):
+    if "filename" not in query:
+        return web.Response(status=204)
+    filename = query["filename"]
+
+    #Path code misformats urls on windows and must be skipped
+    if is_url(filename):
+        file = await asyncio.to_thread(try_download_video, filename) or filename
+        output_dir, filename = os.path.split(file)
+        return file, filename, output_dir
+    else:
+        filename, output_dir = folder_paths.annotated_filepath(filename)
+
+    type = query.get("type", "output")
+    if type == "path":
+        #special case for path_based nodes
+        #NOTE: output_dir may be empty, but non-None
+        output_dir, filename = os.path.split(strip_path(filename))
+    if output_dir is None:
+        output_dir = folder_paths.get_directory_by_type(type)
+
+    if output_dir is None:
+        return web.Response(status=204)
+
+    if not is_safe_path(output_dir):
+        return web.Response(status=204)
+
+    if "subfolder" in query:
+        output_dir = os.path.join(output_dir, query["subfolder"])
+
+    filename = os.path.basename(filename)
+    file = os.path.join(output_dir, filename)
+
+    if not os.path.exists(file):
+        return web.Response(status=204)
+    if query.get('format', 'video') == 'folder':
+        if not os.path.isdir(file):
+            return web.Response(status=204)
+    else:
+        if not os.path.isfile(file) and not validate_sequence(file):
+            return web.Response(status=204)
+    return file, filename, output_dir
+
+@server.PromptServer.instance.routes.get("/vhs/getpath")
+@server.PromptServer.instance.routes.get("/getpath")
+async def get_path(request):
+    query = request.rel_url.query
+    if "path" not in query:
+        return web.Response(status=204)
+    #NOTE: path always ends in `/`, so this is functionally an lstrip
+    path = os.path.abspath(strip_path(query["path"]))
+
+    if not os.path.exists(path) or not is_safe_path(path):
+        return web.json_response([])
+
+    #Use get so None is default instead of keyerror
+    valid_extensions = query.get("extensions")
+    valid_items = []
+    for item in os.scandir(path):
+        try:
+            if item.is_dir():
+                valid_items.append(item.name + "/")
+                continue
+            if valid_extensions is None or item.name.split(".")[-1].lower() in valid_extensions:
+                valid_items.append(item.name)
+        except OSError:
+            #Broken symlinks can throw a very unhelpful "Invalid argument"
+            pass
+    valid_items.sort(key=lambda f: os.stat(os.path.join(path,f)).st_mtime)
+    return web.json_response(valid_items)
diff --git a/zavodik/nodes/ComfyUI-VideoHelperSuite/videohelpersuite/utils.py b/zavodik/nodes/ComfyUI-VideoHelperSuite/videohelpersuite/utils.py
new file mode 100644
index
0000000000000000000000000000000000000000..86b6393ac9e980eb06e83f1a3ded148927b8d7a9 --- /dev/null +++ b/zavodik/nodes/ComfyUI-VideoHelperSuite/videohelpersuite/utils.py @@ -0,0 +1,443 @@ +import hashlib +import os +from typing import Iterable +import shutil +import subprocess +import re +import time +from collections.abc import Mapping +from typing import Union +import functools +import torch +from torch import Tensor + +import server +from .logger import logger +import folder_paths + +BIGMIN = -(2**53-1) +BIGMAX = (2**53-1) + +DIMMAX = 8192 + +ENCODE_ARGS = ("utf-8", 'backslashreplace') + +def ffmpeg_suitability(path): + try: + version = subprocess.run([path, "-version"], check=True, + capture_output=True).stdout.decode(*ENCODE_ARGS) + except: + return 0 + score = 0 + #rough layout of the importance of various features + simple_criterion = [("libvpx", 20),("264",10), ("265",3), + ("svtav1",5),("libopus", 1)] + for criterion in simple_criterion: + if version.find(criterion[0]) >= 0: + score += criterion[1] + #obtain rough compile year from copyright information + copyright_index = version.find('2000-2') + if copyright_index >= 0: + copyright_year = version[copyright_index+6:copyright_index+9] + if copyright_year.isnumeric(): + score += int(copyright_year) + return score + +class MultiInput(str): + def __new__(cls, string, allowed_types="*"): + res = super().__new__(cls, string) + res.allowed_types=allowed_types + return res + def __ne__(self, other): + if self.allowed_types == "*" or other == "*": + return False + return other not in self.allowed_types +imageOrLatent = MultiInput("IMAGE", ["IMAGE", "LATENT"]) +floatOrInt = MultiInput("FLOAT", ["FLOAT", "INT"]) + +class ContainsAll(dict): + def __contains__(self, other): + return True + def __getitem__(self, key): + return super().get(key, (None, {})) + +if "VHS_FORCE_FFMPEG_PATH" in os.environ: + ffmpeg_path = os.environ.get("VHS_FORCE_FFMPEG_PATH") +else: + ffmpeg_paths = [] + try: + from imageio_ffmpeg import get_ffmpeg_exe + imageio_ffmpeg_path = get_ffmpeg_exe() + ffmpeg_paths.append(imageio_ffmpeg_path) + except: + if "VHS_USE_IMAGEIO_FFMPEG" in os.environ: + raise + logger.warn("Failed to import imageio_ffmpeg") + if "VHS_USE_IMAGEIO_FFMPEG" in os.environ: + ffmpeg_path = imageio_ffmpeg_path + else: + system_ffmpeg = shutil.which("ffmpeg") + if system_ffmpeg is not None: + ffmpeg_paths.append(system_ffmpeg) + if os.path.isfile("ffmpeg"): + ffmpeg_paths.append(os.path.abspath("ffmpeg")) + if os.path.isfile("ffmpeg.exe"): + ffmpeg_paths.append(os.path.abspath("ffmpeg.exe")) + if len(ffmpeg_paths) == 0: + logger.error("No valid ffmpeg found.") + ffmpeg_path = None + elif len(ffmpeg_paths) == 1: + #Evaluation of suitability isn't required, can take sole option + #to reduce startup time + ffmpeg_path = ffmpeg_paths[0] + else: + ffmpeg_path = max(ffmpeg_paths, key=ffmpeg_suitability) +gifski_path = os.environ.get("VHS_GIFSKI", None) +if gifski_path is None: + gifski_path = os.environ.get("JOV_GIFSKI", None) + if gifski_path is None: + gifski_path = shutil.which("gifski") +ytdl_path = os.environ.get("VHS_YTDL", None) or shutil.which('yt-dlp') \ + or shutil.which('youtube-dl') +download_history = {} +def try_download_video(url): + if ytdl_path is None: + return None + if url in download_history: + return download_history[url] + os.makedirs(folder_paths.get_temp_directory(), exist_ok=True) + #Format information could be added to only download audio for Load Audio, + #but this gets hairy if same url is also used for video. 
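+    #(download_history caches by url alone, so an audio-only download would
+    #also be returned for later video requests of the same url)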
+ #Best to just always keep defaults + #dl_format = ['-f', 'ba'] if is_audio else [] + try: + res = subprocess.run([ytdl_path, "--print", "after_move:filepath", + "-P", folder_paths.get_temp_directory(), url], + capture_output=True, check=True) + #strip newline + file = res.stdout.decode(*ENCODE_ARGS)[:-1] + except subprocess.CalledProcessError as e: + raise Exception("An error occurred in the yt-dl process:\n" \ + + e.stderr.decode(*ENCODE_ARGS)) + file = None + download_history[url] = file + return file + +def is_safe_path(path, strict=False): + if "VHS_STRICT_PATHS" not in os.environ and not strict: + return True + basedir = os.path.abspath('.') + try: + common_path = os.path.commonpath([basedir, path]) + except: + #Different drive on windows + return False + return common_path == basedir + +def get_sorted_dir_files_from_directory(directory: str, skip_first_images: int=0, select_every_nth: int=1, extensions: Iterable=None): + directory = strip_path(directory) + dir_files = os.listdir(directory) + dir_files = sorted(dir_files) + dir_files = [os.path.join(directory, x) for x in dir_files] + dir_files = list(filter(lambda filepath: os.path.isfile(filepath), dir_files)) + # filter by extension, if needed + if extensions is not None: + extensions = list(extensions) + new_dir_files = [] + for filepath in dir_files: + ext = "." + filepath.split(".")[-1] + if ext.lower() in extensions: + new_dir_files.append(filepath) + dir_files = new_dir_files + # start at skip_first_images + dir_files = dir_files[skip_first_images:] + dir_files = dir_files[0::select_every_nth] + return dir_files + + +# modified from https://stackoverflow.com/questions/22058048/hashing-a-file-in-python +def calculate_file_hash(filename: str, hash_every_n: int = 1): + #Larger video files were taking >.5 seconds to hash even when cached, + #so instead the modified time from the filesystem is used as a hash + h = hashlib.sha256() + h.update(filename.encode()) + h.update(str(os.path.getmtime(filename)).encode()) + return h.hexdigest() + +prompt_queue = server.PromptServer.instance.prompt_queue +def requeue_workflow_unchecked(): + """Requeues the current workflow without checking for multiple requeues""" + currently_running = prompt_queue.currently_running + value = next(iter(currently_running.values())) + + # Handle both old (5 values) and new (6 values) ComfyUI versions + if len(value) == 6: + (_, prompt_id, prompt, extra_data, outputs_to_execute, _) = value + else: + (_, prompt_id, prompt, extra_data, outputs_to_execute) = value + + #Ensure batch_managers are marked stale + prompt = prompt.copy() + for uid in prompt: + if prompt[uid]['class_type'] == 'VHS_BatchManager': + prompt[uid]['inputs']['requeue'] = prompt[uid]['inputs'].get('requeue',0)+1 + + #execution.py has guards for concurrency, but server doesn't. 
+ #TODO: Check that this won't be an issue + number = -server.PromptServer.instance.number + server.PromptServer.instance.number += 1 + prompt_id = str(server.uuid.uuid4()) + # Put back with 6 elements to match what ComfyUI expects + sensitive = value[5] if len(value) > 5 else {} + prompt_queue.put((number, prompt_id, prompt, extra_data, outputs_to_execute, sensitive)) + +requeue_guard = [None, 0, 0, {}] + +def requeue_workflow(requeue_required=(-1,True)): + assert(len(prompt_queue.currently_running) == 1) + global requeue_guard + + value = next(iter(prompt_queue.currently_running.values())) + + # Handle both old (5 values) and new (6 values) ComfyUI versions + if len(value) == 6: + (run_number, _, prompt, extra_data, outputs_to_execute, _) = value + else: + (run_number, _, prompt, extra_data, outputs_to_execute) = value + + if requeue_guard[0] != run_number: + #Calculate a count of how many outputs are managed by a batch manager + managed_outputs=0 + for bm_uid in prompt: + if prompt[bm_uid]['class_type'] == 'VHS_BatchManager': + for output_uid in prompt: + if prompt[output_uid]['class_type'] in ["VHS_VideoCombine"]: + for inp in prompt[output_uid]['inputs'].values(): + if inp == [bm_uid, 0]: + managed_outputs+=1 + requeue_guard = [run_number, 0, managed_outputs, {}] + requeue_guard[1] = requeue_guard[1]+1 + requeue_guard[3][requeue_required[0]] = requeue_required[1] + if requeue_guard[1] == requeue_guard[2] and max(requeue_guard[3].values()): + requeue_workflow_unchecked() + +def get_audio(file, start_time=0, duration=0): + args = [ffmpeg_path, "-i", file] + if start_time > 0: + args += ["-ss", str(start_time)] + if duration > 0: + args += ["-t", str(duration)] + try: + #TODO: scan for sample rate and maintain + res = subprocess.run(args + ["-f", "f32le", "-"], + capture_output=True, check=True) + audio = torch.frombuffer(bytearray(res.stdout), dtype=torch.float32) + match = re.search(', (\\d+) Hz, (\\w+), ',res.stderr.decode(*ENCODE_ARGS)) + except subprocess.CalledProcessError as e: + raise Exception(f"VHS failed to extract audio from {file}:\n" \ + + e.stderr.decode(*ENCODE_ARGS)) + if match: + ar = int(match.group(1)) + #NOTE: Just throwing an error for other channel types right now + #Will deal with issues if they come + ac = {"mono": 1, "stereo": 2}[match.group(2)] + else: + ar = 44100 + ac = 2 + audio = audio.reshape((-1,ac)).transpose(0,1).unsqueeze(0) + return {'waveform': audio, 'sample_rate': ar} + +class LazyAudioMap(Mapping): + def __init__(self, file, start_time, duration): + self.file = file + self.start_time=start_time + self.duration=duration + self._dict=None + def __getitem__(self, key): + if self._dict is None: + self._dict = get_audio(self.file, self.start_time, self.duration) + return self._dict[key] + def __iter__(self): + if self._dict is None: + self._dict = get_audio(self.file, self.start_time, self.duration) + return iter(self._dict) + def __len__(self): + if self._dict is None: + self._dict = get_audio(self.file, self.start_time, self.duration) + return len(self._dict) +def lazy_get_audio(file, start_time=0, duration=0, **kwargs): + return LazyAudioMap(file, start_time, duration) + +def is_url(url): + return url.split("://")[0] in ["http", "https"] + +def validate_sequence(path): + #Check if path is a valid ffmpeg sequence that points to at least one file + (path, file) = os.path.split(path) + if not os.path.isdir(path): + return False + match = re.search('%0?\\d+d', file) + if not match: + return False + seq = match.group() + if seq == '%d': + seq = '\\\\d+' 
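+    #backslashes are doubled because seq is used as a re.sub replacement below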
+    else:
+        seq = '\\\\d{%s}' % seq[1:-1]
+    file_matcher = re.compile(re.sub('%0?\\d+d', seq, file))
+    for file in os.listdir(path):
+        if file_matcher.fullmatch(file):
+            return True
+    return False
+
+def strip_path(path):
+    #This leaves whitespace inside quotes and only a single "
+    #thus ' ""test"' -> '"test'
+    #consider path.strip(string.whitespace+"\"")
+    #or weightier re.fullmatch("[\\s\"]*(.+?)[\\s\"]*", path).group(1)
+    path = path.strip()
+    if path.startswith("\""):
+        path = path[1:]
+    if path.endswith("\""):
+        path = path[:-1]
+    return path
+def hash_path(path):
+    if path is None:
+        return "input"
+    if is_url(path):
+        return "url"
+    if not os.path.isfile(path):
+        return "DNE"
+    return calculate_file_hash(strip_path(path))
+
+
+def validate_path(path, allow_none=False, allow_url=True):
+    if path is None:
+        return allow_none
+    if is_url(path):
+        #Probably not feasible to check if url resolves here
+        if not allow_url:
+            return "URLs are unsupported for this path"
+        return is_safe_path(path)
+    if not os.path.isfile(strip_path(path)):
+        return "Invalid file path: {}".format(path)
+    return is_safe_path(path)
+
+
+def validate_index(index: int, length: int=0, is_range: bool=False, allow_negative=False, allow_missing=False) -> int:
+    # if part of range, do nothing
+    if is_range:
+        return index
+    # otherwise, validate index
+    # validate not out of range - only when latent_count is passed in
+    if length > 0 and index > length-1 and not allow_missing:
+        raise IndexError(f"Index '{index}' out of range for {length} item(s).")
+    # if negative, validate not out of range
+    if index < 0:
+        if not allow_negative:
+            raise IndexError(f"Negative indices not allowed, but was '{index}'.")
+        conv_index = length+index
+        if conv_index < 0 and not allow_missing:
+            raise IndexError(f"Index '{index}', converted to '{conv_index}' out of range for {length} item(s).")
+        index = conv_index
+    return index
+
+
+def convert_to_index_int(raw_index: str, length: int=0, is_range: bool=False, allow_negative=False, allow_missing=False) -> int:
+    try:
+        return validate_index(int(raw_index), length=length, is_range=is_range, allow_negative=allow_negative, allow_missing=allow_missing)
+    except ValueError as e:
+        raise ValueError(f"Index '{raw_index}' must be an integer.", e)
+
+
+def convert_str_to_indexes(indexes_str: str, length: int=0, allow_missing=False) -> list[int]:
+    if not indexes_str:
+        return []
+    int_indexes = list(range(0, length))
+    allow_negative = length > 0
+    chosen_indexes = []
+    # parse string - allow positive ints, negative ints, and ranges separated by ':'
+    groups = indexes_str.split(",")
+    groups = [g.strip() for g in groups]
+    for g in groups:
+        # parse range of indices (e.g. 2:16)
+        if ':' in g:
+            index_range = g.split(":", 2)
+            index_range = [r.strip() for r in index_range]
+
+            start_index = index_range[0]
+            if len(start_index) > 0:
+                start_index = convert_to_index_int(start_index, length=length, is_range=True, allow_negative=allow_negative, allow_missing=allow_missing)
+            else:
+                start_index = 0
+            end_index = index_range[1]
+            if len(end_index) > 0:
+                end_index = convert_to_index_int(end_index, length=length, is_range=True, allow_negative=allow_negative, allow_missing=allow_missing)
+            else:
+                end_index = length
+            # support step as well, to allow things like reversing, every-other, etc.
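+            # e.g. '0:16:2' selects every other item of the first 16, and '::-1' reverses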
+            step = 1
+            if len(index_range) > 2:
+                step = index_range[2]
+                if len(step) > 0:
+                    step = convert_to_index_int(step, length=length, is_range=True, allow_negative=True, allow_missing=True)
+                else:
+                    step = 1
+            # if latents were passed in, base indices on known latent count
+            if len(int_indexes) > 0:
+                chosen_indexes.extend(int_indexes[start_index:end_index][::step])
+            # otherwise, assume indices are valid
+            else:
+                chosen_indexes.extend(list(range(start_index, end_index, step)))
+        # parse individual indices
+        else:
+            chosen_indexes.append(convert_to_index_int(g, length=length, allow_negative=allow_negative, allow_missing=allow_missing))
+    return chosen_indexes
+
+
+def select_indexes(input_obj: Union[Tensor, list], idxs: list):
+    if type(input_obj) == Tensor:
+        return input_obj[idxs]
+    else:
+        return [input_obj[i] for i in idxs]
+
+def merge_filter_args(args, ftype="-vf"):
+    #TODO This doesn't account for filter_complex
+    #Will likely need to convert all filters to filter complex in the future
+    #But that requires source/output deduplication
+    try:
+        start_index = args.index(ftype)+1
+        index = start_index
+        while True:
+            index = args.index(ftype, index)
+            args[start_index] += ',' + args[index+1]
+            args.pop(index)
+            args.pop(index)
+    except ValueError:
+        pass
+
+def select_indexes_from_str(input_obj: Union[Tensor, list], indexes: str, err_if_missing=True, err_if_empty=True):
+    real_idxs = convert_str_to_indexes(indexes, len(input_obj), allow_missing=not err_if_missing)
+    if err_if_empty and len(real_idxs) == 0:
+        raise Exception(f"Nothing was selected based on indexes found in '{indexes}'.")
+    return select_indexes(input_obj, real_idxs)
+
+def hook(obj, attr):
+    def dec(f):
+        f = functools.update_wrapper(f, getattr(obj,attr))
+        setattr(obj,attr,f)
+        return f
+    return dec
+
+def cached(duration):
+    def dec(f):
+        cached_ret = None
+        cache_time = 0
+        def cached_func():
+            nonlocal cache_time, cached_ret
+            if time.time() > cache_time + duration or cached_ret is None:
+                cache_time = time.time()
+                cached_ret = f()
+            return cached_ret
+        return cached_func
+    return dec
diff --git a/zavodik/nodes/ComfyUI-VideoHelperSuite/web/js/VHS.core.js b/zavodik/nodes/ComfyUI-VideoHelperSuite/web/js/VHS.core.js
new file mode 100644
index 0000000000000000000000000000000000000000..39eeecd1c23192ae63a4e732ef43b35ad9114f14
--- /dev/null
+++ b/zavodik/nodes/ComfyUI-VideoHelperSuite/web/js/VHS.core.js
@@ -0,0 +1,2563 @@
+import { app } from '../../../scripts/app.js'
+import { api } from '../../../scripts/api.js'
+import { setWidgetConfig } from '../../../extensions/core/widgetInputs.js'
+import { applyTextReplacements } from "../../../scripts/utils.js";
+
+function chainCallback(object, property, callback) {
+    if (object == undefined) {
+        //This should not happen.
+        console.error("Tried to add callback to non-existent object")
+        return;
+    }
+    if (property in object && object[property]) {
+        const callback_orig = object[property]
+        object[property] = function () {
+            const r = callback_orig.apply(this, arguments);
+            return callback.apply(this, arguments) ?? r
+        };
+    } else {
+        object[property] = callback;
+    }
+}
+
+function getNodeById(id, graph=app.graph) {
+    let cg = graph
+    let node = undefined
+    for (let sid of (''+id).split(':')) {
+        node = cg?.getNodeById?.(sid)
+        cg = node?.subgraph
+    }
+    return node
+}
+
+const convDict = {
+    VHS_LoadImages : ["directory", null, "image_load_cap", "skip_first_images", "select_every_nth"],
+    VHS_LoadImagesPath : ["directory", "image_load_cap", "skip_first_images", "select_every_nth"],
+    VHS_VideoCombine : ["frame_rate", "loop_count", "filename_prefix", "format", "pingpong", "save_image"],
+    VHS_LoadVideo : ["video", "force_rate", "force_size", "frame_load_cap", "skip_first_frames", "select_every_nth"],
+    VHS_LoadVideoPath : ["video", "force_rate", "force_size", "frame_load_cap", "skip_first_frames", "select_every_nth"],
+};
+const renameDict = {VHS_VideoCombine : {save_output : "save_image"}}
+function useKVState(nodeType) {
+    chainCallback(nodeType.prototype, "onNodeCreated", function () {
+        chainCallback(this, "onConfigure", function(info) {
+            if (!this.widgets) {
+                //Node has no widgets, there is nothing to restore
+                return
+            }
+            if (typeof(info.widgets_values) != "object") {
+                //widgets_values is in some unknown inactionable format
+                return
+            }
+            let widgetDict = info.widgets_values
+            if (info.widgets_values.length) {
+                //widgets_values is in the old list format
+                if (this.type in convDict) {
+                    //a conversion list exists for this node type
+                    let convList = convDict[this.type];
+                    if(info.widgets_values.length >= convList.length) {
+                        //has all required fields
+                        widgetDict = {}
+                        for (let i = 0; i < convList.length; i++) {
+                            if(!convList[i]) {
+                                //Element should not be processed (upload button on load image sequence)
+                                continue
+                            }
+                            widgetDict[convList[i]] = info.widgets_values[i];
+                        }
+                    } else {
+                        //widgets_values is missing elements marked as required
+                        //let it fall through to failure state
+                    }
+                }
+            }
+            if ('force_size' in widgetDict) {
+                //force_size has been phased out, migrate state
+                if (widgetDict.force_size.includes?.('x')) {
+                    let sizes = widgetDict.force_size.split('x')
+                    if (sizes[0] != '?') {
+                        widgetDict.custom_width = parseInt(sizes[0])
+                    } else {
+                        widgetDict.custom_width = 0
+                    }
+                    if (sizes[1] != '?') {
+                        widgetDict.custom_height = parseInt(sizes[1])
+                    } else {
+                        widgetDict.custom_height = 0
+                    }
+                } else {
+                    if (['Disabled', 'Custom Height'].includes(widgetDict.force_size)) {
+                        widgetDict.custom_width = 0
+                    }
+                    if (['Disabled', 'Custom Width'].includes(widgetDict.force_size)) {
+                        widgetDict.custom_height = 0
+                    }
+                }
+            }
+            if (widgetDict.videopreview?.params?.force_size) {
+                delete widgetDict.videopreview.params.force_size
+            }
+            let inputs = {}
+            for (let i of this.inputs) {
+                inputs[i.name] = i
+            }
+            if (widgetDict.length == undefined) {
+                for (let w of this.widgets) {
+                    if (w.type =="button") {
+                        continue
+                    }
+                    if (w.name in widgetDict) {
+                        w.value = widgetDict[w.name];
+                        w.callback?.(w.value)
+                    } else {
+                        //Check for a legacy name that needs migrating
+                        if (this.type in renameDict && w.name in renameDict[this.type]) {
+                            if (renameDict[this.type][w.name] in widgetDict) {
+                                w.value = widgetDict[renameDict[this.type][w.name]]
+                                w.callback?.(w.value)
+                                continue
+                            }
+                        }
+                        //attempt to restore default value
+                        let inputs = LiteGraph.getNodeType(this.type).nodeData.input;
+                        let initialValue = null;
+                        if (inputs?.required?.hasOwnProperty(w.name)) {
+                            if (inputs.required[w.name][1]?.hasOwnProperty("default")) {
+                                initialValue = inputs.required[w.name][1].default;
+                            } else if
(inputs.required[w.name][0].length) { + initialValue = inputs.required[w.name][0][0]; + } + } else if (inputs?.optional?.hasOwnProperty(w.name)) { + if (inputs.optional[w.name][1]?.hasOwnProperty("default")) { + initialValue = inputs.optional[w.name][1].default; + } else if (inputs.optional[w.name][0].length) { + initialValue = inputs.optional[w.name][0][0]; + } + } + if (initialValue) { + w.value = initialValue; + w.callback?.(w.value) + } + } + if (w.name in inputs && w.config) { + setWidgetConfig(inputs[w.name], w.config) + } + } + } else { + //Saved data was not a map made by this method + //and a conversion dict for it does not exist + //It's likely an array and that has been blindly applied + if (info?.widgets_values?.length != this.widgets.length) { + //Widget could not have restored properly + //Note if multiple node loads fail, only the latest error dialog displays + app.ui.dialog.show("Failed to restore node: " + this.title + "\nPlease remove and re-add it.") + this.bgcolor = "#C00" + } + } + }); + chainCallback(this, "onSerialize", function(info) { + info.widgets_values = {}; + if (!this.widgets) { + //object has no widgets, there is nothing to store + return; + } + for (let w of this.widgets) { + info.widgets_values[w.name] = w.value; + } + }); + }) +} +var helpDOM; +if (!app.helpDOM) { + helpDOM = document.createElement("div"); + app.VHSHelp = helpDOM +} +function initHelpDOM() { + let parentDOM = document.createElement("div"); + parentDOM.className = "VHS_floatinghelp" + document.body.appendChild(parentDOM) + parentDOM.appendChild(helpDOM) + helpDOM.className = "litegraph"; + let scrollbarStyle = document.createElement('style'); + scrollbarStyle.innerHTML = ` + .VHS_floatinghelp { + scrollbar-width: 6px; + scrollbar-color: #0003 #0000; + &::-webkit-scrollbar { + background: transparent; + width: 6px; + } + &::-webkit-scrollbar-thumb { + background: #0005; + border-radius: 20px + } + &::-webkit-scrollbar-button { + display: none; + } + } + .VHS_loopedvideo::-webkit-media-controls-mute-button { + display:none; + } + .VHS_loopedvideo::-webkit-media-controls-fullscreen-button { + display:none; + } + ` + scrollbarStyle.id = 'scroll-properties' + parentDOM.appendChild(scrollbarStyle) + chainCallback(app.canvas, "onDrawForeground", function (ctx, visible_rect){ + let n = helpDOM.node + if (!n || !n?.graph) { + parentDOM.style['left'] = '-5000px' + return + } + //draw : function(ctx, node, widgetWidth, widgetY, height) { + //update widget position, even if off screen + const transform = ctx.getTransform(); + const scale = app.canvas.ds.scale;//gets the litegraph zoom + //calculate coordinates with account for browser zoom + const bcr = app.canvas.canvas.getBoundingClientRect() + const x = transform.e*scale/transform.a + bcr.x; + const y = transform.f*scale/transform.a + bcr.y; + //TODO: text reflows at low zoom. 
investigate alternatives + Object.assign(parentDOM.style, { + left: (x+(n.pos[0] + n.size[0]+15)*scale) + "px", + top: (y+(n.pos[1]-LiteGraph.NODE_TITLE_HEIGHT)*scale) + "px", + width: "400px", + minHeight: "100px", + maxHeight: "600px", + overflowY: 'scroll', + transformOrigin: '0 0', + transform: 'scale(' + scale + ',' + scale +')', + fontSize: '18px', + backgroundColor: LiteGraph.NODE_DEFAULT_BGCOLOR, + boxShadow: '0 0 10px black', + borderRadius: '4px', + padding: '3px', + zIndex: 3, + position: "absolute", + display: 'inline', + }); + }); + function setCollapse(el, doCollapse) { + if (doCollapse) { + el.children[0].children[0].innerHTML = '+' + Object.assign(el.children[1].style, { + color: '#CCC', + overflowX: 'hidden', + width: '0px', + minWidth: 'calc(100% - 20px)', + textOverflow: 'ellipsis', + whiteSpace: 'nowrap', + }) + for (let child of el.children[1].children) { + if (child.style.display != 'none'){ + child.origDisplay = child.style.display + } + child.style.display = 'none' + } + } else { + el.children[0].children[0].innerHTML = '-' + Object.assign(el.children[1].style, { + color: '', + overflowX: '', + width: '100%', + minWidth: '', + textOverflow: '', + whiteSpace: '', + }) + for (let child of el.children[1].children) { + child.style.display = child.origDisplay + } + } + } + helpDOM.collapseOnClick = function() { + let doCollapse = this.children[0].innerHTML == '-' + setCollapse(this.parentElement, doCollapse) + } + helpDOM.selectHelp = function(name, value) { + //attempt to navigate to name in help + function collapseUnlessMatch(items,t) { + var match = items.querySelector('[vhs_title="' + t + '"]') + if (!match) { + for (let i of items.children) { + if (i.innerHTML.slice(0,t.length+5).includes(t)) { + match = i + break + } + } + } + if (!match) { + return null + } + //For longer documentation items with fewer collapsable elements, + //scroll to make sure the entirety of the selected item is visible + //This has the unfortunate side effect of trying to scroll the main + //window if the documentation windows is forcibly offscreen, + //but it's easy to simply scroll the main window back and seems to + //have no visual side effects + match.scrollIntoView(false) + window.scrollTo(0,0) + for (let i of items.querySelectorAll('.VHS_collapse')) { + if (i.contains(match)) { + setCollapse(i, false) + } else { + setCollapse(i, true) + } + } + return match + } + let target = collapseUnlessMatch(helpDOM, name) + if (target && value) { + collapseUnlessMatch(target, value) + } + } + let titleContext = document.createElement("canvas").getContext("2d") + titleContext.font = app.canvas.title_text_font; + helpDOM.calculateTitleLength = function(text) { + return titleContext.measureText(text).width + } + helpDOM.addHelp = function(node, nodeType, description) { + if (!description) { + return + } + //Pad computed size for the clickable question mark + let originalComputeSize = node.computeSize + node.computeSize = function() { + let size = originalComputeSize.apply(this, arguments) + if (!this.title) { + return size + } + let title_width = helpDOM.calculateTitleLength(this.title) + size[0] = Math.max(size[0], title_width + LiteGraph.NODE_TITLE_HEIGHT*2) + return size + } + + node.description = description + chainCallback(node, "onDrawForeground", function (ctx) { + if (this?.flags?.collapsed) { + return + } + //draw question mark + ctx.save() + ctx.font = 'bold 20px Arial' + ctx.fillText("?", this.size[0]-17, -8) + ctx.restore() + }) + chainCallback(node, "onMouseDown", function (e, pos, 
canvas) { + if (this?.flags?.collapsed) { + return + } + //On click would be preferred, but this'll be good enough + if (pos[1] < 0 && pos[0] + LiteGraph.NODE_TITLE_HEIGHT > this.size[0]) { + //corner question mark clicked + if (helpDOM.node == this) { + helpDOM.node = undefined + } else { + helpDOM.node = this; + helpDOM.innerHTML = this.description || "no help provided " + for (let e of helpDOM.querySelectorAll('.VHS_collapse')) { + e.children[0].onclick = helpDOM.collapseOnClick + e.children[0].style.cursor = 'pointer' + } + for (let e of helpDOM.querySelectorAll('.VHS_precollapse')) { + setCollapse(e, true) + } + for (let e of helpDOM.querySelectorAll('.VHS_loopedvideo')) { + e?.play() + } + helpDOM.parentElement.scrollTo(0,0) + } + return true + } + }) + let timeout = null + chainCallback(node, "onMouseMove", function (e, pos, canvas) { + if (timeout) { + clearTimeout(timeout) + timeout = null + } + if (helpDOM.node != this) { + return + } + timeout = setTimeout(() => { + let n = this + if (pos[0] > 0 && pos[0] < n.size[0] + && pos[1] > 0 && pos[1] < n.size[1]) { + //TODO: provide help specific to element clicked + let inputRows = Math.max(n.inputs?.length || 0, n.outputs?.length || 0) + if (pos[1] < LiteGraph.NODE_SLOT_HEIGHT * inputRows) { + let row = Math.floor((pos[1] - 7) / LiteGraph.NODE_SLOT_HEIGHT) + if (pos[0] < n.size[0]/2) { + if (row < n.inputs.length) { + helpDOM.selectHelp(n.inputs[row].name) + } + } else { + if (row < n.outputs.length) { + helpDOM.selectHelp(n.outputs[row].name) + } + } + } else { + //probably widget, but widgets have variable height. + let basey = LiteGraph.NODE_SLOT_HEIGHT * inputRows + 6 + for (let w of n.widgets) { + if (w.y) { + basey = w.y + } + let wheight = LiteGraph.NODE_WIDGET_HEIGHT+4 + if (w.computeSize) { + wheight = w.computeSize(n.size[0])[1] + } + if (pos[1] < basey + wheight) { + helpDOM.selectHelp(w.name, w.value) + break + } + basey += wheight + } + } + } + }, 500) + }) + chainCallback(node, "onMouseLeave", function (e, pos, canvas) { + if (timeout) { + clearTimeout(timeout) + timeout = null + } + }); + } +} + +function fitHeight(node) { + node.setSize([node.size[0], node.computeSize([node.size[0], node.size[1]])[1]]) + node?.graph?.setDirtyCanvas(true); +} +function startDraggingItems(node, pointer) { + app.canvas.emitBeforeChange() + app.canvas.graph?.beforeChange() + // Ensure that dragging is properly cleaned up, on success or failure. + pointer.finally = () => { + app.canvas.isDragging = false + app.canvas.graph?.afterChange() + app.canvas.emitAfterChange() + } + app.canvas.processSelect(node, pointer.eDown, true) + app.canvas.isDragging = true +} +function processDraggedItems(e) { + if (e.shiftKey || LiteGraph.alwaysSnapToGrid) + app.canvas?.graph?.snapToGrid(app.canvas.selectedItems) + app.canvas.dirty_canvas = true + app.canvas.dirty_bgcanvas = true + app.canvas.onNodeMoved?.(findFirstNode(app.canvas.selectedItems)) +} +function allowDragFromWidget(widget) { + widget.onPointerDown = function(pointer, node) { + pointer.onDragStart = () => startDraggingItems(node, pointer) + pointer.onDragEnd = processDraggedItems + app.canvas.dirty_canvas = true + return true + } +} + +//Cloud specific auth code. Short circuits if not on cloud +async function getAuthHeader() { + try { + const authStore = await api.getAuthStore() + return authStore ? 
await authStore.getAuthHeader() : null + } catch (error) { + console.warn('Failed to get auth header:', error) + return null + } +} + +async function uploadFile(file, progressCallback) { + try { + // Wrap file in formdata so it includes filename + const body = new FormData(); + const i = file.webkitRelativePath.lastIndexOf('/'); + const subfolder = file.webkitRelativePath.slice(0,i+1) + const new_file = new File([file], file.name, { + type: file.type, + lastModified: file.lastModified, + }); + body.append("image", new_file); + if (i > 0) { + body.append("subfolder", subfolder); + } + const url = api.apiURL("/upload/image") + const resp = await new Promise((resolve) => { + let req = new XMLHttpRequest() + req.upload.onprogress = (e) => progressCallback?.(e.loaded/e.total) + req.onload = () => resolve(req) + req.open('post', url, true) + getAuthHeader().then((headers) => { + headers ??= {} + for (const key in headers) + req.setRequestHeader(key, headers[key]) + req.send(body) + }) + }) + + if (resp.status !== 200) { + alert(resp.status + " - " + resp.statusText); + } + return resp + } catch (error) { + alert(error); + } +} + +function addVAEOutputToggle(nodeType, nodeData) { + chainCallback(nodeType.prototype, "onNodeCreated", function() { + this.reject_ue_connection = (input) => input?.name == "vae" + }) + chainCallback(nodeType.prototype, "onConnectionsChange", function(contype, slot, iscon, linfo) { + let slotType = this.inputs[slot]?.type + if (contype == LiteGraph.INPUT && slotType == "VAE") { + if (iscon && linfo) { + if (this.linkTimeout) { + clearTimeout(this.linkTimeout) + this.linkTimeout = false + } else if (this.outputs[0].type == "IMAGE") { + this.linkTimeout = setTimeout(() => { + if (this.outputs[0].type != "IMAGE") { + return + } + this.linkTimeout = false + this.disconnectOutput(0); + }, 50) + } + this.outputs[0].name = 'LATENT'; + this.outputs[0].type = 'LATENT'; + } else{ + if (this.outputs[0].type == "LATENT") { + this.linkTimeout = setTimeout(() => { + this.linkTimeout = false + this.disconnectOutput(0); + }, 50) + } + this.outputs[0].name = "IMAGE"; + this.outputs[0].type = "IMAGE"; + } + } + }); +} +function addVAEInputToggle(nodeType, nodeData) { + chainCallback(nodeType.prototype, "onNodeCreated", function() { + this.reject_ue_connection = (input) => input?.name == "vae" + }) + chainCallback(nodeType.prototype, "onConnectionsChange", function(contype, slot, iscon, linf) { + if (contype == LiteGraph.INPUT && slot == 3 && this.inputs[3].type == "VAE") { + if (iscon && linf) { + if (this.linkTimeout) { + clearTimeout(this.linkTimeout) + this.linkTimeout = false + } else if (this.inputs[0].type == "IMAGE") { + this.linkTimeout = setTimeout(() => { + //workaround for out of order loading + if (this.inputs[0].type != "IMAGE") { + return + } + this.linkTimeout = false + this.disconnectInput(0); + }, 50) + } + this.inputs[0].type = 'LATENT'; + } else { + if (this.inputs[0].type == "LATENT") { + this.linkTimeout = setTimeout(() => { + this.linkTimeout = false + this.disconnectInput(0); + }, 50) + } + this.inputs[0].type = "IMAGE"; + } + } + }); +} +function cloneType(nodeType, nodeData) { + chainCallback(nodeType.prototype, "onNodeCreated", function() { + this.changeOutputType = function (new_type) { + this.linkTimeout = setTimeout(() => { + this.linkTimeout = false + if (this.outputs[0].type != new_type) { + this.outputs[0].type = new_type + //check and potentially remove links + if (!this.outputs[0].links) { + return + } + let removed_links = [] + for (let link_id of 
this.outputs[0].links) {
+                        let link = app.graph.links[link_id]
+                        if (!link)
+                            debugger
+                        let target_node = app.graph.getNodeById(link.target_id)
+                        let target_input = target_node.inputs[link.target_slot]
+                        let keep = LiteGraph.isValidConnection(new_type, target_input.type)
+                        if (!keep) {
+                            link.disconnect(app.graph, 'input')
+                            removed_links.push(link_id)
+                        }
+                        target_node.onConnectionsChange?.(LiteGraph.INPUT,
+                            link.target_slot, keep, link, target_input)
+                    }
+                    this.outputs[0].links = this.outputs[0].links
+                        .filter((v) => !removed_links.includes(v))
+                }
+            }, 50)
+        }
+        this.changeOutputType("VHS_DUMMY_NONE")
+    });
+    chainCallback(nodeType.prototype, "onConnectionsChange", function(contype, slot, iscon, linf) {
+        if (contype == LiteGraph.INPUT && slot == 0) {
+            let new_type = "VHS_DUMMY_NONE"
+            if (iscon && linf) {
+                new_type = app.graph.getNodeById(linf.origin_id).outputs[linf.origin_slot].type
+            }
+            if (this.linkTimeout) {
+                clearTimeout(this.linkTimeout)
+            }
+            this.changeOutputType(new_type)
+        }
+    });
+}
+
+function addDateFormatting(nodeType, field, timestamp_widget = false) {
+    chainCallback(nodeType.prototype, "onNodeCreated", function() {
+        const widget = this.widgets.find((w) => w.name === field);
+        widget.serializeValue = () => {
+            return applyTextReplacements(app, widget.value);
+        };
+    });
+}
+function addTimestampWidget(nodeType, nodeData, targetWidget) {
+    const newWidgets = {};
+    for (let key in nodeData.input.required) {
+        if (key == targetWidget) {
+            //TODO: account for duplicate entries?
+            newWidgets["timestamp_directory"] = ["BOOLEAN", {"default": true}]
+        }
+        newWidgets[key] = nodeData.input.required[key];
+    }
+    nodeData.input.required = newWidgets;
+    chainCallback(nodeType.prototype, "onNodeCreated", function () {
+        const directoryWidget = this.widgets.find((w) => w.name === "directory_name");
+        const timestampWidget = this.widgets.find((w) => w.name === "timestamp_directory");
+        directoryWidget.serializeValue = () => {
+            if (timestampWidget.value) {
+                //ignore actual value and return timestamp
+                return formatDate("yyyy-MM-ddThh:mm:ss", new Date());
+            }
+            return directoryWidget.value
+        };
+        timestampWidget._value = timestampWidget.value;
+        Object.defineProperty(timestampWidget, "value", {
+            set : function(value) {
+                this._value = value;
+                directoryWidget.disabled = value;
+            },
+            get : function() {
+                return this._value;
+            }
+        });
+    });
+}
+function initializeLoadFormat(nodeType, nodeData) {
+    if (!nodeData?.input?.optional?.format) {
+        return
+    }
+    chainCallback(nodeType.prototype, "onNodeCreated", function() {
+        let node = this
+        let formatWidget = this.widgets.find((w) => w.name === "format")
+        formatWidget.options.formats = nodeData.input.optional.format[1].formats
+        let base = {}
+        for (let widget of this.widgets) {
+            if (['force_rate', 'custom_width', 'custom_height',
+                'frame_load_cap'].includes(widget.name)) {
+                //TODO: filter these options?
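+                //Snapshot the unmodified options for these widgets; the format
+                //callback below rebuilds widget.options from this clean copy so
+                //per-format overrides don't stack across format changes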
+ base[widget.name] = widget.options + } + } + chainCallback(formatWidget, "callback", function(value) { + let format = this.options.formats[value] + if (!format) { + return + } + if ('target_rate' in format) { + format.force_rate = {'reset': format.target_rate} + } + if ('dim' in format) { + format.custom_width = {'step': format.dim[0], 'mod': format.dim[1]} + format.custom_height = {'step': format.dim[0], 'mod': format.dim[1]} + if (format.dim[2]) { + format.custom_width.reset = format.dim[2] + } + if (format.dim[3]) { + format.custom_height.reset = format.dim[3] + } + } + if ('frames' in format) { + format.frame_load_cap = {'step': format.frames[0], 'mod': format.frames[1]} + } + for (let widget of node.widgets) { + if (widget.name in base) { + let wasDefault = widget.options?.reset == widget.value + widget.options = Object.assign({}, base[widget.name], format[widget.name]) + if (wasDefault && widget.options.reset != undefined) { + widget.value = widget.options.reset + } + widget.callback(widget.value) + } + } + + }); + let capWidget = this.widgets.find((w) => w.name === "frame_load_cap") + capWidget.annotation = (value, width) => { + let max_frames = this.video_query?.loaded?.frames + if (!max_frames || value && value < max_frames) { + return + } + let format = formatWidget.options.formats[formatWidget.value] + const div = format?.frames?.[0] ?? 1 + const mod = format?.frames?.[1] ?? 0 + let loadable_frames = max_frames + if ((max_frames % div) != mod) { + loadable_frames = ((max_frames - mod)/div|0) * div + mod + } + return loadable_frames + "\u21FD" + } + let rateWidget = this.widgets.find((w) => w.name === "force_rate") + rateWidget.annotation = (value, width) => { + if (value == 0 && this.video_query?.source?.fps != undefined) { + return roundToPrecision(this.video_query.source.fps, 2) + "\u21FD" + } + } + }); +} + +function addUploadWidget(nodeType, nodeData, widgetName, type="video") { + let accept = {'video': ["video/webm","video/mp4","video/x-matroska","image/gif"], + 'audio': ["audio/mpeg","audio/wav","audio/x-wav","audio/ogg"]} + chainCallback(nodeType.prototype, "onNodeCreated", function() { + const node = this + const pathWidget = this.widgets.find((w) => w.name === widgetName); + const fileInput = document.createElement("input"); + chainCallback(this, "onRemoved", () => { + fileInput?.remove(); + }); + if (type == "folder") { + Object.assign(fileInput, { + type: "file", + style: "display: none", + webkitdirectory: true, + onchange: async () => { + const directory = fileInput.files[0].webkitRelativePath; + const i = directory.lastIndexOf('/'); + if (i <= 0) { + throw "No directory found"; + } + const path = directory.slice(0,directory.lastIndexOf('/')) + if (pathWidget.options.values.includes(path)) { + alert("A folder of the same name already exists"); + return; + } + let successes = 0; + const onProg = (p) => this.progress = (successes + p) / fileInput.files.length + for(const file of fileInput.files) { + if ((await uploadFile(file, onProg)).status == 200) { + successes++; + } else { + this.progress = undefined + //Upload failed, but some prior uploads may have succeeded + //Stop future uploads to prevent cascading failures + //and only add to list if an upload has succeeded + if (successes > 0) { + break + } else { + return; + } + } + } + this.progress = undefined + pathWidget.options.values.push(path); + pathWidget.value = path; + if (pathWidget.callback) { + pathWidget.callback(path) + } + }, + }); + } else { + let accept = {'video': 
["video/webm","video/mp4","video/x-matroska","image/gif"], + 'audio': ["audio/mpeg","audio/wav","audio/x-wav","audio/ogg"]}[type] + async function doUpload(file) { + let resp = await uploadFile(file, (p) => node.progress = p) + node.progress = undefined + if (resp.status != 200) { + return false + } + const filename = JSON.parse(resp.responseText).name; + pathWidget.options.values.push(filename); + pathWidget.value = filename; + if (pathWidget.callback) { + pathWidget.callback(filename) + } + return true + } + Object.assign(fileInput, { + type: "file", + accept: accept.join(','), + style: "display: none", + onchange: async () => { + if (fileInput.files.length) { + return await doUpload(fileInput.files[0]) + } + }, + }); + this.onDragOver = (e) => !!e?.dataTransfer?.types?.includes?.('Files') + this.onDragDrop = async function(e) { + if (!e?.dataTransfer?.types?.includes?.('Files')) { + return false + } + //TODO: Allow dragging multiple files at once? + const item = e.dataTransfer?.files?.[0] + if (accept.includes(item?.type)) { + return await doUpload(item) + } + return false + } + } + document.body.append(fileInput); + let uploadWidget = this.addWidget("button", "choose " + type + " to upload", "image", () => { + //clear the active click event + app.canvas.node_widget = null + + fileInput.click(); + }); + uploadWidget.options.serialize = false; + + + }); +} +function addAudioPreview(nodeType, isInput=true) { + chainCallback(nodeType.prototype, "onNodeCreated", function() { + var element = document.createElement("audio"); + element.controls = true + const previewNode = this; + var previewWidget = this.addDOMWidget("audiopreview", "preview", element, { + serialize: false, + hideOnZoom: true, + getValue() { + return element.value; + }, + setValue(v) { + element.value = v; + }, + }); + previewWidget.computeSize = function(width) { + return [width, 50]; + } + var timeout = null; + this.updateParameters = (params, force_update) => { + if (!previewWidget.value.params) { + if(typeof(previewWidget.value) != 'object') { + previewWidget.value = {} + } + previewWidget.value.params = {} + } + Object.assign(previewWidget.value.params, params) + if (!force_update && + app.ui.settings.getSettingValue("VHS.AdvancedPreviews") == 'Never') { + return; + } + if (timeout) { + clearTimeout(timeout); + } + if (force_update) { + previewWidget.updateSource(); + } else { + timeout = setTimeout(() => previewWidget.updateSource(),100); + } + }; + previewWidget.updateSource = function () { + if (this.value.params == undefined) { + return; + } + let params = {} + let advp = app.ui.settings.getSettingValue("VHS.AdvancedPreviews") + if (advp == 'Never') { + advp = false + } else if (advp == 'Input Only') { + advp = isInput + } else { + advp = true + } + Object.assign(params, this.value.params);//shallow copy + params.timestamp = Date.now() + if (!advp) { + element.src = api.apiURL('/view?' + new URLSearchParams(params)); + } else { + params.deadline = app.ui.settings.getSettingValue("VHS.AdvancedPreviewsDeadline") + element.src = api.apiURL('/vhs/viewaudio?' 
+ new URLSearchParams(params)); + } + } + previewWidget.callback = previewWidget.updateSource + + + //setup widget tracking + function update(key) { + return function(value) { + let params = {} + params[key] = this.value + previewNode?.updateParameters(params) + } + } + let widgetMap = { 'seek_seconds': 'start_time', 'duration': 'duration', + 'start_time': 'start_time' } + for (let widget of this.widgets) { + if (widget.name in widgetMap) { + if (typeof(widgetMap[widget.name]) == 'function') { + chainCallback(widget, "callback", widgetMap[widget.name]); + } else { + chainCallback(widget, "callback", update(widgetMap[widget.name])) + } + } + if (widget.type != "button") { + widget.callback?.(widget.value) + } + } + }); +} + +function addVideoPreview(nodeType, isInput=true) { + chainCallback(nodeType.prototype, "onNodeCreated", function() { + var element = document.createElement("div"); + const previewNode = this; + var previewWidget = this.addDOMWidget("videopreview", "preview", element, { + serialize: false, + hideOnZoom: false, + getValue() { + return element.value; + }, + setValue(v) { + element.value = v; + }, + }); + allowDragFromWidget(previewWidget) + previewWidget.computeSize = function(width) { + if (this.aspectRatio && !this.parentEl.hidden) { + let height = (previewNode.size[0]-20)/ this.aspectRatio + 10; + if (!(height > 0)) { + height = 0; + } + this.computedHeight = height + 10; + return [width, height]; + } + return [width, -4];//no loaded src, widget should not display + } + element.addEventListener('contextmenu', (e) => { + e.preventDefault() + return app.canvas._mousedown_callback(e) + }, true); + element.addEventListener('pointerdown', (e) => { + e.preventDefault() + return app.canvas._mousedown_callback(e) + }, true); + element.addEventListener('mousewheel', (e) => { + e.preventDefault() + return app.canvas._mousewheel_callback(e) + }, true); + element.addEventListener('pointermove', (e) => { + e.preventDefault() + return app.canvas._mousemove_callback(e) + }, true); + element.addEventListener('pointerup', (e) => { + e.preventDefault() + return app.canvas._mouseup_callback(e) + }, true); + element.addEventListener('dragover', (e) => { + //A little hacky, but allows drag events onto the preview itself + e.preventDefault(); + e.dataTransfer.dropEffect = "copy"; + app.dragOverNode = this + }) + previewWidget.value = {hidden: false, paused: false, params: {}, + muted: app.ui.settings.getSettingValue("VHS.DefaultMute")} + previewWidget.parentEl = document.createElement("div"); + previewWidget.parentEl.className = "vhs_preview"; + previewWidget.parentEl.style['width'] = "100%" + element.appendChild(previewWidget.parentEl); + previewWidget.videoEl = document.createElement("video"); + previewWidget.videoEl.controls = false; + previewWidget.videoEl.loop = true; + previewWidget.videoEl.muted = true; + previewWidget.videoEl.style['width'] = "100%" + previewWidget.videoEl.addEventListener("loadedmetadata", () => { + + previewWidget.aspectRatio = previewWidget.videoEl.videoWidth / previewWidget.videoEl.videoHeight; + fitHeight(this); + }); + previewWidget.videoEl.addEventListener("error", () => { + //TODO: consider a way to properly notify the user why a preview isn't shown. 
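+            //Hiding parentEl makes computeSize fall through to its [width, -4]
+            //branch, so fitHeight collapses the preview area entirely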
+ previewWidget.parentEl.hidden = true; + fitHeight(this); + }); + previewWidget.videoEl.onmouseenter = () => { + previewWidget.videoEl.muted = previewWidget.value.muted + }; + previewWidget.videoEl.onmouseleave = () => { + previewWidget.videoEl.muted = true; + }; + + previewWidget.imgEl = document.createElement("img"); + previewWidget.imgEl.style['width'] = "100%" + previewWidget.imgEl.hidden = true; + previewWidget.imgEl.onload = () => { + previewWidget.aspectRatio = previewWidget.imgEl.naturalWidth / previewWidget.imgEl.naturalHeight; + fitHeight(this); + }; + previewWidget.parentEl.appendChild(previewWidget.videoEl) + previewWidget.parentEl.appendChild(previewWidget.imgEl) + var timeout = null; + this.updateParameters = (params, force_update) => { + if (!previewWidget.value.params) { + if(typeof(previewWidget.value) != 'object') { + previewWidget.value = {hidden: false, paused: false} + } + previewWidget.value.params = {} + } + if (!Object.entries(params).some(([k,v]) => previewWidget.value.params[k] !== v)) { + return + } + Object.assign(previewWidget.value.params, params) + if (!force_update && + app.ui.settings.getSettingValue("VHS.AdvancedPreviews") == 'Never') { + return; + } + if (timeout) { + clearTimeout(timeout); + } + if (force_update) { + previewWidget.updateSource(); + } else { + timeout = setTimeout(() => previewWidget.updateSource(),100); + } + }; + previewWidget.updateSource = function () { + if (this.value.params == undefined) { + return; + } + let params = {} + let advp = app.ui.settings.getSettingValue("VHS.AdvancedPreviews") + if (advp == 'Never') { + advp = false + } else if (advp == 'Input Only') { + advp = isInput + } else { + advp = true + } + Object.assign(params, this.value.params);//shallow copy + params.timestamp = Date.now() + this.parentEl.hidden = this.value.hidden; + if (params.format?.split('/')[0] == 'video' + || advp && (params.format?.split('/')[1] == 'gif') + || params.format == 'folder') { + + this.videoEl.autoplay = !this.value.paused && !this.value.hidden; + if (!advp) { + this.videoEl.src = api.apiURL('/view?' + new URLSearchParams(params)); + } else { + let target_width = (previewNode.size[0]-20)*2 || 256; + let minWidth = app.ui.settings.getSettingValue("VHS.AdvancedPreviewsMinWidth") + if (target_width < minWidth) { + target_width = minWidth + } + if (!params.custom_width || !params.custom_height) { + params.force_size = target_width+"x?" + } else { + let ar = params.custom_width/params.custom_height + params.force_size = target_width+"x"+(target_width/ar) + } + params.deadline = app.ui.settings.getSettingValue("VHS.AdvancedPreviewsDeadline") + this.videoEl.src = api.apiURL('/vhs/viewvideo?' + new URLSearchParams(params)); + } + this.videoEl.hidden = false; + this.imgEl.hidden = true; + } else if (params.format?.split('/')[0] == 'image'){ + //Is animated image + this.imgEl.src = api.apiURL('/view?' + new URLSearchParams(params)); + this.videoEl.hidden = true; + this.imgEl.hidden = false; + } + delete previewNode.video_query + const doQuery = async () => { + if (!previewWidget?.value?.params?.filename) { + return + } + let qurl = api.apiURL('/vhs/queryvideo?' 
+ new URLSearchParams(previewWidget.value.params)) + let query = undefined + try { + let query_res = await fetch(qurl) + query = await query_res.json() + } catch(e) { + return + } + previewNode.video_query = query + } + doQuery() + } + previewWidget.callback = previewWidget.updateSource + previewWidget.parentEl.appendChild(previewWidget.videoEl) + previewWidget.parentEl.appendChild(previewWidget.imgEl) + }); +} +let copiedPath = undefined +function addPreviewOptions(nodeType) { + chainCallback(nodeType.prototype, "getExtraMenuOptions", function(_, options) { + // The intended way of appending options is returning a list of extra options, + // but this isn't used in widgetInputs.js and would require + // less generalization of chainCallback + let optNew = [] + const previewWidget = this.widgets.find((w) => w.name === "videopreview"); + + let url = null + if (previewWidget.videoEl?.hidden == false && previewWidget.videoEl.src) { + if (['input', 'output', 'temp'].includes(previewWidget.value.params.type)) { + //Use full quality video + url = api.apiURL('/view?' + new URLSearchParams(previewWidget.value.params)); + //Workaround for 16bit png: Just do first frame + url = url.replace('%2503d', '001') + } + } else if (previewWidget.imgEl?.hidden == false && previewWidget.imgEl.src) { + url = previewWidget.imgEl.src; + url = new URL(url); + } + if (this.video_query?.source) { + let info_string = this.video_query.source.size.join('x') + + '@' + this.video_query.source.fps + 'fps ' + + this.video_query.source.frames + 'frames' + optNew.push({content: info_string, disabled: true}) + } + if (url) { + optNew.push( + { + content: "Open preview", + callback: () => { + window.open(url, "_blank") + }, + }, + { + content: "Save preview", + callback: () => { + const a = document.createElement("a"); + a.href = url; + a.setAttribute("download", previewWidget.value.params.filename); + document.body.append(a); + a.click(); + requestAnimationFrame(() => a.remove()); + }, + } + ); + if (previewWidget.value.params.fullpath) { + copiedPath = previewWidget.value.params.fullpath + const blob = new Blob([previewWidget.value.params.fullpath], + { type: 'text/plain'}) + optNew.push({ + content: "Copy output filepath", + callback: async () => { + await navigator.clipboard.write([ + new ClipboardItem({ + 'text/plain': blob + })])} + }); + } + if (previewWidget.value.params.workflow) { + let wParams = {...previewWidget.value.params, + filename: previewWidget.value.params.workflow} + let wUrl = api.apiURL('/view?' + new URLSearchParams(wParams)); + optNew.push({ + content: "Save workflow image", + callback: () => { + const a = document.createElement("a"); + a.href = wUrl; + a.setAttribute("download", previewWidget.value.params.workflow); + document.body.append(a); + a.click(); + requestAnimationFrame(() => a.remove()); + } + }); + } + } + const PauseDesc = (previewWidget.value.paused ? "Resume" : "Pause") + " preview"; + if(previewWidget.videoEl.hidden == false) { + optNew.push({content: PauseDesc, callback: () => { + //animated images can't be paused and are more likely to cause performance issues. + //changing src to a single keyframe is possible, + //For now, the option is disabled if an animated image is being displayed + if(previewWidget.value.paused) { + previewWidget.videoEl?.play(); + } else { + previewWidget.videoEl?.pause(); + } + previewWidget.value.paused = !previewWidget.value.paused; + }}); + } + //TODO: Consider hiding elements if no video preview is available yet. 
+        //It would reduce confusion at the cost of functionality
+        //(if a video preview lags the computer, the user should be able to hide in advance)
+        const visDesc = (previewWidget.value.hidden ? "Show" : "Hide") + " preview";
+        optNew.push({content: visDesc, callback: () => {
+            if (!previewWidget.videoEl.hidden && !previewWidget.value.hidden) {
+                previewWidget.videoEl.pause();
+            } else if (previewWidget.value.hidden && !previewWidget.videoEl.hidden && !previewWidget.value.paused) {
+                previewWidget.videoEl.play();
+            }
+            previewWidget.value.hidden = !previewWidget.value.hidden;
+            previewWidget.parentEl.hidden = previewWidget.value.hidden;
+            fitHeight(this);
+        }});
+        optNew.push({content: "Sync preview", callback: () => {
+            //TODO: address case where videos have varying length
+            //Consider a system of sync groups which are opt-in?
+            for (let p of document.getElementsByClassName("vhs_preview")) {
+                for (let child of p.children) {
+                    if (child.tagName == "VIDEO") {
+                        child.currentTime = 0;
+                    } else if (child.tagName == "IMG") {
+                        //reassigning src restarts an animated image
+                        child.src = child.src;
+                    }
+                }
+            }
+        }});
+        const muteDesc = (previewWidget.value.muted ? "Unmute" : "Mute") + " preview"
+        optNew.push({content: muteDesc, callback: () => {
+            previewWidget.value.muted = !previewWidget.value.muted
+        }})
+        if(options.length > 0 && options[0] != null && optNew.length > 0) {
+            optNew.push(null);
+        }
+        options.unshift(...optNew);
+    });
+}
+function addFormatWidgets(nodeType, nodeData) {
+    chainCallback(nodeType.prototype, "onNodeCreated", function() {
+        var formatWidget = null;
+        var formatWidgetIndex = -1;
+        for(let i = 0; i < this.widgets.length; i++) {
+            if (this.widgets[i].name === "format"){
+                formatWidget = this.widgets[i];
+                formatWidgetIndex = i+1;
+                break
+            }
+        }
+        let formatWidgetsCount = 0;
+        chainCallback(formatWidget, "callback", (value) => {
+            const formats = (LiteGraph.registered_node_types[this.type]
+                ?.nodeData?.input?.required?.format?.[1]?.formats)
+            let newWidgets = [];
+            if (formats?.[value]) {
+                let formatWidgets = formats[value]
+                for (let wDef of formatWidgets) {
+                    let type = wDef[2]?.widgetType ?? wDef[1]
+                    if (Array.isArray(type)) {
+                        type = "COMBO"
+                    }
+                    app.widgets[type](this, wDef[0], wDef.slice(1), app)
+                    let w = this.widgets.pop()
+                    w.config = wDef.slice(1)
+                    newWidgets.push(w)
+                }
+            }
+            let removed = this.widgets.splice(formatWidgetIndex,
+                formatWidgetsCount, ...newWidgets);
+            let newNames = new Set(newWidgets.map((w) => w.name))
+            for (let w of removed) {
+                w?.onRemove?.()
+                //skip widgets that are being re-added under the same name
+                if (newNames.has(w.name)) {
+                    continue
+                }
+                //I do not like the performance of this, but it's safe
+                let slot = this.inputs.findIndex((i) => i.name == w.name)
+                if (slot >= 0) {
+                    this.removeInput(slot)
+                }
+            }
+            for (let w of newWidgets) {
+                let existingInput = this.inputs.find((i) => i.name == w.name)
+                if (existingInput) {
+                    setWidgetConfig(existingInput, w.config)
+                    //TODO: Consider forcing disconnection if props change?
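+                    //the existing link is kept; only the input's widget config
+                    //is refreshed with the new format's rules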
+                } else {
+                    //NOTE: config is applied in wrapped addInput call
+                    this.addInput(w.name, w.config[0], {widget: {name: w.name}})
+                }
+            }
+            fitHeight(this);
+            formatWidgetsCount = newWidgets.length;
+        });
+    });
+}
+function addLoadCommon(nodeType, nodeData) {
+    addVideoPreview(nodeType);
+    initializeLoadFormat(nodeType, nodeData)
+    addPreviewOptions(nodeType);
+    chainCallback(nodeType.prototype, "onNodeCreated", function() {
+        //widget.callback adds unused arguments which need culling
+        const node = this
+        function update(key) {
+            return function(value) {
+                let params = {}
+                params[key] = this.value
+                node?.updateParameters(params)
+            }
+        }
+        let prior_ar = -2
+        const widthWidget = this.widgets.find((w) => w.name === "custom_width");
+        const heightWidget = this.widgets.find((w) => w.name === "custom_height");
+        function updateAR(value) {
+            let new_ar = -1
+            //both dimensions must be nonzero
+            if (widthWidget.value && heightWidget.value) {
+                new_ar = widthWidget.value / heightWidget.value
+            }
+            if (new_ar != prior_ar) {
+                node?.updateParameters({'custom_width': widthWidget.value,
+                    'custom_height': heightWidget.value})
+                prior_ar = new_ar
+            }
+        }
+        const offsetWidget = this.widgets.find((w) => w.name === "start_time");
+        if (offsetWidget) {
+            Object.defineProperty(offsetWidget.options, "step", {
+                set : (value) => {},
+                get : () => {
+                    return 1 / (this.video_query?.loaded?.fps ?? 1)
+                }
+            })
+        }
+        let widgetMap = {'frame_load_cap': 'frame_load_cap',
+            'skip_first_frames': 'skip_first_frames', 'select_every_nth': 'select_every_nth',
+            'start_time': 'start_time', 'force_rate': 'force_rate',
+            'custom_width': updateAR, 'custom_height': updateAR,
+            'image_load_cap': 'image_load_cap', 'skip_first_images': 'skip_first_images'
+        }
+        for (let widget of this.widgets) {
+            if (widget.name in widgetMap) {
+                if (typeof(widgetMap[widget.name]) == 'function') {
+                    chainCallback(widget, "callback", widgetMap[widget.name]);
+                } else {
+                    chainCallback(widget, "callback", update(widgetMap[widget.name]))
+                }
+            }
+            if (widget.type != "button") {
+                widget.callback?.(widget.value)
+            }
+        }
+    });
+}
+
+function path_stem(path) {
+    let i = path.lastIndexOf("/");
+    if (i >= 0) {
+        return [path.slice(0,i+1),path.slice(i+1)];
+    }
+    return ["",path];
+}
+function searchBox(event, [x,y], node) {
+    //Ensure only one dialogue shows at a time
+    if (this.prompt)
+        return;
+    this.prompt = true;
+
+    let pathWidget = this;
+    let dialog = document.createElement("div");
+    dialog.className = "litegraph litesearchbox graphdialog rounded"
+    //dialog markup: .name label, .value input, OK button, .helper list (queried below)
+    dialog.innerHTML = '<span class="name">Path</span><input autofocus type="text" class="value"/><button class="rounded">OK</button><div class="helper"></div>'
+    dialog.close = () => {
+        dialog.remove();
+    }
+    document.body.append(dialog);
+    if (app.canvas.ds.scale > 1) {
+        dialog.style.transform = "scale(" + app.canvas.ds.scale + ")";
+    }
+    var name_element = dialog.querySelector(".name");
+    var input = dialog.querySelector(".value");
+    var options_element = dialog.querySelector(".helper");
+    input.value = pathWidget.value;
+
+    var timeout = null;
+    let last_path = null;
+    let extensions = pathWidget.options.vhs_path_extensions
+
+    input.addEventListener("keydown", (e) => {
+        dialog.is_modified = true;
+        if (e.keyCode == 27) {
+            //ESC
+            dialog.close();
+        } else if (e.keyCode == 13 && e.target.localName != "textarea") {
+            pathWidget.value = input.value;
+            if (pathWidget.callback) {
+                pathWidget.callback(pathWidget.value);
+            }
+            dialog.close();
+        } else {
+            if (e.keyCode == 9) {
+                //TAB
+                input.value = last_path + options_element.firstChild.innerText;
+                e.preventDefault();
+                e.stopPropagation();
+            } else if (e.ctrlKey && (e.keyCode == 87 || e.keyCode == 66)) {
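+                //strips the last path component, like word-delete in a shell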
//Ctrl+w or Ctrl+b + //most browsers won't support, but it's good QOL for those that do + input.value = path_stem(input.value.slice(0,-1))[0] + e.preventDefault(); + e.stopPropagation(); + } else if (e.ctrlKey && e.keyCode == 71) { + //Ctrl+g + //Temporarily disables extension filtering to show all files + e.preventDefault(); + e.stopPropagation(); + extensions = undefined + last_path = null; + } + if (timeout) { + clearTimeout(timeout); + } + timeout = setTimeout(updateOptions, 10); + return; + } + this.prompt=false; + e.preventDefault(); + e.stopPropagation(); + }); + + var button = dialog.querySelector("button"); + button.addEventListener("click", (e) => { + pathWidget.value = input.value; + if (pathWidget.callback) { + pathWidget.callback(pathWidget.value); + } + //unsure why dirty is set here, but not on enter-key above + node.graph.setDirtyCanvas(true); + dialog.close(); + this.prompt = false; + }); + var rect = app.canvas.canvas.getBoundingClientRect(); + var offsetx = -20; + var offsety = -20; + if (rect) { + offsetx -= rect.left; + offsety -= rect.top; + } + + if (event) { + dialog.style.left = event.clientX + offsetx + "px"; + dialog.style.top = event.clientY + offsety + "px"; + } else { + dialog.style.left = canvas.width * 0.5 + offsetx + "px"; + dialog.style.top = canvas.height * 0.5 + offsety + "px"; + } + //Search code + let options = [] + function addResult(name, isDir) { + let el = document.createElement("div"); + el.innerText = name; + el.className = "litegraph lite-search-item"; + if (isDir) { + el.className += " is-dir"; + el.addEventListener("click", (e) => { + input.value = last_path+name + if (timeout) { + clearTimeout(timeout); + } + timeout = setTimeout(updateOptions, 10); + }); + } else { + el.addEventListener("click", (e) => { + pathWidget.value = last_path+name; + if (pathWidget.callback) { + pathWidget.callback(pathWidget.value); + } + dialog.close(); + pathWidget.prompt = false; + }); + } + options_element.appendChild(el); + } + async function updateOptions() { + timeout = null; + let [path, remainder] = path_stem(input.value); + if (last_path != path) { + //fetch options. Must block execution here, so update should be async? + let params = {path : path} + if (extensions) { + params.extensions = extensions + } + let optionsURL = api.apiURL('/vhs/getpath?' 
+ new URLSearchParams(params)); + try { + let resp = await fetch(optionsURL); + options = await resp.json(); + options = options.map((o) => o.replace('.','\0')) + options = options.sort() + options = options.map((o) => o.replace('\0','.')) + } catch(e) { + options = [] + } + last_path = path; + } + options_element.innerHTML = ''; + //filter options based on remainder + for (let option of options) { + if (option.startsWith(remainder)) { + let isDir = option.endsWith('/') + addResult(option, isDir); + } + } + } + + setTimeout(async function() { + input.focus(); + await updateOptions(); + }, 10); + + return dialog; +} +function button_action(widget) { + if ( + widget.options?.reset == undefined && + widget.options?.disable == undefined + ) { + return 'None' + } + if ( + widget.options.reset != undefined && + widget.value != widget.options.reset + ) { + return 'Reset' + } + if ( + widget.options.disable != undefined && + widget.value != widget.options.disable + ) { + return 'Disable' + } + if (widget.options.reset != undefined) { + return 'No Reset' + } + return 'No Disable' +} +function fitText(ctx, text, maxLength) { + if (maxLength <= 0) { + return ['', 0] + } + let fullLength = ctx.measureText(text).width + if (fullLength < maxLength) { + return [text, fullLength] + } + //determine approx safe cutoff + let cutoff = maxLength / fullLength * text.length | 0 + let shortened = text.slice(0, Math.max(0, cutoff - 2)) + '…' + return [shortened, ctx.measureText(shortened).width] +} +function fitPath(ctx, path, maxLength) { + let fullLength = ctx.measureText(path).width + if (fullLength < maxLength) { + return [path, fullLength] + } + //determine approx safe cutoff + let len = (maxLength / fullLength * path.length | 0) - 1 + + let displayPath = '' + let filename = path_stem(path)[1] + if (filename.length > len-2) { + //may all fit, but can't squeeze more info + displayPath = filename.substr(0,len); + } else { + //TODO: find solution for windows, path[1] == ':'? + let isAbs = path[0] == '/'; + let partial = path.substr(path.length - (isAbs ? len-2:len-1)) + let cutoff = partial.indexOf('/'); + if (cutoff < 0) { + //Can occur, but there isn't a nicer way to format + displayPath = path.substr(path.length-len); + } else { + displayPath = (isAbs ? '/…':'…') + partial.substr(cutoff); + } + } + return [displayPath, ctx.measureText(displayPath).width] +} +function roundToPrecision(num, precision) { + let strnum = Number(num).toFixed(precision) + let deci = strnum.indexOf('.') + if (deci > 0) { + let i = strnum.length - 1 + while (i > deci && strnum[i] == '0') { + i-- + } + if (i == deci) { + i-- + } + return strnum.slice(0, i+1) + } + return strnum +} +function inner_value_change(widget, value, node, pos) { + widget.value = value + if (widget.options?.property && widget.options.property in node.properties) { + node.setProperty(widget.options.property, value) + } + if (widget.callback) { + widget.callback(widget.value, app.canvas, node, event) + } +} +function drawAnnotated(ctx, node, widget_width, y, H) { + const litegraph_base = LiteGraph + // In vueNodes mode, always show text since Vue renders at 1:1 scale + const show_text = LiteGraph.vueNodesMode || app.canvas.ds.scale >= (app.canvas.low_quality_zoom_threshold ?? 
0.5) + const margin = 15 + ctx.strokeStyle = litegraph_base.WIDGET_OUTLINE_COLOR + ctx.fillStyle = litegraph_base.WIDGET_BGCOLOR + ctx.beginPath() + if (show_text) + ctx.roundRect(margin, y, widget_width - margin * 2, H, [H * 0.5]) + else ctx.rect(margin, y, widget_width - margin * 2, H) + ctx.fill() + if (show_text) { + if (!this.disabled) ctx.stroke() + const button = button_action(this) + if (button != 'None') { + ctx.save() + if (button.startsWith('No ')) { + ctx.fillStyle = litegraph_base.WIDGET_OUTLINE_COLOR + ctx.strokeStyle = litegraph_base.WIDGET_OUTLINE_COLOR + } else { + ctx.fillStyle = litegraph_base.WIDGET_TEXT_COLOR + ctx.strokeStyle = litegraph_base.WIDGET_TEXT_COLOR + } + ctx.beginPath() + if (button.endsWith('Reset')) { + ctx.arc(widget_width - margin - 26, y + H/2, 4, Math.PI*3/2, Math.PI) + ctx.stroke() + ctx.beginPath() + ctx.moveTo(widget_width - margin - 26, y + H/2 - 1.5) + ctx.lineTo(widget_width - margin - 26, y + H/2 - 6.5) + ctx.lineTo(widget_width - margin - 30, y + H/2 - 3.5) + ctx.fill() + } else { + ctx.arc(widget_width - margin - 26, y + H/2, 4, Math.PI*2/3, Math.PI*8/3) + ctx.moveTo(widget_width - margin - 26 - 8 ** .5, y + H/2 + 8 ** .5) + ctx.lineTo(widget_width - margin - 26 + 8 ** .5, y + H/2 - 8 ** .5) + ctx.stroke() + } + ctx.restore() + } + ctx.fillStyle = litegraph_base.WIDGET_TEXT_COLOR + if (!this.disabled) { + ctx.beginPath() + ctx.moveTo(margin + 16, y + 5) + ctx.lineTo(margin + 6, y + H * 0.5) + ctx.lineTo(margin + 16, y + H - 5) + ctx.fill() + ctx.beginPath() + ctx.moveTo(widget_width - margin - 16, y + 5) + ctx.lineTo(widget_width - margin - 6, y + H * 0.5) + ctx.lineTo(widget_width - margin - 16, y + H - 5) + ctx.fill() + } + let freeWidth = widget_width - (40 + margin * 2 + 20) + let [valueText, valueWidth] = fitText(ctx, (this.displayValue?.() ?? ""), freeWidth) + freeWidth -= valueWidth + + ctx.textAlign = 'left' + ctx.fillStyle = litegraph_base.WIDGET_SECONDARY_TEXT_COLOR + if (freeWidth > 20) { + let [name, nameWidth] = fitText(ctx, this.label || this.name, freeWidth) + freeWidth -= nameWidth + ctx.fillText(name, margin * 2 + 5, y + H * 0.7) + } + + let value_offset = margin * 2 + 20 + ctx.textAlign = 'right' + if (this.options.unit) { + ctx.fillStyle = litegraph_base.WIDGET_OUTLINE_COLOR + let [unitText, unitWidth] = fitText(ctx, this.options.unit, freeWidth) + if (unitText == this.options.unit) { + ctx.fillText(this.options.unit, widget_width - value_offset, y + H * 0.7) + value_offset += unitWidth + freeWidth -= unitWidth + } + } + ctx.fillStyle = litegraph_base.WIDGET_TEXT_COLOR + ctx.fillText(valueText, widget_width - value_offset, y + H * 0.7) + ctx.fillStyle = litegraph_base.WIDGET_SECONDARY_TEXT_COLOR + + + let annotation = '' + if (this.annotation) { + annotation = this.annotation(this.value, freeWidth) + } else if ( + this.options.annotation && + this.value in this.options.annotation + ) { + annotation = this.options.annotation[this.value] + } + if (annotation) { + ctx.fillStyle = litegraph_base.WIDGET_OUTLINE_COLOR + let [annoDisplay, annoWidth] = fitText(ctx, annotation, freeWidth) + ctx.fillText( + annoDisplay, + widget_width - 5 - valueWidth - value_offset, + y + H * 0.7 + ) + } + } +} +function mouseAnnotated(event, [x, y], node) { + //NOTE: Mouse actions contain no history element. 
+    //This can cause overlapping actions since each triggers on different event type (down/move/up)
+    //TODO: Consider further rework
+    const widget_width = this.width || node.size[0]
+    const old_value = this.value
+    const margin = 15
+    let isButton = 0
+    if (x > margin + 6 && x < margin + 16) {
+        isButton = -1
+    } else if (x > widget_width - margin - 16 && x < widget_width - margin - 6) {
+        isButton = 1
+    } else if (x > widget_width - margin - 34 && x < widget_width - margin - 18) {
+        isButton = 2
+    }
+    var allow_scroll = true
+    if (allow_scroll && event.type == 'pointermove') {
+        if (event.deltaX)
+            this.value += event.deltaX * (this.options.step || 1)
+        if (this.options.min != null && this.value < this.options.min) {
+            this.value = this.options.min
+        }
+        if (this.options.max != null && this.value > this.options.max) {
+            this.value = this.options.max
+        }
+    } else if (event.type == 'pointerdown') {
+        const buttonType = button_action(this)
+        if (isButton == 2) {
+            if (buttonType == 'Reset') {
+                this.value = this.options.reset
+            } else if (buttonType == 'Disable') {
+                this.value = this.options.disable
+            }
+        } else {
+            this.value += isButton * (this.options.step || 1)
+            if (this.options.min != null && this.value < this.options.min) {
+                this.value = this.options.min
+            }
+            if (this.options.max != null && this.value > this.options.max) {
+                this.value = this.options.max
+            }
+        }
+    } //end mousedown
+    else if (event.type == 'pointerup') {
+        if (event.click_time < 200 && !isButton) {
+            const d_callback = (v) => {
+                this.value = this.parseValue?.(v) ?? Number(v)
+                inner_value_change(this, this.value, node, [x, y])
+            }
+            const dialog = app.canvas.prompt(
+                'Value',
+                this.value,//TODO: Consider making this displayValue?
+                d_callback,
+                event
+            )
+            const input = dialog.querySelector(".value")
+            input.addEventListener("keydown", (e) => {
+                if (e.keyCode == 9) {
+                    e.preventDefault();
+                    e.stopPropagation();
+                    d_callback(input.value)
+                    dialog.close()
+                    node?.graph?.setDirtyCanvas(true);
+                    let i = node.widgets.findIndex((w) => w == this)
+                    if (e.shiftKey)
+                        i--
+                    else
+                        i++
+                    if (node.widgets[i]?.type == "VHS.ANNOTATED") {//restrict to annotated numbers
+                        node.widgets[i]?.mouse(event, [x, y+24], node)
+                    }
+                }
+            })
+        }
+    }
+
+    if (old_value != this.value)
+        setTimeout(
+            function () {
+                inner_value_change(this, this.value, node, [x, y])
+            }.bind(this),
+            20
+        )
+    return true
+}
+let latentPreviewNodes = new Set()
+app.registerExtension({
+    name: "VideoHelperSuite.Core",
+    settings: [
+        {
+            id: 'VHS.AdvancedPreviews',
+            category: ['🎥🅥🅗🅢', 'Previews', 'Advanced Previews'],
+            name: 'Advanced Previews',
+            tooltip: 'Automatically transcode previews on request. Required for advanced functionality',
+            type: 'combo',
+            options: ['Never', 'Always', 'Input Only'],
+            defaultValue: 'Input Only',
+        },
+        {
+            id: 'VHS.AdvancedPreviewsMinWidth',
+            category: ['🎥🅥🅗🅢', 'Previews', 'Min Width'],
+            name: 'Minimum preview width',
+            tooltip: 'Advanced previews have their resolution downscaled to the node size for performance. While a node can be resized to increase preview quality, a minimum width can be set that previews won\'t be downscaled beneath. Previews will never be upscaled, so this can safely be set large.',
+            type: 'number',
+            attrs: {
+                min: 0,
+                step: 1,
+                max: 3840,
+            },
+            defaultValue: 0,
+        },
+        {
+            id: 'VHS.AdvancedPreviewsDeadline',
+            category: ['🎥🅥🅗🅢', 'Previews', 'Deadline'],
+            name: 'Deadline',
+            tooltip: 'Determines how much time can be spent when encoding advanced previews.
Realtime results in reduced quality, while good will likely cause the preview to stutter as it is initially generated',
+            type: 'combo',
+            options: ['realtime', 'good'],
+            defaultValue: 'realtime',
+        },
+        {
+            id: 'VHS.AdvancedPreviewsDefaultMute',
+            category: ['🎥🅥🅗🅢', 'Previews', 'Default Mute'],
+            name: 'Mute videos by default',
+            type: 'boolean',
+            defaultValue: false,
+        },
+        {
+            id: 'VHS.LatentPreview',
+            category: ['🎥🅥🅗🅢', 'Sampling', 'Latent Previews'],
+            name: 'Display animated previews when sampling',
+            type: 'boolean',
+            defaultValue: false,
+            onChange(value) {
+                if (!value) {
+                    //Remove any previewWidgets
+                    for (let id of latentPreviewNodes) {
+                        let n = app.graph.getNodeById(id)
+                        let i = n?.widgets?.findIndex((w) => w.name == 'vhslatentpreview')
+                        if (i >= 0) {
+                            n.widgets.splice(i,1)[0].onRemove()
+                        }
+                    }
+                    latentPreviewNodes = new Set()
+                }
+            },
+        },
+        {
+            id: "VHS.LatentPreviewRate",
+            category: ['🎥🅥🅗🅢', 'Sampling', 'Latent Preview Rate'],
+            name: "Playback rate override",
+            type: 'number',
+            attrs: {
+                min: 0,
+                step: 1,
+                max: 60
+            },
+            tooltip:
+                'Force a specific frame rate for the playback of latent frames. This should not be confused with the output frame rate and will not match for video models.',
+            defaultValue: 0,
+        },
+        {
+            id: 'VHS.MetadataImage',
+            category: ['🎥🅥🅗🅢', 'Output', 'MetadataImage'],
+            name: 'Save png of first frame for metadata',
+            type: 'boolean',
+            defaultValue: true,
+        },
+        {
+            id: 'VHS.KeepIntermediate',
+            category: ['🎥🅥🅗🅢', 'Output', 'Keep Intermediate'],
+            name: 'Keep required intermediate files after successful execution',
+            type: 'boolean',
+            defaultValue: true,
+        },
+    ],
+
+    async beforeRegisterNodeDef(nodeType, nodeData, app) {
+        if(nodeData?.name?.startsWith("VHS_")) {
+            useKVState(nodeType);
+            if (nodeData.description) {
+                let description = nodeData.description
+                let el = document.createElement("div")
+                el.innerHTML = description
+                if (!el.children.length) {
+                    //Is plaintext. Do minor convenience formatting
+                    let chunks = description.split('\n')
+                    nodeData.description = chunks[0]
+                    description = chunks.join('')
+                } else {
+                    nodeData.description = el.querySelector('#VHS_shortdesc')?.innerHTML || el.children[1]?.firstChild?.innerHTML
+                }
+                chainCallback(nodeType.prototype, "onNodeCreated", function () {
+                    helpDOM.addHelp(this, nodeType, description)
+                    this.setSize(this.computeSize())
+                })
+            }
+            //set widgetType to use VHS widgets where possible
+            for(let inp of Object.values({...nodeData.input?.required, ...nodeData.input?.optional})) {
+                if (["INT", "FLOAT"].includes(inp[0])) {
+                    if (!inp[1]) {
+                        inp[1] = {}
+                    }
+                    inp[1].widgetType ??= "VHS" + inp[0]
+                }
+            }
+            chainCallback(nodeType.prototype, "onNodeCreated", function () {
+                let new_widgets = []
+                if (this.widgets) {
+                    for (let w of this.widgets) {
+                        let input = this.constructor.nodeData.input
+                        let config = input?.required[w.name] ??
input.optional[w.name] + if (!config) { + continue + } + if (w?.type == "text" && config[1].vhs_path_extensions) { + new_widgets.push(app.widgets.VHSPATH({}, w.name, ["VHSPATH", config[1]])); + } else { + new_widgets.push(w) + } + } + this.widgets = new_widgets; + } + const originalAddInput = this.addInput; + this.addInput = function(name, type, options) { + if (options.widget) { + //Is Converted Widget + const widget = this.widgets.find((w) => w.name == name) + if (widget?.config) { + //Has override for type + type = widget.config[0] + if (type == 'FLOAT') { + type = "FLOAT,INT" + } + setWidgetConfig(options, widget.config) + } + } + return originalAddInput.apply(this, [name, type, options]) + } + }); + } + if (nodeData?.name == "VHS_LoadImages") { + addUploadWidget(nodeType, nodeData, "directory", "folder"); + chainCallback(nodeType.prototype, "onNodeCreated", function() { + const pathWidget = this.widgets.find((w) => w.name === "directory"); + chainCallback(pathWidget, "callback", (value) => { + if (!value) { + return; + } + let params = {filename : value, type : "input", format: "folder"}; + this.updateParameters(params, true); + }); + }); + addLoadCommon(nodeType, nodeData); + } else if (nodeData?.name == "VHS_LoadImagesPath") { + chainCallback(nodeType.prototype, "onNodeCreated", function() { + const pathWidget = this.widgets.find((w) => w.name === "directory"); + chainCallback(pathWidget, "callback", (value) => { + if (!value) { + return; + } + let params = {filename : value, type : "path", format: "folder"}; + this.updateParameters(params, true); + }); + }); + addLoadCommon(nodeType, nodeData); + } else if (nodeData?.name == "VHS_LoadVideo" || nodeData?.name == "VHS_LoadVideoFFmpeg") { + chainCallback(nodeType.prototype, "onNodeCreated", function() { + const pathWidget = this.widgets.find((w) => w.name === "video"); + chainCallback(pathWidget, "callback", (value) => { + if (!value) { + return; + } + let parts = ["input", value]; + let extension_index = parts[1].lastIndexOf("."); + let extension = parts[1].slice(extension_index+1); + let format = "video" + if (["gif", "webp", "avif"].includes(extension)) { + format = "image" + } + format += "/" + extension; + let params = {filename : parts[1], type : parts[0], format: format}; + this.updateParameters(params, true); + }); + }); + addUploadWidget(nodeType, nodeData, "video"); + addLoadCommon(nodeType, nodeData); + addVAEOutputToggle(nodeType, nodeData); + } else if (nodeData?.name == "VHS_LoadAudio") { + addAudioPreview(nodeType) + chainCallback(nodeType.prototype, "onNodeCreated", function() { + const pathWidget = this.widgets.find((w) => w.name === "audio_file"); + chainCallback(pathWidget, "callback", (filename) => { + this.updateParameters({filename, type: 'path'}, true); + }); + }); + } else if (nodeData?.name == "VHS_LoadAudioUpload") { + addUploadWidget(nodeType, nodeData, "audio", "audio"); + addAudioPreview(nodeType) + chainCallback(nodeType.prototype, "onNodeCreated", function() { + const pathWidget = this.widgets.find((w) => w.name === "audio"); + chainCallback(pathWidget, "callback", (filename) => { + if (!filename) return + let params = {filename, type : "input"}; + this.updateParameters(params, true); + }); + }); + } else if (nodeData?.name == "VHS_LoadVideoPath" || nodeData?.name == "VHS_LoadVideoFFmpegPath") { + chainCallback(nodeType.prototype, "onNodeCreated", function() { + const pathWidget = this.widgets.find((w) => w.name === "video"); + chainCallback(pathWidget, "callback", (value) => { + let extension_index = 
value.lastIndexOf("."); + let extension = value.slice(extension_index+1); + let format = "video" + if (["gif", "webp", "avif"].includes(extension)) { + format = "image" + } + format += "/" + extension; + let params = {filename : value, type: "path", format: format}; + this.updateParameters(params, true); + }); + }); + addLoadCommon(nodeType, nodeData); + addVAEOutputToggle(nodeType, nodeData); + } else if (nodeData?.name == "VHS_LoadImagePath") { + addLoadCommon(nodeType, nodeData); + addVAEOutputToggle(nodeType, nodeData); + chainCallback(nodeType.prototype, "onNodeCreated", function() { + const pathWidget = this.widgets.find((w) => w.name === "image"); + chainCallback(pathWidget, "callback", (value) => { + let extension_index = value.lastIndexOf("."); + let extension = value.slice(extension_index+1); + let format = "video" + "/" + extension; + let params = {filename : value, type: "path", format: format}; + this.updateParameters(params, true); + }); + }); + } else if (nodeData?.name == "VHS_VideoCombine") { + addDateFormatting(nodeType, "filename_prefix"); + chainCallback(nodeType.prototype, "onExecuted", function(message) { + if (message?.gifs) { + this.updateParameters(message.gifs[0], true); + } + }); + addVideoPreview(nodeType, false); + addPreviewOptions(nodeType); + addFormatWidgets(nodeType, nodeData); + addVAEInputToggle(nodeType, nodeData) + } else if (nodeData?.name == "VHS_SaveImageSequence") { + //Disabled for safety as VHS_SaveImageSequence is not currently merged + //addDateFormating(nodeType, "directory_name", timestamp_widget=true); + //addTimestampWidget(nodeType, nodeData, "directory_name") + } else if (nodeData?.name == "VHS_BatchManager") { + chainCallback(nodeType.prototype, "onNodeCreated", function() { + this.widgets.push({name: "count", type: "dummy", value: 0, + computeSize: () => {return [0,-4]}, + afterQueued: function() {this.value++;}}); + }); + } else if (nodeData?.name == "VHS_Unbatch") { + cloneType(nodeType, nodeData) + } else if (nodeData?.name == "VHS_SelectLatest") { + chainCallback(nodeType.prototype, "onNodeCreated", function() { + this.isVirtualNode = true + chainCallback(this, "onConnectionsChange", function (contype, slot, iscon, linfo) { + if (iscon) { + this.update_links() + } + }) + + this.update_links = function(extraLinks = []) { + if (!this.outputs[0].links?.length) return + + function get_links(node) { + let links = [] + for (const l of node.outputs[0].links) { + const linkInfo = node.graph.links[l] + const n = node.graph.getNodeById(linkInfo.target_id) + if (n.type == 'Reroute') { + links = links.concat(get_links(n)) + } else { + links.push(l) + } + } + return links + } + + let links = [ + ...get_links(this).map((l) => this.graph.links[l]), + ...extraLinks + ] + let v = this.latest_file + if (!v) { + return + } + + // For each output link copy our value over the original widget value + for (const linkInfo of links) { + const node = this.graph.getNodeById(linkInfo.target_id) + const input = node.inputs[linkInfo.target_slot] + const widgetName = input.widget.name + const widget = node.widgets.find((w) => w.name === widgetName) + if (widget) { + widget.value = v + if (widget.callback) { + widget.callback( widget.value, app.canvas, + node, app.canvas.graph_mouse, {}) + } + } + } + } + let fetch_files = async () => { + let [path, remainder] = path_stem(this.widgets[0].value) + let params = {path : path} + let optionsURL = api.apiURL('/vhs/getpath?' 
+ new URLSearchParams(params));
+                let options = []
+                try {
+                    let resp = await fetch(optionsURL);
+                    options = await resp.json();
+                } catch(e) {}
+                options = options.filter((file) => file.startsWith(remainder) && file.endsWith(this.widgets[1].value))
+                if (options.length && this.latest_file != options[options.length-1]) {
+                    this.latest_file = path + options[options.length-1]
+                    this.update_links()
+                }
+            }
+            this.widgets[0].callback = fetch_files
+            this.widgets[1].callback = fetch_files
+            this.onPromptExecuted = fetch_files
+            this.applyToGraph = this.update_links
+        })
+    }
+},
+    async getCustomWidgets() {
+        return {
+            VHSPATH(node, inputName, inputData) {
+                let w = {
+                    name : inputName,
+                    type : "VHS.PATH",
+                    value : "",
+                    draw : function(ctx, node, widget_width, y, H) {
+                        //Adapted from litegraph.core.js:drawNodeWidgets
+                        var show_text = app.canvas.ds.scale >= (app.canvas.low_quality_zoom_threshold ?? 0.5)
+                        var margin = 15;
+                        var text_color = LiteGraph.WIDGET_TEXT_COLOR;
+                        var secondary_text_color = LiteGraph.WIDGET_SECONDARY_TEXT_COLOR;
+                        ctx.textAlign = "left";
+                        ctx.strokeStyle = LiteGraph.WIDGET_OUTLINE_COLOR;
+                        ctx.fillStyle = LiteGraph.WIDGET_BGCOLOR;
+                        ctx.beginPath();
+                        if (show_text)
+                            ctx.roundRect(margin, y, widget_width - margin * 2, H, [H * 0.5]);
+                        else
+                            ctx.rect( margin, y, widget_width - margin * 2, H );
+                        ctx.fill();
+                        if (show_text) {
+                            if(!this.disabled)
+                                ctx.stroke();
+                            ctx.save();
+                            ctx.beginPath();
+                            ctx.rect(margin, y, widget_width - margin * 2, H);
+                            ctx.clip();
+
+                            //ctx.stroke();
+                            let freeWidth = widget_width - (margin * 2 + 40)
+                            ctx.fillStyle = secondary_text_color;
+                            const label = this.label || this.name;
+                            if (label != null) {
+                                let [labelDisplay, labelWidth] = fitText(ctx, label, freeWidth)
+                                freeWidth -= labelWidth
+                                ctx.fillText(labelDisplay, margin * 2, y + H * 0.7);
+                            }
+                            ctx.fillStyle = this.value ? text_color : '#777';
+                            ctx.textAlign = "right";
+                            let disp_text = fitPath(ctx, String(this.value || this.options.placeholder), freeWidth)[0]
+                            ctx.fillText(disp_text, widget_width - margin * 2, y + H * 0.7);
+                            ctx.restore();
+                        }
+                    },
+                    mouse : searchBox,
+                    options : {},
+                };
+                if (inputData.length > 1) {
+                    w.options = inputData[1]
+                    if (inputData[1].default) {
+                        w.value = inputData[1].default;
+                    }
+                }
+
+                if (!node.widgets) {
+                    node.widgets = [];
+                }
+                node.widgets.push(w);
+                return w;
+            },
+            VHSFLOAT(node, inputName, inputData) {
+                let w = {
+                    name: inputName,
+                    type: "VHS.ANNOTATED",
+                    value: inputData[1]?.default ?? 0,
+                    draw: drawAnnotated,
+                    mouse: mouseAnnotated,
+                    computeSize(width) {
+                        return [width, 20]
+                    },
+                    callback(v) {
+                        if (this.options.round) {
+                            //TODO adopt ComfyUI_frontend#4291?
+                            v = Math.round((v + Number.EPSILON) /
+                                this.options.round) * this.options.round
+                        }
+                        if (this.options.max && v > this.options.max) {
+                            v = this.options.max
+                        }
+                        if (this.options.min && v < this.options.min) {
+                            v = this.options.min
+                        }
+                        this.value = v
+                    },
+                    config: inputData,
+                    displayValue: function() {
+                        return roundToPrecision(this.value, this.options.precision ?? 3)
+                    },
+                    options: Object.assign({}, inputData[1])
+                }
+                if (!node.widgets) {
+                    node.widgets = []
+                }
+                node.widgets.push(w)
+                return w
+            },
+            VHSINT(node, inputName, inputData) {
+                let w = {
+                    name: inputName,
+                    type: "VHS.ANNOTATED",
+                    value: inputData[1]?.default ??
0, + draw: drawAnnotated, + mouse: mouseAnnotated, + computeSize(width) { + return [width, 20] + }, + callback(v) { + if (this.options.max && v > this.options.max) { + v = this.options.max + } + if (this.options.min && v < this.options.min) { + v = this.options.min + } + if (v == 0) { + return + } + const s = this.options.step + let sh = this.options.mod ?? 0 + this.value = Math.round((v - sh) / s) * s + sh + }, + config: inputData, + displayValue: function() { + return this.value | 0 + }, + options: Object.assign({}, inputData[1]) + } + if (!node.widgets) { + node.widgets = [] + } + node.widgets.push(w) + return w + }, + VHSTIMESTAMP(node, inputName, inputData) { + let w = { + name: inputName, + type: "VHS.TIMESTAMP", + value: inputData[1]?.default ?? 0, + draw: drawAnnotated, + mouse: mouseAnnotated, + computeSize(width) { + return [width, 20] + }, + parseValue(v) { + if (typeof(v) == "string") { + let val = 0 + for (let chunk of v.split(":")) { + val = val * 60 + parseFloat(chunk) + } + return val + } + }, + callback(v) {}, + config: inputData, + options: Object.assign({}, inputData[1]), + displayValue() { + let seconds = this.value + let hours = seconds / 3600 | 0 + seconds -= 3600 * hours + let minutes = seconds / 60 | 0 + seconds -= 60 * minutes + let display = "" + if (hours > 0) { + display += hours + ":" + } + if (hours > 0 || minutes > 0) { + if (hours > 0) { + minutes = (''+minutes).padStart(2,'0') + } + display += minutes + ":" + } + seconds = roundToPrecision(seconds, 4) + if ((seconds[1] == '.' || seconds.length == 1) && (minutes > 0 || hours > 0)) { + seconds = '0'+seconds + } + display += seconds + return display + } + } + if (!node.widgets) { + node.widgets = [] + } + node.widgets.push(w) + return w + }, + } + }, + async loadedGraphNode(node) { + //Check and migrate inputs named batch_manager from old workflows + if (node.type?.startsWith("VHS_") && node.inputs) { + const batchInput = node.inputs.find((i) => i.name == "batch_manager") + if (batchInput) { + batchInput.name = "meta_batch" + } + } + }, + async beforeConfigureGraph(graphData, missingNodeTypes) { + if(helpDOM?.node) { + helpDOM.node = undefined + } + }, + async setup() { + let originalGraphToPrompt = app.graphToPrompt + let graphToPrompt = async function() { + let res = await originalGraphToPrompt.apply(this, arguments); + res.workflow.extra['VHS_latentpreview'] = app.ui.settings.getSettingValue("VHS.LatentPreview") + res.workflow.extra['VHS_latentpreviewrate'] = app.ui.settings.getSettingValue("VHS.LatentPreviewRate") + res.workflow.extra['VHS_MetadataImage'] = app.ui.settings.getSettingValue("VHS.MetadataImage") + res.workflow.extra['VHS_KeepIntermediate'] = app.ui.settings.getSettingValue("VHS.KeepIntermediate") + return res + } + app.graphToPrompt = graphToPrompt + //Add a handler for pasting video data + document.addEventListener('paste', async (e) => { + if (!e.target.classList.contains('litegraph') && + !e.target.classList.contains('graph-canvas-container')) { + return + } + let data = e.clipboardData || window.clipboardData + let filepath = data.getData('text/plain') + let video + for (const item of data.items) { + if (item.type.startsWith('video/')) { + video = item + break + } + } + if (filepath && copiedPath == filepath) { + //Add a Load Video (Path) and populate filepath + const pastedNode = LiteGraph.createNode('VHS_LoadVideoPath') + app.canvas.graph.add(pastedNode) + pastedNode.pos[0] = app.canvas.graph_mouse[0] + pastedNode.pos[1] = app.canvas.graph_mouse[1] + pastedNode.widgets[0].value = 
filepath + pastedNode.widgets[0].callback?.(filepath) + } else if (video && false) { + //Disabled due to lack of testing + //Add a Load Video (Upload), then upload the file, then select the file + const pastedNode = LiteGraph.createNode('VHS_LoadVideo') + app.canvas.graph.add(pastedNode) + pastedNode.pos[0] = app.canvas.graph_mouse[0] + pastedNode.pos[1] = app.canvas.graph_mouse[1] + const pathWidget = pastedNode.widgets[0] + //TODO: upload to pasted dir? + const blob = video.getAsFile() + const resp = await uploadFile(blob) + if (resp.status != 200) { + //upload failed and file can not be added to options + return; + } + const filename = (await resp.json()).name; + pathWidget.options.values.push(filename); + pathWidget.value = filename; + pathWidget.callback?.(filename) + } else { + return + } + e.preventDefault() + e.stopImmediatePropagation() + return false + }, true) + }, + async init() { + if (app.ui.settings.getSettingValue("VHS.AdvancedPreviews") == true) { + app.ui.settings.setSettingValue("VHS.AdvancedPreviews", 'Always') + } + if (app.ui.settings.getSettingValue("VHS.AdvancedPreviews") == false) { + app.ui.settings.setSettingValue("VHS.AdvancedPreviews", 'Never') + } + if (app.VHSHelp != helpDOM) { + helpDOM = app.VHSHelp + } else { + initHelpDOM() + } + let e = app.extensions.filter((w) => w.name == 'UVR5.AudioPreviewer') + if (e.length) { + let orig = e[0].beforeRegisterNodeDef + e[0].beforeRegisterNodeDef = function(nodeType, nodeData, app) { + if(!nodeData?.name?.startsWith("VHS_")) { + return orig.apply(this, arguments); + } + } + } + }, +}); +let previewImages = [] +api.addEventListener('executing', ({ detail }) => { + if (detail === null) { + for (let graph of [app.graph, ...app.graph.subgraphs.values()]) { + for (let node of graph._nodes) { + if (node.type.startsWith("VHS_")) { + node.onPromptExecuted?.() + } + } + } + } +}) +function getLatentPreviewCtx(id, width, height) { + const node = getNodeById(id) + if (!node) { + return undefined + } + + let previewWidget = node.widgets.find((w) => w.name == "vhslatentpreview") + if (!previewWidget) { + //check for and remove any native preview + let nativePreview = node.widgets.findIndex((w) => w.name == '$$canvas-image-preview') + if (nativePreview >= 0) { + node.imgs = [] + node.widgets.splice(nativePreview,1) + } + let canvasEl = document.createElement("canvas") + canvasEl.style.width = "100%" + previewWidget = node.addDOMWidget("vhslatentpreview", "vhscanvas", canvasEl, { + serialize: false, + hideOnZoom: false, + }); + previewWidget.serialize = false + allowDragFromWidget(previewWidget) + canvasEl.addEventListener('contextmenu', (e) => { + e.preventDefault() + return app.canvas._mousedown_callback(e) + }, true); + canvasEl.addEventListener('pointerdown', (e) => { + e.preventDefault() + return app.canvas._mousedown_callback(e) + }, true); + canvasEl.addEventListener('mousewheel', (e) => { + e.preventDefault() + return app.canvas._mousewheel_callback(e) + }, true); + canvasEl.addEventListener('pointermove', (e) => { + e.preventDefault() + return app.canvas._mousemove_callback(e) + }, true); + canvasEl.addEventListener('pointerup', (e) => { + e.preventDefault() + return app.canvas._mouseup_callback(e) + }, true); + + previewWidget.computeSize = function(width) { + if (this.aspectRatio) { + let height = (node.size[0]-20)/ this.aspectRatio + 10; + if (!(height > 0)) { + height = 0; + } + this.computedHeight = height + 10; + return [width, height]; + } + return [width, -4];//no loaded src, widget should not display + } + } + let 
canvasEl = previewWidget.element + if (!previewWidget.ctx || canvasEl.width != width + || canvasEl.height != height) { + previewWidget.aspectRatio = width / height + canvasEl.width = width + canvasEl.height = height + fitHeight(node) + } + return canvasEl.getContext("2d") +} +let animateIntervals = {} +function beginLatentPreview(id, previewImages, rate) { + latentPreviewNodes.add(id) + if (animateIntervals[id]) { + clearInterval(animateIntervals[id]) + } + let displayIndex = 0 + let node = getNodeById(id) + //While progress is safely cleared on execution completion, + //initial progress must be started here to avoid a race condition + node.progress = 0 + animateIntervals[id] = setInterval(() => { + if (getNodeById(id)?.progress == undefined + || app.canvas.graph.rootGraph != node.graph.rootGraph) { + clearInterval(animateIntervals[id]) + delete animateIntervals[id] + return + } + if (!previewImages[displayIndex]) { + return + } + getLatentPreviewCtx(id, previewImages[displayIndex].width, + previewImages[displayIndex].height)?.drawImage?.(previewImages[displayIndex],0,0) + displayIndex = (displayIndex + 1) % previewImages.length + }, 1000/rate); + +} +let previewImagesDict = {} +api.addEventListener('VHS_latentpreview', ({ detail }) => { + if (detail.id == null) { + return + } + let previewImages = previewImagesDict[detail.id] = [] + previewImages.length = detail.length + + let idParts = detail.id.split(':') + for (let i=1; i <= idParts.length; i++) { + let id = idParts.slice(0,i).join(':') + beginLatentPreview(id, previewImages, detail.rate) + } +}); +let td = new TextDecoder() +api.addEventListener('b_preview', async (e) => { + if (Object.keys(animateIntervals).length == 0) { + return + } + e.preventDefault() + e.stopImmediatePropagation() + e.stopPropagation() + const dv = new DataView(await e.detail.slice(0,24).arrayBuffer()) + const index = dv.getUint32(4) + const idlen = dv.getUint8(8) + const id = td.decode(dv.buffer.slice(9,9+idlen)) + previewImagesDict[id][index] = await window.createImageBitmap(e.detail.slice(24)) + return false +}, true); diff --git a/zavodik/nodes/ComfyUI-VideoHelperSuite/web/js/videoinfo.js b/zavodik/nodes/ComfyUI-VideoHelperSuite/web/js/videoinfo.js new file mode 100644 index 0000000000000000000000000000000000000000..144a136a478828497573574c4af6ba121aec47cc --- /dev/null +++ b/zavodik/nodes/ComfyUI-VideoHelperSuite/web/js/videoinfo.js @@ -0,0 +1,101 @@ +import { app } from '../../../scripts/app.js' + + +function getVideoMetadata(file) { + return new Promise((r) => { + const reader = new FileReader(); + reader.onload = (event) => { + const videoData = new Uint8Array(event.target.result); + const dataView = new DataView(videoData.buffer); + + let decoder = new TextDecoder(); + // Check for known valid magic strings + if (dataView.getUint32(0) == 0x1A45DFA3) { + //webm/mkv (both use EBML/Matroska format) + //see http://wiki.webmproject.org/webm-metadata/global-metadata + //and https://www.matroska.org/technical/elements.html + //contrary to specs, tag seems consistently at start + //COMMENT + 0x4487 + packed length? 
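+ //(the packed length is an EBML variable-length integer: the count of
+ //leading zero bits before the first set bit gives its width in octets,
+ //and clearing that marker bit recovers the value, e.g. for a two-octet
+ //vint 0x48d8 & ~(1 << 14) = 0x8d8; the Math.clz32 arithmetic below
+ //performs exactly this unpacking)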
+ //length 0x8d8 becomes 0x48d8 + // + //description for variable length ints https://github.com/ietf-wg-cellar/ebml-specification/blob/master/specification.markdown + let offset = 4 + 8; //COMMENT is 7 chars + 1 to realign + while(offset < videoData.length-16) { + //Check for text tags + if (dataView.getUint16(offset) == 0x4487) { + //check that name of tag is COMMENT + const name = String.fromCharCode(...videoData.slice(offset-7,offset)); + if (name === "COMMENT") { + let vint = dataView.getUint32(offset+2); + let n_octets = Math.clz32(vint)+1; + if (n_octets < 4) {//250MB sanity cutoff + let length = (vint >> (8*(4-n_octets))) & ~(1 << (7*n_octets)); + const content = decoder.decode(videoData.slice(offset+2+n_octets, offset+2+n_octets+length)); + let json = JSON.parse(content); + r(json); + return; + } + } + } + offset+=1; + } + } else if (dataView.getUint32(4) == 0x66747970 && dataView.getUint32(8) == 0x69736F6D) { + //mp4 + //see https://developer.apple.com/documentation/quicktime-file-format + //Seems to make no guarantee for alignment + let offset = videoData.length-4; + while (offset > 16) {//rough safe guess + if (dataView.getUint32(offset) == 0x64617461) {//any data tag + if (dataView.getUint32(offset - 8) == 0xa9636d74) {//cmt data tag + let type = dataView.getUint32(offset+4); //seemingly 1 + let locale = dataView.getUint32(offset+8); //seemingly 0 + let size = dataView.getUint32(offset-4) - 4*4; + const content = decoder.decode(videoData.slice(offset+12, offset+12+size)); + const json = JSON.parse(content); + r(json); + return; + } + } + + offset-=1; + } + } else { + console.error("Unknown magic: " + dataView.getUint32(0)) + } + r(); + return; + }; + + reader.readAsArrayBuffer(file); + }); +} +function isVideoFile(file) { + if (file?.name?.endsWith(".webm")) { + return true; + } + if (file?.name?.endsWith(".mp4")) { + return true; + } + if (file?.name?.endsWith(".mkv")) { + return true; + } + + return false; +} + +let originalHandleFile = app.handleFile; +app.handleFile = handleFile; +let fileInput = document.getElementById("comfy-file-input") +//hijack comfy-file-input to allow webm/mp4/mkv +fileInput.accept += ",video/webm,video/mp4,video/x-matroska"; + +async function handleFile(file) { + if (file?.type?.startsWith("video/") || isVideoFile(file)) { + const videoInfo = await getVideoMetadata(file); + if (videoInfo?.workflow) { + await app.loadGraphData(videoInfo.workflow); + return + } + } + return await originalHandleFile.apply(this, arguments); +} diff --git a/zavodik/nodes/ComfyUI-WanAnimatePreprocess/.gitattributes b/zavodik/nodes/ComfyUI-WanAnimatePreprocess/.gitattributes new file mode 100644 index 0000000000000000000000000000000000000000..f13e053bf0ebf99d69b8e28c0f02eb346dcfe15e --- /dev/null +++ b/zavodik/nodes/ComfyUI-WanAnimatePreprocess/.gitattributes @@ -0,0 +1,2 @@ +# Auto detect text files and perform LF normalization +* text=auto diff --git a/zavodik/nodes/ComfyUI-WanAnimatePreprocess/.github/workflows/publish.yml b/zavodik/nodes/ComfyUI-WanAnimatePreprocess/.github/workflows/publish.yml new file mode 100644 index 0000000000000000000000000000000000000000..be75d38aa858e24376e02d4782290390f1f8bb88 --- /dev/null +++ b/zavodik/nodes/ComfyUI-WanAnimatePreprocess/.github/workflows/publish.yml @@ -0,0 +1,25 @@ +name: Publish to Comfy registry +on: + workflow_dispatch: + push: + branches: + - main + paths: + - "pyproject.toml" + +permissions: + issues: write + +jobs: + publish-node: + name: Publish Custom Node to registry + runs-on: ubuntu-latest + if: ${{ 
github.repository_owner == 'kijai' }} + steps: + - name: Check out code + uses: actions/checkout@v4 + - name: Publish Custom Node + uses: Comfy-Org/publish-node-action@v1 + with: + ## Add your own personal access token to your Github Repository secrets and reference it here. + personal_access_token: ${{ secrets.REGISTRY_ACCESS_TOKEN }} diff --git a/zavodik/nodes/ComfyUI-WanAnimatePreprocess/.gitignore b/zavodik/nodes/ComfyUI-WanAnimatePreprocess/.gitignore new file mode 100644 index 0000000000000000000000000000000000000000..bca63641228c15a253b57f8b3eb0d3af63b64f3c --- /dev/null +++ b/zavodik/nodes/ComfyUI-WanAnimatePreprocess/.gitignore @@ -0,0 +1,13 @@ +output/ +*__pycache__/ +samples*/ +runs/ +checkpoints/ +master_ip +logs/ +*.DS_Store +.idea +tools/ +.vscode/ +convert_* +*.pt \ No newline at end of file diff --git a/zavodik/nodes/ComfyUI-WanAnimatePreprocess/LICENSE b/zavodik/nodes/ComfyUI-WanAnimatePreprocess/LICENSE new file mode 100644 index 0000000000000000000000000000000000000000..29f81d812f3e768fa89638d1f72920dbfd1413a8 --- /dev/null +++ b/zavodik/nodes/ComfyUI-WanAnimatePreprocess/LICENSE @@ -0,0 +1,201 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. 
+ + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
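The `__init__.py` added in the next hunk re-exports `NODE_CLASS_MAPPINGS` and `NODE_DISPLAY_NAME_MAPPINGS` from `nodes.py`; those two dicts are the contract ComfyUI looks for when it loads a custom node package. As a rough sketch only (the real `nodes.py` is not shown in this diff, and `ExampleNode` below is a hypothetical placeholder rather than one of the WanAnimatePreprocess nodes), a minimal module satisfying that contract looks like this:

    # Hypothetical minimal nodes.py; only the two mapping dicts at the bottom
    # are required by the __init__.py re-export that follows.

    class ExampleNode:
        CATEGORY = "example"        # menu category shown in the ComfyUI node browser
        RETURN_TYPES = ("IMAGE",)   # types of the node's output sockets
        FUNCTION = "run"            # method ComfyUI calls when the node executes

        @classmethod
        def INPUT_TYPES(cls):
            # Declares the node's input sockets and widgets.
            return {"required": {"images": ("IMAGE",)}}

        def run(self, images):
            # Pass-through body; a real node would transform the image batch here.
            return (images,)

    # ComfyUI discovers custom nodes by importing these two dicts.
    NODE_CLASS_MAPPINGS = {"ExampleNode": ExampleNode}
    NODE_DISPLAY_NAME_MAPPINGS = {"ExampleNode": "Example Node"}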
diff --git a/zavodik/nodes/ComfyUI-WanAnimatePreprocess/__init__.py b/zavodik/nodes/ComfyUI-WanAnimatePreprocess/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..a6626d186ec268608d55d1dade85dd005351047c --- /dev/null +++ b/zavodik/nodes/ComfyUI-WanAnimatePreprocess/__init__.py @@ -0,0 +1,3 @@ +from .nodes import NODE_CLASS_MAPPINGS, NODE_DISPLAY_NAME_MAPPINGS + +__all__ = ["NODE_CLASS_MAPPINGS", "NODE_DISPLAY_NAME_MAPPINGS"] \ No newline at end of file diff --git a/zavodik/nodes/ComfyUI-WanAnimatePreprocess/__pycache__/__init__.cpython-313.pyc b/zavodik/nodes/ComfyUI-WanAnimatePreprocess/__pycache__/__init__.cpython-313.pyc new file mode 100644 index 0000000000000000000000000000000000000000..ab1cdcb9f3decd86f8d30b618775e86f4f980a72 Binary files /dev/null and b/zavodik/nodes/ComfyUI-WanAnimatePreprocess/__pycache__/__init__.cpython-313.pyc differ diff --git a/zavodik/nodes/ComfyUI-WanAnimatePreprocess/__pycache__/nodes.cpython-313.pyc b/zavodik/nodes/ComfyUI-WanAnimatePreprocess/__pycache__/nodes.cpython-313.pyc new file mode 100644 index 0000000000000000000000000000000000000000..5267093814b5844b532b685b6bb78d6a30818760 Binary files /dev/null and b/zavodik/nodes/ComfyUI-WanAnimatePreprocess/__pycache__/nodes.cpython-313.pyc differ diff --git a/zavodik/nodes/ComfyUI-WanAnimatePreprocess/__pycache__/retarget_pose.cpython-313.pyc b/zavodik/nodes/ComfyUI-WanAnimatePreprocess/__pycache__/retarget_pose.cpython-313.pyc new file mode 100644 index 0000000000000000000000000000000000000000..4df1ccb7f15bdc3a874f28a43258b453049e02d8 Binary files /dev/null and b/zavodik/nodes/ComfyUI-WanAnimatePreprocess/__pycache__/retarget_pose.cpython-313.pyc differ diff --git a/zavodik/nodes/ComfyUI-WanAnimatePreprocess/__pycache__/utils.cpython-313.pyc b/zavodik/nodes/ComfyUI-WanAnimatePreprocess/__pycache__/utils.cpython-313.pyc new file mode 100644 index 0000000000000000000000000000000000000000..2e9562dd00c5c46f0378a1be034d47982ca8396d Binary files /dev/null and b/zavodik/nodes/ComfyUI-WanAnimatePreprocess/__pycache__/utils.cpython-313.pyc differ diff --git a/zavodik/nodes/ComfyUI-WanAnimatePreprocess/example.png b/zavodik/nodes/ComfyUI-WanAnimatePreprocess/example.png new file mode 100644 index 0000000000000000000000000000000000000000..892adea8614b3268b378cd1fbb7df242ebc73a8e --- /dev/null +++ b/zavodik/nodes/ComfyUI-WanAnimatePreprocess/example.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:e89afdcc2a5c34b6a576ffe018720197914bb0c59cb3ba2ac7f33e2a56ec2396 +size 568747 diff --git a/zavodik/nodes/ComfyUI-WanAnimatePreprocess/example_workflows/WanAnimate_native_example_01.json b/zavodik/nodes/ComfyUI-WanAnimatePreprocess/example_workflows/WanAnimate_native_example_01.json new file mode 100644 index 0000000000000000000000000000000000000000..ca7118a7eaaedfde628d9aa74d058ede1a1866c1 --- /dev/null +++ b/zavodik/nodes/ComfyUI-WanAnimatePreprocess/example_workflows/WanAnimate_native_example_01.json @@ -0,0 +1,3797 @@ +{ + "id": "8b7a9a57-2303-4ef5-9fc2-bf41713bd1fc", + "revision": 0, + "last_node_id": 207, + "last_link_id": 362, + "nodes": [ + { + "id": 66, + "type": "ImageConcatMulti", + "pos": [ + 2660.1005859375, + -950.2750244140625 + ], + "size": [ + 270, + 150 + ], + "flags": {}, + "order": 64, + "mode": 0, + "inputs": [ + { + "name": "image_1", + "type": "IMAGE", + "link": 87 + }, + { + "name": "image_2", + "shape": 7, + "type": "IMAGE", + "link": 107 + } + ], + "outputs": [ + { + "name": "images", + "type": "IMAGE", + "links": [ + 89 + ] + } + ], + 
"properties": { + "cnr_id": "comfyui-kjnodes", + "ver": "468fcc86f0b29e79a8510e8239eb15714d6747a6" + }, + "widgets_values": [ + 2, + "left", + true, + null + ] + }, + { + "id": 137, + "type": "GetNode", + "pos": [ + 2441.813232421875, + -1173.92919921875 + ], + "size": [ + 210, + 60 + ], + "flags": { + "collapsed": true + }, + "order": 0, + "mode": 0, + "inputs": [], + "outputs": [ + { + "name": "IMAGE", + "type": "IMAGE", + "links": [ + 239 + ] + } + ], + "title": "Get_face_images", + "properties": {}, + "widgets_values": [ + "face_images" + ], + "color": "#2a363b", + "bgcolor": "#3f5159" + }, + { + "id": 140, + "type": "GetNode", + "pos": [ + 2441.813232421875, + -1124.576904296875 + ], + "size": [ + 210, + 60 + ], + "flags": { + "collapsed": true + }, + "order": 1, + "mode": 0, + "inputs": [], + "outputs": [ + { + "name": "IMAGE", + "type": "IMAGE", + "links": [ + 243 + ] + } + ], + "title": "Get_pose_images", + "properties": {}, + "widgets_values": [ + "pose_images" + ], + "color": "#2a363b", + "bgcolor": "#3f5159" + }, + { + "id": 134, + "type": "GetNode", + "pos": [ + 2443.561279296875, + -1227.2171630859375 + ], + "size": [ + 210, + 34 + ], + "flags": { + "collapsed": true + }, + "order": 2, + "mode": 0, + "inputs": [], + "outputs": [ + { + "name": "IMAGE", + "type": "IMAGE", + "links": [ + 236 + ] + } + ], + "title": "Get_reference_image", + "properties": {}, + "widgets_values": [ + "reference_image" + ], + "color": "#2a363b", + "bgcolor": "#3f5159" + }, + { + "id": 148, + "type": "SetNode", + "pos": [ + -548.7736206054688, + -2964.476318359375 + ], + "size": [ + 210, + 60 + ], + "flags": { + "collapsed": true + }, + "order": 40, + "mode": 0, + "inputs": [ + { + "name": "AUDIO", + "type": "AUDIO", + "link": 255 + } + ], + "outputs": [ + { + "name": "*", + "type": "*", + "links": null + } + ], + "title": "Set_input_audio", + "properties": { + "previousName": "input_audio" + }, + "widgets_values": [ + "input_audio" + ] + }, + { + "id": 149, + "type": "GetNode", + "pos": [ + 3043.859130859375, + -1156.882080078125 + ], + "size": [ + 210, + 60 + ], + "flags": { + "collapsed": true + }, + "order": 3, + "mode": 0, + "inputs": [], + "outputs": [ + { + "name": "AUDIO", + "type": "AUDIO", + "links": [ + 256 + ] + } + ], + "title": "Get_input_audio", + "properties": {}, + "widgets_values": [ + "input_audio" + ] + }, + { + "id": 153, + "type": "SetNode", + "pos": [ + -1605.1839599609375, + -2832.218994140625 + ], + "size": [ + 210, + 60 + ], + "flags": { + "collapsed": true + }, + "order": 33, + "mode": 0, + "inputs": [ + { + "name": "INT", + "type": "INT", + "link": 263 + } + ], + "outputs": [ + { + "name": "*", + "type": "*", + "links": null + } + ], + "title": "Set_width", + "properties": { + "previousName": "width" + }, + "widgets_values": [ + "width" + ], + "color": "#1b4669", + "bgcolor": "#29699c" + }, + { + "id": 154, + "type": "SetNode", + "pos": [ + -1617.8741455078125, + -2533.993408203125 + ], + "size": [ + 210, + 60 + ], + "flags": { + "collapsed": true + }, + "order": 31, + "mode": 0, + "inputs": [ + { + "name": "INT", + "type": "INT", + "link": 264 + } + ], + "outputs": [ + { + "name": "*", + "type": "*", + "links": null + } + ], + "title": "Set_height", + "properties": { + "previousName": "height" + }, + "widgets_values": [ + "height" + ], + "color": "#1b4669", + "bgcolor": "#29699c" + }, + { + "id": 63, + "type": "VHS_LoadVideo", + "pos": [ + -876.9246826171875, + -3084.905517578125 + ], + "size": [ + 315.8014221191406, + 491.6708679199219 + ], + "flags": {}, + "order": 32, + 
"mode": 0, + "inputs": [ + { + "name": "meta_batch", + "shape": 7, + "type": "VHS_BatchManager", + "link": null + }, + { + "name": "vae", + "shape": 7, + "type": "VAE", + "link": null + }, + { + "name": "custom_width", + "type": "INT", + "widget": { + "name": "custom_width" + }, + "link": 257 + }, + { + "name": "custom_height", + "type": "INT", + "widget": { + "name": "custom_height" + }, + "link": 258 + } + ], + "outputs": [ + { + "name": "IMAGE", + "type": "IMAGE", + "links": [ + 248 + ] + }, + { + "name": "frame_count", + "type": "INT", + "links": [ + 267 + ] + }, + { + "name": "audio", + "type": "AUDIO", + "links": [ + 255 + ] + }, + { + "name": "video_info", + "type": "VHS_VIDEOINFO", + "links": null + } + ], + "properties": { + "cnr_id": "comfyui-videohelpersuite", + "ver": "8e4d79471bf1952154768e8435a9300077b534fa", + "Node name for S&R": "VHS_LoadVideo" + }, + "widgets_values": { + "video": "raw.mp4", + "force_rate": 16, + "custom_width": 960, + "custom_height": 544, + "frame_load_cap": 0, + "skip_first_frames": 0, + "select_every_nth": 1, + "format": "AnimateDiff", + "choose video to upload": "image", + "videopreview": { + "hidden": false, + "paused": false, + "params": { + "filename": "raw.mp4", + "type": "input", + "format": "video/mp4", + "force_rate": 16, + "custom_width": 960, + "custom_height": 544, + "frame_load_cap": 0, + "skip_first_frames": 0, + "select_every_nth": 1 + } + } + } + }, + { + "id": 157, + "type": "SetNode", + "pos": [ + -528.8223266601562, + -3030.21337890625 + ], + "size": [ + 210, + 60 + ], + "flags": { + "collapsed": true + }, + "order": 39, + "mode": 0, + "inputs": [ + { + "name": "INT", + "type": "INT", + "link": 267 + } + ], + "outputs": [ + { + "name": "*", + "type": "*", + "links": null + } + ], + "title": "Set_frame_count", + "properties": { + "previousName": "frame_count" + }, + "widgets_values": [ + "frame_count" + ], + "color": "#1b4669", + "bgcolor": "#29699c" + }, + { + "id": 144, + "type": "SetNode", + "pos": [ + -522.720947265625, + -3107.148681640625 + ], + "size": [ + 210, + 60 + ], + "flags": { + "collapsed": true + }, + "order": 38, + "mode": 0, + "inputs": [ + { + "name": "IMAGE", + "type": "IMAGE", + "link": 248 + } + ], + "outputs": [ + { + "name": "IMAGE", + "type": "IMAGE", + "links": [ + 306 + ] + } + ], + "title": "Set_input_video", + "properties": { + "previousName": "input_video" + }, + "widgets_values": [ + "input_video" + ], + "color": "#2a363b", + "bgcolor": "#3f5159" + }, + { + "id": 128, + "type": "SetNode", + "pos": [ + -459.54620361328125, + -1650.783935546875 + ], + "size": [ + 210, + 60 + ], + "flags": { + "collapsed": true + }, + "order": 41, + "mode": 0, + "inputs": [ + { + "name": "IMAGE", + "type": "IMAGE", + "link": 231 + } + ], + "outputs": [ + { + "name": "*", + "type": "*", + "links": null + } + ], + "title": "Set_reference_image", + "properties": { + "previousName": "reference_image" + }, + "widgets_values": [ + "reference_image" + ], + "color": "#2a363b", + "bgcolor": "#3f5159" + }, + { + "id": 64, + "type": "ImageResizeKJv2", + "pos": [ + -772.3116455078125, + -1675.555419921875 + ], + "size": [ + 270, + 336 + ], + "flags": {}, + "order": 34, + "mode": 0, + "inputs": [ + { + "name": "image", + "type": "IMAGE", + "link": 82 + }, + { + "name": "mask", + "shape": 7, + "type": "MASK", + "link": null + }, + { + "name": "width", + "type": "INT", + "widget": { + "name": "width" + }, + "link": 286 + }, + { + "name": "height", + "type": "INT", + "widget": { + "name": "height" + }, + "link": 287 + } + ], + "outputs": [ 
+ { + "name": "IMAGE", + "type": "IMAGE", + "links": [ + 231 + ] + }, + { + "name": "width", + "type": "INT", + "links": [] + }, + { + "name": "height", + "type": "INT", + "links": [] + }, + { + "name": "mask", + "type": "MASK", + "links": null + } + ], + "properties": { + "cnr_id": "comfyui-kjnodes", + "ver": "468fcc86f0b29e79a8510e8239eb15714d6747a6", + "Node name for S&R": "ImageResizeKJv2" + }, + "widgets_values": [ + 832, + 480, + "lanczos", + "pad_edge_pixel", + "0, 0, 0", + "top", + 16, + "cpu", + "Output: 1 x 832 x 480 | 4.57MB" + ] + }, + { + "id": 57, + "type": "LoadImage", + "pos": [ + -1116.435791015625, + -1679.51318359375 + ], + "size": [ + 274.080078125, + 314 + ], + "flags": {}, + "order": 4, + "mode": 0, + "inputs": [], + "outputs": [ + { + "name": "IMAGE", + "type": "IMAGE", + "links": [ + 82 + ] + }, + { + "name": "MASK", + "type": "MASK", + "links": null + } + ], + "properties": { + "cnr_id": "comfy-core", + "ver": "0.3.57", + "Node name for S&R": "LoadImage" + }, + "widgets_values": [ + "refer.jpeg", + "image" + ] + }, + { + "id": 142, + "type": "SetNode", + "pos": [ + 1086.6927490234375, + -2124.5263671875 + ], + "size": [ + 210, + 60 + ], + "flags": { + "collapsed": true + }, + "order": 61, + "mode": 0, + "inputs": [ + { + "name": "MASK", + "type": "MASK", + "link": 245 + } + ], + "outputs": [ + { + "name": "MASK", + "type": "MASK", + "links": [ + 246 + ] + } + ], + "title": "Set_mask", + "properties": { + "previousName": "mask" + }, + "widgets_values": [ + "mask" + ], + "color": "#1c5715", + "bgcolor": "#1f401b" + }, + { + "id": 99, + "type": "DrawMaskOnImage", + "pos": [ + 1222.5340576171875, + -2132.91455078125 + ], + "size": [ + 270, + 78 + ], + "flags": {}, + "order": 63, + "mode": 0, + "inputs": [ + { + "name": "image", + "type": "IMAGE", + "link": 250 + }, + { + "name": "mask", + "type": "MASK", + "link": 246 + } + ], + "outputs": [ + { + "name": "images", + "type": "IMAGE", + "links": [ + 233 + ] + } + ], + "properties": { + "cnr_id": "comfyui-kjnodes", + "ver": "623b5913dc7f240fd8b26422e99f8849a21c5473", + "Node name for S&R": "DrawMaskOnImage" + }, + "widgets_values": [ + "0, 0, 0" + ] + }, + { + "id": 146, + "type": "GetNode", + "pos": [ + 1086.932861328125, + -2184.602783203125 + ], + "size": [ + 210, + 60 + ], + "flags": { + "collapsed": true + }, + "order": 5, + "mode": 0, + "inputs": [], + "outputs": [ + { + "name": "IMAGE", + "type": "IMAGE", + "links": [ + 250 + ] + } + ], + "title": "Get_input_video", + "properties": {}, + "widgets_values": [ + "input_video" + ], + "color": "#2a363b", + "bgcolor": "#3f5159" + }, + { + "id": 130, + "type": "SetNode", + "pos": [ + 1294.1051025390625, + -2211.976806640625 + ], + "size": [ + 211.05747985839844, + 60 + ], + "flags": { + "collapsed": true + }, + "order": 65, + "mode": 0, + "inputs": [ + { + "name": "IMAGE", + "type": "IMAGE", + "link": 233 + } + ], + "outputs": [ + { + "name": "IMAGE", + "type": "IMAGE", + "links": [ + 251 + ] + } + ], + "title": "Set_background_image", + "properties": { + "previousName": "background_image" + }, + "widgets_values": [ + "background_image" + ], + "color": "#2a363b", + "bgcolor": "#3f5159" + }, + { + "id": 77, + "type": "ImageConcatMulti", + "pos": [ + 2653.37939453125, + -1220.90087890625 + ], + "size": [ + 270, + 190 + ], + "flags": {}, + "order": 30, + "mode": 0, + "inputs": [ + { + "name": "image_1", + "type": "IMAGE", + "link": 236 + }, + { + "name": "image_2", + "shape": 7, + "type": "IMAGE", + "link": 239 + }, + { + "name": "image_3", + "shape": 7, + "type": "IMAGE", 
+ "link": 243 + }, + { + "name": "image_4", + "shape": 7, + "type": "IMAGE", + "link": 249 + } + ], + "outputs": [ + { + "name": "images", + "type": "IMAGE", + "links": [ + 107 + ] + } + ], + "properties": { + "cnr_id": "comfyui-kjnodes", + "ver": "468fcc86f0b29e79a8510e8239eb15714d6747a6" + }, + "widgets_values": [ + 4, + "down", + true, + null + ] + }, + { + "id": 145, + "type": "GetNode", + "pos": [ + 2463.91552734375, + -1073.7054443359375 + ], + "size": [ + 210, + 60 + ], + "flags": { + "collapsed": true + }, + "order": 6, + "mode": 0, + "inputs": [], + "outputs": [ + { + "name": "IMAGE", + "type": "IMAGE", + "links": [ + 249 + ] + } + ], + "title": "Get_input_video", + "properties": {}, + "widgets_values": [ + "input_video" + ], + "color": "#2a363b", + "bgcolor": "#3f5159" + }, + { + "id": 42, + "type": "GetImageSizeAndCount", + "pos": [ + 2555.719482421875, + -692.73095703125 + ], + "size": [ + 277.20001220703125, + 86 + ], + "flags": {}, + "order": 62, + "mode": 0, + "inputs": [ + { + "name": "image", + "type": "IMAGE", + "link": 346 + } + ], + "outputs": [ + { + "name": "image", + "type": "IMAGE", + "slot_index": 0, + "links": [ + 87 + ] + }, + { + "label": "832 width", + "name": "width", + "type": "INT", + "links": null + }, + { + "label": "480 height", + "name": "height", + "type": "INT", + "links": null + }, + { + "label": "109 count", + "name": "count", + "type": "INT", + "links": null + } + ], + "properties": { + "cnr_id": "comfyui-kjnodes", + "ver": "36f6fdd7d4c393675ac622bd300ef667ee65d8b8", + "Node name for S&R": "GetImageSizeAndCount" + }, + "widgets_values": [] + }, + { + "id": 151, + "type": "INTConstant", + "pos": [ + -1704.931640625, + -2640.54638671875 + ], + "size": [ + 210, + 58 + ], + "flags": { + "collapsed": false + }, + "order": 7, + "mode": 0, + "inputs": [], + "outputs": [ + { + "name": "value", + "type": "INT", + "links": [ + 258, + 264, + 287 + ] + } + ], + "title": "Height", + "properties": { + "cnr_id": "comfyui-kjnodes", + "ver": "37659859825cea55940a58110525795ce5deb8be", + "Node name for S&R": "INTConstant" + }, + "widgets_values": [ + 480 + ], + "color": "#1b4669", + "bgcolor": "#29699c" + }, + { + "id": 178, + "type": "OnnxDetectionModelLoader", + "pos": [ + -506.44635009765625, + -2398.935302734375 + ], + "size": [ + 351.52410888671875, + 106 + ], + "flags": {}, + "order": 8, + "mode": 0, + "inputs": [], + "outputs": [ + { + "name": "model", + "type": "POSEMODEL", + "links": [ + 290 + ] + } + ], + "properties": { + "aux_id": "kijai/ComfyUI-WanAnimatePreprocess", + "ver": "e63d6e71ae4c271f3f81211a7ca7f87607b7e50d", + "Node name for S&R": "OnnxDetectionModelLoader" + }, + "widgets_values": [ + "vitpose-l-wholebody.onnx", + "onnx\\yolov10m.onnx", + "CUDAExecutionProvider" + ] + }, + { + "id": 182, + "type": "GrowMaskWithBlur", + "pos": [ + 431.7315368652344, + -2222.123779296875 + ], + "size": [ + 292.748046875, + 246 + ], + "flags": {}, + "order": 56, + "mode": 0, + "inputs": [ + { + "name": "mask", + "type": "MASK", + "link": 314 + } + ], + "outputs": [ + { + "name": "mask", + "type": "MASK", + "links": [ + 315 + ] + }, + { + "name": "mask_inverted", + "type": "MASK", + "links": null + } + ], + "properties": { + "cnr_id": "comfyui-kjnodes", + "ver": "bb205d809b467307b8ec3bb1a22680a4873187f8", + "Node name for S&R": "GrowMaskWithBlur" + }, + "widgets_values": [ + 10, + 0, + true, + false, + 0, + 1, + 1, + false + ] + }, + { + "id": 108, + "type": "BlockifyMask", + "pos": [ + 779.2421264648438, + -2222.2099609375 + ], + "size": [ + 270, + 58 + ], + 
"flags": {}, + "order": 59, + "mode": 0, + "inputs": [ + { + "name": "masks", + "type": "MASK", + "link": 315 + } + ], + "outputs": [ + { + "name": "mask", + "type": "MASK", + "links": [ + 245 + ] + } + ], + "properties": { + "cnr_id": "comfyui-kjnodes", + "ver": "00da1910634fbf314d407608efb281ae6f7f1ba2", + "Node name for S&R": "BlockifyMask" + }, + "widgets_values": [ + 32 + ] + }, + { + "id": 127, + "type": "Note", + "pos": [ + 983.2726440429688, + -2067.319580078125 + ], + "size": [ + 210, + 88 + ], + "flags": {}, + "order": 9, + "mode": 0, + "inputs": [], + "outputs": [], + "properties": {}, + "widgets_values": [ + "These are new nodes in KJNodes" + ], + "color": "#432", + "bgcolor": "#653" + }, + { + "id": 104, + "type": "Sam2Segmentation", + "pos": [ + 83.82372283935547, + -2220.965576171875 + ], + "size": [ + 272.087890625, + 182 + ], + "flags": {}, + "order": 52, + "mode": 0, + "inputs": [ + { + "name": "sam2_model", + "type": "SAM2MODEL", + "link": 185 + }, + { + "name": "image", + "type": "IMAGE", + "link": 316 + }, + { + "name": "coordinates_positive", + "shape": 7, + "type": "STRING", + "link": null + }, + { + "name": "coordinates_negative", + "shape": 7, + "type": "STRING", + "link": null + }, + { + "name": "bboxes", + "shape": 7, + "type": "BBOX", + "link": 321 + }, + { + "name": "mask", + "shape": 7, + "type": "MASK", + "link": null + } + ], + "outputs": [ + { + "name": "mask", + "type": "MASK", + "links": [ + 314 + ] + } + ], + "properties": { + "cnr_id": "ComfyUI-segment-anything-2", + "ver": "c59676b008a76237002926f684d0ca3a9b29ac54", + "Node name for S&R": "Sam2Segmentation" + }, + "widgets_values": [ + false, + false + ] + }, + { + "id": 180, + "type": "GetImageSizeAndCount", + "pos": [ + -246.24586486816406, + -3011.47705078125 + ], + "size": [ + 190.86483764648438, + 86 + ], + "flags": {}, + "order": 44, + "mode": 0, + "inputs": [ + { + "name": "image", + "type": "IMAGE", + "link": 306 + } + ], + "outputs": [ + { + "name": "image", + "type": "IMAGE", + "links": [ + 309, + 316 + ] + }, + { + "label": "832 width", + "name": "width", + "type": "INT", + "links": [ + 307, + 310 + ] + }, + { + "label": "480 height", + "name": "height", + "type": "INT", + "links": [ + 308, + 311 + ] + }, + { + "label": "109 count", + "name": "count", + "type": "INT", + "links": null + } + ], + "properties": { + "cnr_id": "comfyui-kjnodes", + "ver": "bb205d809b467307b8ec3bb1a22680a4873187f8", + "Node name for S&R": "GetImageSizeAndCount" + }, + "widgets_values": [] + }, + { + "id": 173, + "type": "DrawViTPose", + "pos": [ + 120.56317138671875, + -2811.226318359375 + ], + "size": [ + 270, + 178 + ], + "flags": {}, + "order": 50, + "mode": 0, + "inputs": [ + { + "name": "pose_data", + "type": "POSEDATA", + "link": 294 + }, + { + "name": "width", + "type": "INT", + "widget": { + "name": "width" + }, + "link": 307 + }, + { + "name": "height", + "type": "INT", + "widget": { + "name": "height" + }, + "link": 308 + } + ], + "outputs": [ + { + "name": "pose_images", + "type": "IMAGE", + "links": [ + 319 + ] + } + ], + "properties": { + "aux_id": "kijai/ComfyUI-WanAnimatePreprocess", + "ver": "e63d6e71ae4c271f3f81211a7ca7f87607b7e50d", + "Node name for S&R": "DrawViTPose" + }, + "widgets_values": [ + 832, + 480, + 16, + -1, + -1, + "True" + ] + }, + { + "id": 75, + "type": "VHS_VideoCombine", + "pos": [ + 2010.67431640625, + -3041.57568359375 + ], + "size": [ + 743.6680297851562, + 765.5007934570312 + ], + "flags": {}, + "order": 67, + "mode": 0, + "inputs": [ + { + "name": "images", + "type": 
"IMAGE", + "link": 251 + }, + { + "name": "audio", + "shape": 7, + "type": "AUDIO", + "link": null + }, + { + "name": "meta_batch", + "shape": 7, + "type": "VHS_BatchManager", + "link": null + }, + { + "name": "vae", + "shape": 7, + "type": "VAE", + "link": null + } + ], + "outputs": [ + { + "name": "Filenames", + "type": "VHS_FILENAMES", + "links": null + } + ], + "properties": { + "cnr_id": "comfyui-videohelpersuite", + "ver": "8e4d79471bf1952154768e8435a9300077b534fa", + "Node name for S&R": "VHS_VideoCombine" + }, + "widgets_values": { + "frame_rate": 16, + "loop_count": 0, + "filename_prefix": "WanVideo2_1_T2V", + "format": "video/h264-mp4", + "pix_fmt": "yuv420p", + "crf": 19, + "save_metadata": true, + "trim_to_audio": false, + "pingpong": false, + "save_output": false, + "videopreview": { + "hidden": false, + "paused": false, + "params": { + "filename": "WanVideo2_1_T2V_00010.mp4", + "subfolder": "", + "type": "temp", + "format": "video/h264-mp4", + "frame_rate": 16, + "workflow": "WanVideo2_1_T2V_00010.png", + "fullpath": "N:\\AI\\ComfyUI\\temp\\WanVideo2_1_T2V_00010.mp4" + } + } + } + }, + { + "id": 181, + "type": "VHS_VideoCombine", + "pos": [ + 1137.4183349609375, + -3026.80517578125 + ], + "size": [ + 743.6680297851562, + 765.5007934570312 + ], + "flags": {}, + "order": 58, + "mode": 0, + "inputs": [ + { + "name": "images", + "type": "IMAGE", + "link": 320 + }, + { + "name": "audio", + "shape": 7, + "type": "AUDIO", + "link": null + }, + { + "name": "meta_batch", + "shape": 7, + "type": "VHS_BatchManager", + "link": null + }, + { + "name": "vae", + "shape": 7, + "type": "VAE", + "link": null + } + ], + "outputs": [ + { + "name": "Filenames", + "type": "VHS_FILENAMES", + "links": null + } + ], + "properties": { + "cnr_id": "comfyui-videohelpersuite", + "ver": "8e4d79471bf1952154768e8435a9300077b534fa", + "Node name for S&R": "VHS_VideoCombine" + }, + "widgets_values": { + "frame_rate": 16, + "loop_count": 0, + "filename_prefix": "WanVideo2_1_T2V", + "format": "video/h264-mp4", + "pix_fmt": "yuv420p", + "crf": 19, + "save_metadata": true, + "trim_to_audio": false, + "pingpong": false, + "save_output": false, + "videopreview": { + "hidden": false, + "paused": false, + "params": { + "filename": "WanVideo2_1_T2V_00009.mp4", + "subfolder": "", + "type": "temp", + "format": "video/h264-mp4", + "frame_rate": 16, + "workflow": "WanVideo2_1_T2V_00009.png", + "fullpath": "N:\\AI\\ComfyUI\\temp\\WanVideo2_1_T2V_00009.mp4" + } + } + } + }, + { + "id": 174, + "type": "VHS_VideoCombine", + "pos": [ + 714.7760009765625, + -3057.50341796875 + ], + "size": [ + 214.7587890625, + 542.7587890625 + ], + "flags": {}, + "order": 55, + "mode": 0, + "inputs": [ + { + "name": "images", + "type": "IMAGE", + "link": 318 + }, + { + "name": "audio", + "shape": 7, + "type": "AUDIO", + "link": null + }, + { + "name": "meta_batch", + "shape": 7, + "type": "VHS_BatchManager", + "link": null + }, + { + "name": "vae", + "shape": 7, + "type": "VAE", + "link": null + } + ], + "outputs": [ + { + "name": "Filenames", + "type": "VHS_FILENAMES", + "links": null + } + ], + "properties": { + "cnr_id": "comfyui-videohelpersuite", + "ver": "0edce8ef7ce173ac97a3ed3d6f4636029d1a4530", + "Node name for S&R": "VHS_VideoCombine" + }, + "widgets_values": { + "frame_rate": 16, + "loop_count": 0, + "filename_prefix": "vitpose", + "format": "video/h264-mp4", + "pix_fmt": "yuv420p", + "crf": 19, + "save_metadata": true, + "trim_to_audio": false, + "pingpong": false, + "save_output": false, + "videopreview": { + "hidden": false, + 
"paused": false, + "params": { + "filename": "vitpose_00007.mp4", + "subfolder": "", + "type": "temp", + "format": "video/h264-mp4", + "frame_rate": 16, + "workflow": "vitpose_00007.png", + "fullpath": "N:\\AI\\ComfyUI\\temp\\vitpose_00007.mp4" + } + } + } + }, + { + "id": 183, + "type": "SetNode", + "pos": [ + 464.6632080078125, + -3038.54296875 + ], + "size": [ + 210, + 60 + ], + "flags": { + "collapsed": true + }, + "order": 51, + "mode": 0, + "inputs": [ + { + "name": "IMAGE", + "type": "IMAGE", + "link": 317 + } + ], + "outputs": [ + { + "name": "IMAGE", + "type": "IMAGE", + "links": [ + 318 + ] + } + ], + "title": "Set_face_images", + "properties": { + "previousName": "face_images" + }, + "widgets_values": [ + "face_images" + ], + "color": "#2a363b", + "bgcolor": "#3f5159" + }, + { + "id": 184, + "type": "SetNode", + "pos": [ + 442.45526123046875, + -2784.734375 + ], + "size": [ + 210, + 60 + ], + "flags": { + "collapsed": true + }, + "order": 54, + "mode": 0, + "inputs": [ + { + "name": "IMAGE", + "type": "IMAGE", + "link": 319 + } + ], + "outputs": [ + { + "name": "IMAGE", + "type": "IMAGE", + "links": [ + 320 + ] + } + ], + "title": "Set_pose_images", + "properties": { + "previousName": "pose_images" + }, + "widgets_values": [ + "pose_images" + ], + "color": "#2a363b", + "bgcolor": "#3f5159" + }, + { + "id": 172, + "type": "PoseAndFaceDetection", + "pos": [ + 104.5530014038086, + -3028.416015625 + ], + "size": [ + 313.125, + 142 + ], + "flags": {}, + "order": 47, + "mode": 0, + "inputs": [ + { + "name": "model", + "type": "POSEMODEL", + "link": 290 + }, + { + "name": "images", + "type": "IMAGE", + "link": 309 + }, + { + "name": "retarget_image", + "shape": 7, + "type": "IMAGE", + "link": null + }, + { + "name": "width", + "type": "INT", + "widget": { + "name": "width" + }, + "link": 310 + }, + { + "name": "height", + "type": "INT", + "widget": { + "name": "height" + }, + "link": 311 + } + ], + "outputs": [ + { + "name": "pose_data", + "type": "POSEDATA", + "links": [ + 294 + ] + }, + { + "name": "face_images", + "type": "IMAGE", + "links": [ + 317 + ] + }, + { + "name": "key_frame_body_points", + "type": "STRING", + "links": null + }, + { + "name": "bboxes", + "type": "BBOX", + "links": [ + 321 + ] + } + ], + "properties": { + "aux_id": "kijai/ComfyUI-WanAnimatePreprocess", + "ver": "e63d6e71ae4c271f3f81211a7ca7f87607b7e50d", + "Node name for S&R": "PoseAndFaceDetection" + }, + "widgets_values": [ + 832, + 480 + ] + }, + { + "id": 102, + "type": "DownloadAndLoadSAM2Model", + "pos": [ + -470.4329528808594, + -2221.738037109375 + ], + "size": [ + 334.4137268066406, + 130 + ], + "flags": {}, + "order": 10, + "mode": 0, + "inputs": [], + "outputs": [ + { + "name": "sam2_model", + "type": "SAM2MODEL", + "links": [ + 185 + ] + } + ], + "properties": { + "cnr_id": "ComfyUI-segment-anything-2", + "ver": "c59676b008a76237002926f684d0ca3a9b29ac54", + "Node name for S&R": "DownloadAndLoadSAM2Model" + }, + "widgets_values": [ + "sam2.1_hiera_base_plus.safetensors", + "video", + "cuda", + "fp16" + ] + }, + { + "id": 185, + "type": "Note", + "pos": [ + 257.0601806640625, + -2465.42041015625 + ], + "size": [ + 236.14007568359375, + 88 + ], + "flags": {}, + "order": 11, + "mode": 0, + "inputs": [], + "outputs": [], + "title": "Note: SAM2 detection", + "properties": {}, + "widgets_values": [ + "You can use either the detected bbox or the kay_frame_body_points to positive coordinates, if one fails to creater proper mask, try the other" + ], + "color": "#432", + "bgcolor": "#653" + }, + { + "id": 
150, + "type": "INTConstant", + "pos": [ + -1695.39013671875, + -2773.4970703125 + ], + "size": [ + 210, + 58 + ], + "flags": {}, + "order": 12, + "mode": 0, + "inputs": [], + "outputs": [ + { + "name": "value", + "type": "INT", + "links": [ + 257, + 263, + 286 + ] + } + ], + "title": "Width", + "properties": { + "cnr_id": "comfyui-kjnodes", + "ver": "37659859825cea55940a58110525795ce5deb8be", + "Node name for S&R": "INTConstant" + }, + "widgets_values": [ + 832 + ], + "color": "#1b4669", + "bgcolor": "#29699c" + }, + { + "id": 30, + "type": "VHS_VideoCombine", + "pos": [ + 3064.7763671875, + -1021.487548828125 + ], + "size": [ + 1478.035400390625, + 1042.3026123046875 + ], + "flags": {}, + "order": 66, + "mode": 0, + "inputs": [ + { + "name": "images", + "type": "IMAGE", + "link": 89 + }, + { + "name": "audio", + "shape": 7, + "type": "AUDIO", + "link": 256 + }, + { + "name": "meta_batch", + "shape": 7, + "type": "VHS_BatchManager", + "link": null + }, + { + "name": "vae", + "shape": 7, + "type": "VAE", + "link": null + } + ], + "outputs": [ + { + "name": "Filenames", + "type": "VHS_FILENAMES", + "links": null + } + ], + "properties": { + "cnr_id": "comfyui-videohelpersuite", + "ver": "8e4d79471bf1952154768e8435a9300077b534fa", + "Node name for S&R": "VHS_VideoCombine" + }, + "widgets_values": { + "frame_rate": 16, + "loop_count": 0, + "filename_prefix": "Wanimate", + "format": "video/h264-mp4", + "pix_fmt": "yuv420p", + "crf": 19, + "save_metadata": true, + "trim_to_audio": true, + "pingpong": false, + "save_output": true, + "videopreview": { + "hidden": false, + "paused": false, + "params": { + "filename": "Wanimate_00008-audio.mp4", + "subfolder": "", + "type": "output", + "format": "video/h264-mp4", + "frame_rate": 16, + "workflow": "Wanimate_00008.png", + "fullpath": "N:\\AI\\ComfyUI\\output\\Wanimate_00008-audio.mp4" + } + } + } + }, + { + "id": 177, + "type": "MarkdownNote", + "pos": [ + -1088.8204345703125, + -2393.302978515625 + ], + "size": [ + 536.27783203125, + 330.03546142578125 + ], + "flags": {}, + "order": 13, + "mode": 0, + "inputs": [], + "outputs": [], + "title": "Preprocessor links", + "properties": {}, + "widgets_values": [ + "Nodes:\n\n[https://github.com/kijai/ComfyUI-WanAnimatePreprocess](https://github.com/kijai/ComfyUI-WanAnimatePreprocess)\n\nModels:\n\nYOLO:\n\n[https://huggingface.co/Wan-AI/Wan2.2-Animate-14B/blob/main/process_checkpoint/det/yolov10m.onnx](https://huggingface.co/Wan-AI/Wan2.2-Animate-14B/blob/main/process_checkpoint/det/yolov10m.onnx)\n\nViTPose\n\nLarge:\n\n[https://huggingface.co/JunkyByte/easy_ViTPose/blob/main/onnx/wholebody/vitpose-l-wholebody.onnx](https://huggingface.co/JunkyByte/easy_ViTPose/blob/main/onnx/wholebody/vitpose-l-wholebody.onnx)\n\nHuge (needs both files):\n\n[https://huggingface.co/Kijai/vitpose_comfy/blob/main/onnx/vitpose_h_wholebody_model.onnx](https://huggingface.co/Kijai/vitpose_comfy/blob/main/onnx/vitpose_h_wholebody_model.onnx)\n\n[https://huggingface.co/Kijai/vitpose_comfy/blob/main/onnx/vitpose_h_wholebody_data.bin](https://huggingface.co/Kijai/vitpose_comfy/blob/main/onnx/vitpose_h_wholebody_data.bin)" + ], + "color": "#432", + "bgcolor": "#653" + }, + { + "id": 191, + "type": "LoraLoaderModelOnly", + "pos": [ + -155.92713928222656, + -787.0674438476562 + ], + "size": [ + 632.4478759765625, + 82 + ], + "flags": {}, + "order": 43, + "mode": 0, + "inputs": [ + { + "name": "model", + "type": "MODEL", + "link": 323 + } + ], + "outputs": [ + { + "name": "MODEL", + "type": "MODEL", + "links": [ + 325 + ] + } + ], + 
"properties": { + "cnr_id": "comfy-core", + "ver": "0.3.59", + "Node name for S&R": "LoraLoaderModelOnly" + }, + "widgets_values": [ + "WanVideo\\Lightx2v\\lightx2v_I2V_14B_480p_cfg_step_distill_rank64_bf16.safetensors", + 1.2 + ], + "color": "#223", + "bgcolor": "#335" + }, + { + "id": 190, + "type": "LoraLoaderModelOnly", + "pos": [ + -160.91363525390625, + -918.007568359375 + ], + "size": [ + 487.27642822265625, + 82 + ], + "flags": {}, + "order": 37, + "mode": 0, + "inputs": [ + { + "name": "model", + "type": "MODEL", + "link": 324 + } + ], + "outputs": [ + { + "name": "MODEL", + "type": "MODEL", + "links": [ + 323 + ] + } + ], + "properties": { + "cnr_id": "comfy-core", + "ver": "0.3.59", + "Node name for S&R": "LoraLoaderModelOnly" + }, + "widgets_values": [ + "WanVideo\\wan2.2_animate_14B_relight_lora_bf16.safetensors", + 1 + ], + "color": "#223", + "bgcolor": "#335" + }, + { + "id": 204, + "type": "VAEDecode", + "pos": [ + 2335.7841796875, + -647.9304809570312 + ], + "size": [ + 140, + 46 + ], + "flags": {}, + "order": 60, + "mode": 0, + "inputs": [ + { + "name": "samples", + "type": "LATENT", + "link": 362 + }, + { + "name": "vae", + "type": "VAE", + "link": 345 + } + ], + "outputs": [ + { + "name": "IMAGE", + "type": "IMAGE", + "links": [ + 346 + ] + } + ], + "properties": { + "cnr_id": "comfy-core", + "ver": "0.3.59", + "Node name for S&R": "VAEDecode" + }, + "widgets_values": [], + "color": "#322", + "bgcolor": "#533" + }, + { + "id": 162, + "type": "GetNode", + "pos": [ + 2193.002197265625, + -617.4148559570312 + ], + "size": [ + 210, + 60 + ], + "flags": { + "collapsed": true + }, + "order": 14, + "mode": 0, + "inputs": [], + "outputs": [ + { + "name": "VAE", + "type": "VAE", + "links": [ + 345 + ] + } + ], + "title": "Get_VAE", + "properties": {}, + "widgets_values": [ + "VAE" + ], + "color": "#322", + "bgcolor": "#533" + }, + { + "id": 195, + "type": "RandomNoise", + "pos": [ + 1681.1666259765625, + -731.4764404296875 + ], + "size": [ + 270, + 82 + ], + "flags": {}, + "order": 15, + "mode": 0, + "inputs": [], + "outputs": [ + { + "name": "NOISE", + "type": "NOISE", + "links": [ + 326 + ] + } + ], + "properties": { + "cnr_id": "comfy-core", + "ver": "0.3.59", + "Node name for S&R": "RandomNoise" + }, + "widgets_values": [ + 42, + "fixed" + ] + }, + { + "id": 192, + "type": "TorchCompileModelWanVideoV2", + "pos": [ + -158.32838439941406, + -606.775146484375 + ], + "size": [ + 342.74609375, + 178 + ], + "flags": {}, + "order": 46, + "mode": 0, + "inputs": [ + { + "name": "model", + "type": "MODEL", + "link": 325 + } + ], + "outputs": [ + { + "name": "MODEL", + "type": "MODEL", + "links": [ + 332, + 350 + ] + } + ], + "properties": { + "cnr_id": "comfyui-kjnodes", + "ver": "bb205d809b467307b8ec3bb1a22680a4873187f8", + "Node name for S&R": "TorchCompileModelWanVideoV2" + }, + "widgets_values": [ + "inductor", + false, + "default", + false, + true, + 64 + ], + "color": "#223", + "bgcolor": "#335" + }, + { + "id": 206, + "type": "BasicScheduler", + "pos": [ + 1683.2818603515625, + -903.85498046875 + ], + "size": [ + 270, + 106 + ], + "flags": {}, + "order": 49, + "mode": 0, + "inputs": [ + { + "name": "model", + "type": "MODEL", + "link": 350 + } + ], + "outputs": [ + { + "name": "SIGMAS", + "type": "SIGMAS", + "links": [ + 349 + ] + } + ], + "properties": { + "cnr_id": "comfy-core", + "ver": "0.3.59", + "Node name for S&R": "BasicScheduler" + }, + "widgets_values": [ + "simple", + 4, + 1 + ] + }, + { + "id": 138, + "type": "GetNode", + "pos": [ + 1322, + -333.85980224609375 + ], 
+ "size": [ + 210, + 50 + ], + "flags": { + "collapsed": true + }, + "order": 16, + "mode": 0, + "inputs": [], + "outputs": [ + { + "name": "IMAGE", + "type": "IMAGE", + "links": [ + 352 + ] + } + ], + "title": "Get_face_images", + "properties": {}, + "widgets_values": [ + "face_images" + ], + "color": "#2a363b", + "bgcolor": "#3f5159" + }, + { + "id": 141, + "type": "GetNode", + "pos": [ + 1333.7083740234375, + -296.18280029296875 + ], + "size": [ + 210, + 34 + ], + "flags": { + "collapsed": true + }, + "order": 17, + "mode": 0, + "inputs": [], + "outputs": [ + { + "name": "IMAGE", + "type": "IMAGE", + "links": [ + 353 + ] + } + ], + "title": "Get_pose_images", + "properties": {}, + "widgets_values": [ + "pose_images" + ], + "color": "#2a363b", + "bgcolor": "#3f5159" + }, + { + "id": 131, + "type": "GetNode", + "pos": [ + 1313.332275390625, + -253.8428955078125 + ], + "size": [ + 210, + 60 + ], + "flags": { + "collapsed": true + }, + "order": 18, + "mode": 0, + "inputs": [], + "outputs": [ + { + "name": "IMAGE", + "type": "IMAGE", + "links": [ + 354 + ] + } + ], + "title": "Get_background_image", + "properties": {}, + "widgets_values": [ + "background_image" + ], + "color": "#2a363b", + "bgcolor": "#3f5159" + }, + { + "id": 158, + "type": "GetNode", + "pos": [ + 1379.7850341796875, + -51.34074783325195 + ], + "size": [ + 210, + 60 + ], + "flags": { + "collapsed": true + }, + "order": 19, + "mode": 0, + "inputs": [], + "outputs": [ + { + "name": "INT", + "type": "INT", + "links": [ + 358 + ] + } + ], + "title": "Get_frame_count", + "properties": {}, + "widgets_values": [ + "frame_count" + ], + "color": "#1b4669", + "bgcolor": "#29699c" + }, + { + "id": 156, + "type": "GetNode", + "pos": [ + 1378.34033203125, + -103.323486328125 + ], + "size": [ + 210, + 50 + ], + "flags": { + "collapsed": true + }, + "order": 20, + "mode": 0, + "inputs": [], + "outputs": [ + { + "name": "INT", + "type": "INT", + "links": [ + 357 + ] + } + ], + "title": "Get_height", + "properties": {}, + "widgets_values": [ + "height" + ], + "color": "#1b4669", + "bgcolor": "#29699c" + }, + { + "id": 155, + "type": "GetNode", + "pos": [ + 1379.78515625, + -156.0265655517578 + ], + "size": [ + 210, + 60 + ], + "flags": { + "collapsed": true + }, + "order": 21, + "mode": 0, + "inputs": [], + "outputs": [ + { + "name": "INT", + "type": "INT", + "links": [ + 356 + ] + } + ], + "title": "Get_width", + "properties": {}, + "widgets_values": [ + "width" + ], + "color": "#1b4669", + "bgcolor": "#29699c" + }, + { + "id": 143, + "type": "GetNode", + "pos": [ + 1379.7850341796875, + -204.57044982910156 + ], + "size": [ + 210, + 60 + ], + "flags": { + "collapsed": true + }, + "order": 22, + "mode": 0, + "inputs": [], + "outputs": [ + { + "name": "MASK", + "type": "MASK", + "links": [ + 355 + ] + } + ], + "title": "Get_mask", + "properties": {}, + "widgets_values": [ + "mask" + ], + "color": "#1c5715", + "bgcolor": "#1f401b" + }, + { + "id": 202, + "type": "GetNode", + "pos": [ + 1368.9952392578125, + -417.7628173828125 + ], + "size": [ + 210, + 60 + ], + "flags": { + "collapsed": true + }, + "order": 23, + "mode": 0, + "inputs": [], + "outputs": [ + { + "name": "VAE", + "type": "VAE", + "links": [ + 340 + ] + } + ], + "title": "Get_VAE", + "properties": {}, + "widgets_values": [ + "VAE" + ], + "color": "#322", + "bgcolor": "#533" + }, + { + "id": 201, + "type": "ConditioningZeroOut", + "pos": [ + 986.3633422851562, + -341.2440490722656 + ], + "size": [ + 197.712890625, + 26 + ], + "flags": { + "collapsed": true + }, + "order": 42, + 
"mode": 0, + "inputs": [ + { + "name": "conditioning", + "type": "CONDITIONING", + "link": 334 + } + ], + "outputs": [ + { + "name": "CONDITIONING", + "type": "CONDITIONING", + "links": [ + 359 + ] + } + ], + "properties": { + "cnr_id": "comfy-core", + "ver": "0.3.59", + "Node name for S&R": "ConditioningZeroOut" + }, + "widgets_values": [], + "color": "#432", + "bgcolor": "#653" + }, + { + "id": 198, + "type": "CLIPTextEncode", + "pos": [ + 1035.0103759765625, + -664.8511352539062 + ], + "size": [ + 400, + 200 + ], + "flags": {}, + "order": 35, + "mode": 0, + "inputs": [ + { + "name": "clip", + "type": "CLIP", + "link": 331 + } + ], + "outputs": [ + { + "name": "CONDITIONING", + "type": "CONDITIONING", + "links": [ + 334, + 338 + ] + } + ], + "properties": { + "cnr_id": "comfy-core", + "ver": "0.3.59", + "Node name for S&R": "CLIPTextEncode" + }, + "widgets_values": [ + "man is walking, style is soft 3D render style, night time, moonlight" + ], + "color": "#432", + "bgcolor": "#653" + }, + { + "id": 193, + "type": "CLIPLoader", + "pos": [ + 225.7170867919922, + -533.3831787109375 + ], + "size": [ + 270, + 106 + ], + "flags": {}, + "order": 24, + "mode": 0, + "inputs": [], + "outputs": [ + { + "name": "CLIP", + "type": "CLIP", + "links": [ + 331 + ] + } + ], + "properties": { + "cnr_id": "comfy-core", + "ver": "0.3.59", + "Node name for S&R": "CLIPLoader" + }, + "widgets_values": [ + "umt5_xxl_fp8_e4m3fn_scaled.safetensors", + "wan", + "default" + ], + "color": "#432", + "bgcolor": "#653" + }, + { + "id": 189, + "type": "SetNode", + "pos": [ + 86.28633117675781, + -338.70849609375 + ], + "size": [ + 210, + 60 + ], + "flags": { + "collapsed": true + }, + "order": 36, + "mode": 0, + "inputs": [ + { + "name": "VAE", + "type": "VAE", + "link": 322 + } + ], + "outputs": [ + { + "name": "*", + "type": "*", + "links": null + } + ], + "title": "Set_VAE", + "properties": { + "previousName": "VAE" + }, + "widgets_values": [ + "VAE" + ], + "color": "#322", + "bgcolor": "#533" + }, + { + "id": 188, + "type": "VAELoader", + "pos": [ + -232.2764892578125, + -368.0868225097656 + ], + "size": [ + 270, + 58 + ], + "flags": {}, + "order": 25, + "mode": 0, + "inputs": [], + "outputs": [ + { + "name": "VAE", + "type": "VAE", + "links": [ + 322 + ] + } + ], + "properties": { + "cnr_id": "comfy-core", + "ver": "0.3.59", + "Node name for S&R": "VAELoader" + }, + "widgets_values": [ + "wanvideo\\Wan2_1_VAE_bf16.safetensors" + ], + "color": "#322", + "bgcolor": "#533" + }, + { + "id": 200, + "type": "CFGGuider", + "pos": [ + 1680.1092529296875, + -571.7877807617188 + ], + "size": [ + 270, + 98 + ], + "flags": {}, + "order": 48, + "mode": 0, + "inputs": [ + { + "name": "model", + "type": "MODEL", + "link": 332 + }, + { + "name": "positive", + "type": "CONDITIONING", + "link": 336 + }, + { + "name": "negative", + "type": "CONDITIONING", + "link": 337 + } + ], + "outputs": [ + { + "name": "GUIDER", + "type": "GUIDER", + "links": [ + 347 + ] + } + ], + "properties": { + "cnr_id": "comfy-core", + "ver": "0.3.59", + "Node name for S&R": "CFGGuider" + }, + "widgets_values": [ + 1 + ] + }, + { + "id": 199, + "type": "WanAnimateToVideo", + "pos": [ + 1614.5421142578125, + -375.08544921875 + ], + "size": [ + 324.751953125, + 358 + ], + "flags": {}, + "order": 45, + "mode": 0, + "inputs": [ + { + "name": "positive", + "type": "CONDITIONING", + "link": 338 + }, + { + "name": "negative", + "type": "CONDITIONING", + "link": 359 + }, + { + "name": "vae", + "type": "VAE", + "link": 340 + }, + { + "name": "clip_vision_output", + 
"shape": 7, + "type": "CLIP_VISION_OUTPUT", + "link": null + }, + { + "name": "reference_image", + "shape": 7, + "type": "IMAGE", + "link": 351 + }, + { + "name": "face_video", + "shape": 7, + "type": "IMAGE", + "link": 352 + }, + { + "name": "pose_video", + "shape": 7, + "type": "IMAGE", + "link": 353 + }, + { + "name": "background_video", + "shape": 7, + "type": "IMAGE", + "link": 354 + }, + { + "name": "character_mask", + "shape": 7, + "type": "MASK", + "link": 355 + }, + { + "name": "continue_motion", + "shape": 7, + "type": "IMAGE", + "link": null + }, + { + "name": "width", + "type": "INT", + "widget": { + "name": "width" + }, + "link": 356 + }, + { + "name": "height", + "type": "INT", + "widget": { + "name": "height" + }, + "link": 357 + }, + { + "name": "length", + "type": "INT", + "widget": { + "name": "length" + }, + "link": 358 + } + ], + "outputs": [ + { + "name": "positive", + "type": "CONDITIONING", + "links": [ + 336 + ] + }, + { + "name": "negative", + "type": "CONDITIONING", + "links": [ + 337 + ] + }, + { + "name": "latent", + "type": "LATENT", + "links": [ + 339 + ] + }, + { + "name": "trim_latent", + "type": "INT", + "links": [ + 361 + ] + }, + { + "name": "trim_image", + "type": "INT", + "links": null + }, + { + "name": "video_frame_offset", + "type": "INT", + "links": null + } + ], + "properties": { + "cnr_id": "comfy-core", + "ver": "0.3.59", + "Node name for S&R": "WanAnimateToVideo" + }, + "widgets_values": [ + 832, + 480, + 77, + 1, + 5, + 0 + ] + }, + { + "id": 205, + "type": "KSamplerSelect", + "pos": [ + 2024.867919921875, + -748.396728515625 + ], + "size": [ + 270, + 58 + ], + "flags": {}, + "order": 26, + "mode": 0, + "inputs": [], + "outputs": [ + { + "name": "SAMPLER", + "type": "SAMPLER", + "links": [ + 348 + ] + } + ], + "properties": { + "cnr_id": "comfy-core", + "ver": "0.3.59", + "Node name for S&R": "KSamplerSelect" + }, + "widgets_values": [ + "lcm" + ] + }, + { + "id": 187, + "type": "DiffusionModelLoaderKJ", + "pos": [ + -918.0835571289062, + -920.0172119140625 + ], + "size": [ + 589.9105834960938, + 178 + ], + "flags": {}, + "order": 27, + "mode": 0, + "inputs": [ + { + "name": "extra_state_dict", + "shape": 7, + "type": "STRING", + "link": null + } + ], + "outputs": [ + { + "name": "MODEL", + "type": "MODEL", + "links": [ + 324 + ] + } + ], + "properties": { + "cnr_id": "comfyui-kjnodes", + "ver": "bb205d809b467307b8ec3bb1a22680a4873187f8", + "Node name for S&R": "DiffusionModelLoaderKJ" + }, + "widgets_values": [ + "WanVideo\\2_2\\Wan2_2-Animate-14B_high_fp8_e4m3fn_native_KJ.safetensors", + "fp16", + "fp16", + false, + "auto", + true + ], + "color": "#223", + "bgcolor": "#335" + }, + { + "id": 133, + "type": "GetNode", + "pos": [ + 991.7420654296875, + -281.65087890625 + ], + "size": [ + 210, + 50 + ], + "flags": { + "collapsed": true + }, + "order": 28, + "mode": 0, + "inputs": [], + "outputs": [ + { + "name": "IMAGE", + "type": "IMAGE", + "links": [ + 351 + ] + } + ], + "title": "Get_reference_image", + "properties": {}, + "widgets_values": [ + "reference_image" + ], + "color": "#2a363b", + "bgcolor": "#3f5159" + }, + { + "id": 194, + "type": "SamplerCustomAdvanced", + "pos": [ + 2041.89306640625, + -441.8531188964844 + ], + "size": [ + 461.3559875488281, + 382.6284484863281 + ], + "flags": {}, + "order": 53, + "mode": 0, + "inputs": [ + { + "name": "noise", + "type": "NOISE", + "link": 326 + }, + { + "name": "guider", + "type": "GUIDER", + "link": 347 + }, + { + "name": "sampler", + "type": "SAMPLER", + "link": 348 + }, + { + "name": 
"sigmas", + "type": "SIGMAS", + "link": 349 + }, + { + "name": "latent_image", + "type": "LATENT", + "link": 339 + } + ], + "outputs": [ + { + "name": "output", + "type": "LATENT", + "links": [ + 360 + ] + }, + { + "name": "denoised_output", + "type": "LATENT", + "links": null + } + ], + "properties": { + "cnr_id": "comfy-core", + "ver": "0.3.59", + "Node name for S&R": "SamplerCustomAdvanced" + }, + "widgets_values": [] + }, + { + "id": 207, + "type": "TrimVideoLatent", + "pos": [ + 2556.41162109375, + -444.12176513671875 + ], + "size": [ + 270, + 58 + ], + "flags": {}, + "order": 57, + "mode": 0, + "inputs": [ + { + "name": "samples", + "type": "LATENT", + "link": 360 + }, + { + "name": "trim_amount", + "type": "INT", + "widget": { + "name": "trim_amount" + }, + "link": 361 + } + ], + "outputs": [ + { + "name": "LATENT", + "type": "LATENT", + "links": [ + 362 + ] + } + ], + "properties": { + "cnr_id": "comfy-core", + "ver": "0.3.59", + "Node name for S&R": "TrimVideoLatent" + }, + "widgets_values": [ + 0 + ] + }, + { + "id": 164, + "type": "MarkdownNote", + "pos": [ + -1191.340087890625, + -515.903564453125 + ], + "size": [ + 884.3952026367188, + 400.1950378417969 + ], + "flags": {}, + "order": 29, + "mode": 0, + "inputs": [], + "outputs": [], + "title": "Markdown Note: Model Links", + "properties": {}, + "widgets_values": [ + "Model links:\n\n!!IMPORTANT!!\nMy initial upload of the fp8_scaled model works poorly in native workflows due to some face layer quantization, I've uploaded fixed version \"v2\" which should be used if you want to use fp8_scaled:\n\n[https://huggingface.co/Kijai/WanVideo_comfy_fp8_scaled/blob/main/Wan22Animate/Wan2_2-Animate-14B_fp8_scaled_e4m3fn_KJ_v2.safetensors](https://huggingface.co/Kijai/WanVideo_comfy_fp8_scaled/blob/main/Wan22Animate/Wan2_2-Animate-14B_fp8_scaled_e4m3fn_KJ_v2.safetensors)\n\n\nbf16:\n\n[https://huggingface.co/Comfy-Org/Wan_2.2_ComfyUI_Repackaged/blob/main/split_files/diffusion_models/wan2.2_animate_14B_bf16.safetensors](https://huggingface.co/Comfy-Org/Wan_2.2_ComfyUI_Repackaged/blob/main/split_files/diffusion_models/wan2.2_animate_14B_bf16.safetensors)\n\n\nLoRA:\n\n[https://huggingface.co/Kijai/WanVideo_comfy/tree/main/Lightx2v](https://huggingface.co/Kijai/WanVideo_comfy/tree/main/Lightx2v)\n\n[https://huggingface.co/Comfy-Org/Wan_2.2_ComfyUI_Repackaged/blob/main/split_files/loras/wan2.2_animate_14B_relight_lora_bf16.safetensors](https://huggingface.co/Comfy-Org/Wan_2.2_ComfyUI_Repackaged/blob/main/split_files/loras/wan2.2_animate_14B_relight_lora_bf16.safetensors)\n\nText encoder:\n\n[https://huggingface.co/Comfy-Org/Wan_2.2_ComfyUI_Repackaged/blob/main/split_files/text_encoders/umt5_xxl_fp8_e4m3fn_scaled.safetensors](https://huggingface.co/Comfy-Org/Wan_2.2_ComfyUI_Repackaged/blob/main/split_files/text_encoders/umt5_xxl_fp8_e4m3fn_scaled.safetensors)\n\nVAE:\n\n[https://huggingface.co/Comfy-Org/Wan_2.2_ComfyUI_Repackaged/blob/main/split_files/vae/wan_2.1_vae.safetensors](https://huggingface.co/Comfy-Org/Wan_2.2_ComfyUI_Repackaged/blob/main/split_files/vae/wan_2.1_vae.safetensors)" + ], + "color": "#432", + "bgcolor": "#653" + } + ], + "links": [ + [ + 82, + 57, + 0, + 64, + 0, + "IMAGE" + ], + [ + 87, + 42, + 0, + 66, + 0, + "IMAGE" + ], + [ + 89, + 66, + 0, + 30, + 0, + "IMAGE" + ], + [ + 107, + 77, + 0, + 66, + 1, + "IMAGE" + ], + [ + 185, + 102, + 0, + 104, + 0, + "SAM2MODEL" + ], + [ + 231, + 64, + 0, + 128, + 0, + "*" + ], + [ + 233, + 99, + 0, + 130, + 0, + "*" + ], + [ + 236, + 134, + 0, + 77, + 0, + "IMAGE" + ], + [ + 239, + 
137, + 0, + 77, + 1, + "IMAGE" + ], + [ + 243, + 140, + 0, + 77, + 2, + "IMAGE" + ], + [ + 245, + 108, + 0, + 142, + 0, + "*" + ], + [ + 246, + 142, + 0, + 99, + 1, + "MASK" + ], + [ + 248, + 63, + 0, + 144, + 0, + "*" + ], + [ + 249, + 145, + 0, + 77, + 3, + "IMAGE" + ], + [ + 250, + 146, + 0, + 99, + 0, + "IMAGE" + ], + [ + 251, + 130, + 0, + 75, + 0, + "IMAGE" + ], + [ + 255, + 63, + 2, + 148, + 0, + "*" + ], + [ + 256, + 149, + 0, + 30, + 1, + "AUDIO" + ], + [ + 257, + 150, + 0, + 63, + 2, + "INT" + ], + [ + 258, + 151, + 0, + 63, + 3, + "INT" + ], + [ + 263, + 150, + 0, + 153, + 0, + "*" + ], + [ + 264, + 151, + 0, + 154, + 0, + "*" + ], + [ + 267, + 63, + 1, + 157, + 0, + "*" + ], + [ + 286, + 150, + 0, + 64, + 2, + "INT" + ], + [ + 287, + 151, + 0, + 64, + 3, + "INT" + ], + [ + 290, + 178, + 0, + 172, + 0, + "POSEMODEL" + ], + [ + 294, + 172, + 0, + 173, + 0, + "POSEDATA" + ], + [ + 306, + 144, + 0, + 180, + 0, + "IMAGE" + ], + [ + 307, + 180, + 1, + 173, + 1, + "INT" + ], + [ + 308, + 180, + 2, + 173, + 2, + "INT" + ], + [ + 309, + 180, + 0, + 172, + 1, + "IMAGE" + ], + [ + 310, + 180, + 1, + 172, + 3, + "INT" + ], + [ + 311, + 180, + 2, + 172, + 4, + "INT" + ], + [ + 314, + 104, + 0, + 182, + 0, + "MASK" + ], + [ + 315, + 182, + 0, + 108, + 0, + "MASK" + ], + [ + 316, + 180, + 0, + 104, + 1, + "IMAGE" + ], + [ + 317, + 172, + 1, + 183, + 0, + "*" + ], + [ + 318, + 183, + 0, + 174, + 0, + "IMAGE" + ], + [ + 319, + 173, + 0, + 184, + 0, + "*" + ], + [ + 320, + 184, + 0, + 181, + 0, + "IMAGE" + ], + [ + 321, + 172, + 3, + 104, + 4, + "BBOX" + ], + [ + 322, + 188, + 0, + 189, + 0, + "*" + ], + [ + 323, + 190, + 0, + 191, + 0, + "MODEL" + ], + [ + 324, + 187, + 0, + 190, + 0, + "MODEL" + ], + [ + 325, + 191, + 0, + 192, + 0, + "MODEL" + ], + [ + 326, + 195, + 0, + 194, + 0, + "NOISE" + ], + [ + 331, + 193, + 0, + 198, + 0, + "CLIP" + ], + [ + 332, + 192, + 0, + 200, + 0, + "MODEL" + ], + [ + 334, + 198, + 0, + 201, + 0, + "CONDITIONING" + ], + [ + 336, + 199, + 0, + 200, + 1, + "CONDITIONING" + ], + [ + 337, + 199, + 1, + 200, + 2, + "CONDITIONING" + ], + [ + 338, + 198, + 0, + 199, + 0, + "CONDITIONING" + ], + [ + 339, + 199, + 2, + 194, + 4, + "LATENT" + ], + [ + 340, + 202, + 0, + 199, + 2, + "VAE" + ], + [ + 345, + 162, + 0, + 204, + 1, + "VAE" + ], + [ + 346, + 204, + 0, + 42, + 0, + "IMAGE" + ], + [ + 347, + 200, + 0, + 194, + 1, + "GUIDER" + ], + [ + 348, + 205, + 0, + 194, + 2, + "SAMPLER" + ], + [ + 349, + 206, + 0, + 194, + 3, + "SIGMAS" + ], + [ + 350, + 192, + 0, + 206, + 0, + "MODEL" + ], + [ + 351, + 133, + 0, + 199, + 4, + "IMAGE" + ], + [ + 352, + 138, + 0, + 199, + 5, + "IMAGE" + ], + [ + 353, + 141, + 0, + 199, + 6, + "IMAGE" + ], + [ + 354, + 131, + 0, + 199, + 7, + "IMAGE" + ], + [ + 355, + 143, + 0, + 199, + 8, + "MASK" + ], + [ + 356, + 155, + 0, + 199, + 10, + "INT" + ], + [ + 357, + 156, + 0, + 199, + 11, + "INT" + ], + [ + 358, + 158, + 0, + 199, + 12, + "INT" + ], + [ + 359, + 201, + 0, + 199, + 1, + "CONDITIONING" + ], + [ + 360, + 194, + 0, + 207, + 0, + "LATENT" + ], + [ + 361, + 199, + 3, + 207, + 1, + "INT" + ], + [ + 362, + 207, + 0, + 204, + 0, + "LATENT" + ] + ], + "groups": [ + { + "id": 1, + "title": "Reference Image", + "bounding": [ + -1209.306884765625, + -1833.3065185546875, + 990.079833984375, + 724.450439453125 + ], + "color": "#3f789e", + "font_size": 24, + "flags": {} + }, + { + "id": 3, + "title": "Preprocessing", + "bounding": [ + -1227.062744140625, + -3202.685302734375, + 4104.810546875, + 1281.6610107421875 + ], + "color": "#3f789e", + 
"font_size": 24, + "flags": {} + }, + { + "id": 4, + "title": "Models", + "bounding": [ + -1224.449951171875, + -1055.772705078125, + 2156.392578125, + 1012.5536499023438 + ], + "color": "#88A", + "font_size": 24, + "flags": {} + }, + { + "id": 5, + "title": "Result collage", + "bounding": [ + 2370.66357421875, + -1369.016845703125, + 629.7467041015625, + 605.8086547851562 + ], + "color": "#3f789e", + "font_size": 24, + "flags": {} + } + ], + "config": {}, + "extra": { + "ds": { + "scale": 0.39142513012204794, + "offset": [ + 2614.7614997967285, + 1809.0478995517838 + ] + }, + "frontendVersion": "1.28.1", + "node_versions": { + "ComfyUI-WanVideoWrapper": "5a2383621a05825d0d0437781afcb8552d9590fd", + "ComfyUI-KJNodes": "a5bd3c86c8ed6b83c55c2d0e7a59515b15a0137f", + "ComfyUI-VideoHelperSuite": "0a75c7958fe320efcb052f1d9f8451fd20c730a8" + }, + "VHS_latentpreview": true, + "VHS_latentpreviewrate": 0, + "VHS_MetadataImage": true, + "VHS_KeepIntermediate": true + }, + "version": 0.4 +} \ No newline at end of file diff --git a/zavodik/nodes/ComfyUI-WanAnimatePreprocess/models/__pycache__/onnx_models.cpython-313.pyc b/zavodik/nodes/ComfyUI-WanAnimatePreprocess/models/__pycache__/onnx_models.cpython-313.pyc new file mode 100644 index 0000000000000000000000000000000000000000..b5608e94337945efbaeebe3e8251639ed14fef9c Binary files /dev/null and b/zavodik/nodes/ComfyUI-WanAnimatePreprocess/models/__pycache__/onnx_models.cpython-313.pyc differ diff --git a/zavodik/nodes/ComfyUI-WanAnimatePreprocess/models/onnx_models.py b/zavodik/nodes/ComfyUI-WanAnimatePreprocess/models/onnx_models.py new file mode 100644 index 0000000000000000000000000000000000000000..bde7931ec7ec5661ee9565cd061ca7e9f95571be --- /dev/null +++ b/zavodik/nodes/ComfyUI-WanAnimatePreprocess/models/onnx_models.py @@ -0,0 +1,282 @@ +# Copyright 2024-2025 The Alibaba Wan Team Authors. All rights reserved. 
+
+import cv2
+import numpy as np
+import torch
+import onnxruntime
+
+from ..pose_utils.pose2d_utils import box_convert_simple, keypoints_from_heatmaps
+
+class SimpleOnnxInference(object):
+    def __init__(self, checkpoint, device='CUDAExecutionProvider', **kwargs):
+        # Store initialization parameters for potential reinit
+        self.checkpoint = checkpoint
+        self.init_kwargs = kwargs
+        # Always keep a CPU fallback behind the CUDA provider
+        provider = [device, 'CPUExecutionProvider'] if device == 'CUDAExecutionProvider' else [device]
+
+        self.provider = provider
+        self.session = onnxruntime.InferenceSession(checkpoint, providers=provider)
+        self.input_name = self.session.get_inputs()[0].name
+        self.output_name = self.session.get_outputs()[0].name
+        self.input_resolution = self.session.get_inputs()[0].shape[2:]
+        self.input_resolution = np.array(self.input_resolution)
+
+    def __call__(self, *args, **kwargs):
+        return self.forward(*args, **kwargs)
+
+    def get_output_names(self):
+        output_names = []
+        for node in self.session.get_outputs():
+            output_names.append(node.name)
+        return output_names
+
+    def cleanup(self):
+        if hasattr(self, 'session') and self.session is not None:
+            # Close the ONNX Runtime session and release its memory
+            del self.session
+            self.session = None
+
+    def reinit(self, provider=None):
+        # Use provided provider or fall back to original provider
+        if provider is not None:
+            self.provider = provider
+
+        if self.session is None:
+            checkpoint = self.checkpoint
+            self.session = onnxruntime.InferenceSession(checkpoint, providers=self.provider)
+            self.input_name = self.session.get_inputs()[0].name
+            self.output_name = self.session.get_outputs()[0].name
+            self.input_resolution = self.session.get_inputs()[0].shape[2:]
+            self.input_resolution = np.array(self.input_resolution)
+
+class Yolo(SimpleOnnxInference):
+    def __init__(self, checkpoint, device='cuda', threshold_conf=0.05, threshold_multi_persons=0.1, input_resolution=(640, 640), threshold_iou=0.5, threshold_bbox_shape_ratio=0.4, cat_id=[1], select_type='max', strict=True, sorted_func=None, **kwargs):
+        super(Yolo, self).__init__(checkpoint, device=device, **kwargs)
+
+        model_inputs = self.session.get_inputs()
+        input_shape = model_inputs[0].shape
+
+        # The detector is always fed 640x640 input; callers resize frames accordingly
+        self.input_width = 640
+        self.input_height = 640
+
+        self.threshold_multi_persons = threshold_multi_persons
+        self.threshold_conf = threshold_conf
+        self.threshold_iou = threshold_iou
+        self.threshold_bbox_shape_ratio = threshold_bbox_shape_ratio
+        self.input_resolution = input_resolution
+        self.cat_id = cat_id
+        self.select_type = select_type
+        self.strict = strict
+        self.sorted_func = sorted_func
+
+    def postprocess(self, output, shape_raw, cat_id=[1]):
+        """
+        Performs post-processing on the model's output to extract bounding boxes, scores, and class IDs.
+
+        Args:
+            output (numpy.ndarray): The raw output of the YOLO model.
+            shape_raw (array-like): (height, width) of the original image, used to rescale the boxes.
+            cat_id (list): Category ids to keep.
+
+        Returns:
+            numpy.ndarray: One row per detection kept after NMS, as [x1, y1, x2, y2, score, class_id].
+ """ + # Transpose and squeeze the output to match the expected shape + + outputs = np.squeeze(output) + if len(outputs.shape) == 1: + outputs = outputs[None] + if output.shape[-1] != 6 and output.shape[1] == 84: + outputs = np.transpose(outputs) + + # Get the number of rows in the outputs array + rows = outputs.shape[0] + + # Calculate the scaling factors for the bounding box coordinates + x_factor = shape_raw[1] / self.input_width + y_factor = shape_raw[0] / self.input_height + + # Lists to store the bounding boxes, scores, and class IDs of the detections + boxes = [] + scores = [] + class_ids = [] + + if outputs.shape[-1] == 6: + max_scores = outputs[:, 4] + classid = outputs[:, -1] + + threshold_conf_masks = max_scores >= self.threshold_conf + classid_masks = classid[threshold_conf_masks] != 3.14159 + + max_scores = max_scores[threshold_conf_masks][classid_masks] + classid = classid[threshold_conf_masks][classid_masks] + + boxes = outputs[:, :4][threshold_conf_masks][classid_masks] + boxes[:, [0, 2]] *= x_factor + boxes[:, [1, 3]] *= y_factor + boxes[:, 2] = boxes[:, 2] - boxes[:, 0] + boxes[:, 3] = boxes[:, 3] - boxes[:, 1] + boxes = boxes.astype(np.int32) + + else: + classes_scores = outputs[:, 4:] + max_scores = np.amax(classes_scores, -1) + threshold_conf_masks = max_scores >= self.threshold_conf + + classid = np.argmax(classes_scores[threshold_conf_masks], -1) + + classid_masks = classid!=3.14159 + + classes_scores = classes_scores[threshold_conf_masks][classid_masks] + max_scores = max_scores[threshold_conf_masks][classid_masks] + classid = classid[classid_masks] + + xywh = outputs[:, :4][threshold_conf_masks][classid_masks] + + x = xywh[:, 0:1] + y = xywh[:, 1:2] + w = xywh[:, 2:3] + h = xywh[:, 3:4] + + left = ((x - w / 2) * x_factor) + top = ((y - h / 2) * y_factor) + width = (w * x_factor) + height = (h * y_factor) + boxes = np.concatenate([left, top, width, height], axis=-1).astype(np.int32) + + boxes = boxes.tolist() + scores = max_scores.tolist() + class_ids = classid.tolist() + + # Apply non-maximum suppression to filter out overlapping bounding boxes + indices = cv2.dnn.NMSBoxes(boxes, scores, self.threshold_conf, self.threshold_iou) + # Iterate over the selected indices after non-maximum suppression + + results = [] + for i in indices: + # Get the box, score, and class ID corresponding to the index + box = box_convert_simple(boxes[i], 'xywh2xyxy') + score = scores[i] + class_id = class_ids[i] + results.append(box + [score] + [class_id]) + # # Draw the detection on the input image + + # Return the modified input image + return np.array(results) + + + def process_results(self, results, shape_raw, cat_id=[1], single_person=True): + if isinstance(results, tuple): + det_results = results[0] + else: + det_results = results + + person_results = [] + person_count = 0 + if len(results): + max_idx = -1 + max_bbox_size = shape_raw[0] * shape_raw[1] * -10 + max_bbox_shape = -1 + + bboxes = [] + idx_list = [] + for i in range(results.shape[0]): + bbox = results[i] + if (bbox[-1] + 1 in cat_id) and (bbox[-2] > self.threshold_conf): + idx_list.append(i) + bbox_shape = max((bbox[2] - bbox[0]), ((bbox[3] - bbox[1]))) + if bbox_shape > max_bbox_shape: + max_bbox_shape = bbox_shape + + results = results[idx_list] + + for i in range(results.shape[0]): + bbox = results[i] + bboxes.append(bbox) + if self.select_type == 'max': + bbox_size = (bbox[2] - bbox[0]) * ((bbox[3] - bbox[1])) + elif self.select_type == 'center': + bbox_size = (abs((bbox[2] + bbox[0]) / 2 - shape_raw[1]/2)) * -1 + 
bbox_shape = max((bbox[2] - bbox[0]), ((bbox[3] - bbox[1]))) + if bbox_size > max_bbox_size: + if (self.strict or max_idx != -1) and bbox_shape < max_bbox_shape * self.threshold_bbox_shape_ratio: + continue + max_bbox_size = bbox_size + max_bbox_shape = bbox_shape + max_idx = i + + if self.sorted_func is not None and len(bboxes) > 0: + max_idx = self.sorted_func(bboxes, shape_raw) + bbox = bboxes[max_idx] + if self.select_type == 'max': + max_bbox_size = (bbox[2] - bbox[0]) * ((bbox[3] - bbox[1])) + elif self.select_type == 'center': + max_bbox_size = (abs((bbox[2] + bbox[0]) / 2 - shape_raw[1]/2)) * -1 + + if max_idx != -1: + person_count = 1 + + if max_idx != -1: + person = {} + person['bbox'] = results[max_idx, :5] + person['track_id'] = int(0) + person_results.append(person) + + for i in range(results.shape[0]): + bbox = results[i] + if (bbox[-1] + 1 in cat_id) and (bbox[-2] > self.threshold_conf): + if self.select_type == 'max': + bbox_size = (bbox[2] - bbox[0]) * ((bbox[3] - bbox[1])) + elif self.select_type == 'center': + bbox_size = (abs((bbox[2] + bbox[0]) / 2 - shape_raw[1]/2)) * -1 + if i != max_idx and bbox_size > max_bbox_size * self.threshold_multi_persons and bbox_size < max_bbox_size: + person_count += 1 + if not single_person: + person = {} + person['bbox'] = results[i, :5] + person['track_id'] = int(person_count - 1) + person_results.append(person) + return person_results + else: + return None + + + def postprocess_threading(self, outputs, shape_raw, person_results, i, single_person=True, **kwargs): + result = self.postprocess(outputs[i], shape_raw[i], cat_id=self.cat_id) + result = self.process_results(result, shape_raw[i], cat_id=self.cat_id, single_person=single_person) + if result is not None and len(result) != 0: + person_results[i] = result + + + def forward(self, img, shape_raw, **kwargs): + """ + Performs inference using an ONNX model and returns the output image with drawn detections. + + Returns: + output_img: The output image with drawn detections. 
+ """ + if isinstance(img, torch.Tensor): + img = img.cpu().numpy() + shape_raw = shape_raw.cpu().numpy() + + outputs = self.session.run(None, {self.session.get_inputs()[0].name: img})[0] + person_results = [[{'bbox': np.array([0., 0., 1.*shape_raw[i][1], 1.*shape_raw[i][0], -1]), 'track_id': -1}] for i in range(len(outputs))] + + for i in range(len(outputs)): + self.postprocess_threading(outputs, shape_raw, person_results, i, **kwargs) + return person_results + + +class ViTPose(SimpleOnnxInference): + def __init__(self, checkpoint, device='cuda', **kwargs): + super(ViTPose, self).__init__(checkpoint, device=device) + + def forward(self, img, center, scale, **kwargs): + heatmaps = self.session.run([], {self.session.get_inputs()[0].name: img})[0] + points, prob = keypoints_from_heatmaps(heatmaps=heatmaps, + center=center, + scale=scale*200, + unbiased=True, + use_udp=False) + return np.concatenate([points, prob], axis=2) diff --git a/zavodik/nodes/ComfyUI-WanAnimatePreprocess/nodes.py b/zavodik/nodes/ComfyUI-WanAnimatePreprocess/nodes.py new file mode 100644 index 0000000000000000000000000000000000000000..6621b2af459f1787d1ed0eaa8d70ac6a65503ffd --- /dev/null +++ b/zavodik/nodes/ComfyUI-WanAnimatePreprocess/nodes.py @@ -0,0 +1,494 @@ +import os +import torch +from tqdm import tqdm +import numpy as np +import folder_paths +import cv2 +import json +import logging +script_directory = os.path.dirname(os.path.abspath(__file__)) + +from comfy import model_management as mm +from comfy.utils import ProgressBar +device = mm.get_torch_device() +offload_device = mm.unet_offload_device() + +folder_paths.add_model_folder_path("detection", os.path.join(folder_paths.models_dir, "detection")) + +from .models.onnx_models import ViTPose, Yolo +from .pose_utils.pose2d_utils import load_pose_metas_from_kp2ds_seq, crop, bbox_from_detector +from .utils import get_face_bboxes, padding_resize, resize_by_area, resize_to_bounds +from .pose_utils.human_visualization import AAPoseMeta, draw_aapose_by_meta_new +from .retarget_pose import get_retarget_pose + +class OnnxDetectionModelLoader: + @classmethod + def INPUT_TYPES(s): + return { + "required": { + "vitpose_model": (folder_paths.get_filename_list("detection"), {"tooltip": "These models are loaded from the 'ComfyUI/models/detection' -folder",}), + "yolo_model": (folder_paths.get_filename_list("detection"), {"tooltip": "These models are loaded from the 'ComfyUI/models/detection' -folder",}), + "onnx_device": (["CUDAExecutionProvider", "CPUExecutionProvider"], {"default": "CUDAExecutionProvider", "tooltip": "Device to run the ONNX models on"}), + }, + } + + RETURN_TYPES = ("POSEMODEL",) + RETURN_NAMES = ("model", ) + FUNCTION = "loadmodel" + CATEGORY = "WanAnimatePreprocess" + DESCRIPTION = "Loads ONNX models for pose and face detection. ViTPose for pose estimation and YOLO for object detection." 
+
+    def loadmodel(self, vitpose_model, yolo_model, onnx_device):
+
+        vitpose_model_path = folder_paths.get_full_path_or_raise("detection", vitpose_model)
+        yolo_model_path = folder_paths.get_full_path_or_raise("detection", yolo_model)
+
+        vitpose = ViTPose(vitpose_model_path, onnx_device)
+        yolo = Yolo(yolo_model_path, onnx_device)
+
+        model = {
+            "vitpose": vitpose,
+            "yolo": yolo,
+        }
+
+        return (model, )
+
+class PoseAndFaceDetection:
+    @classmethod
+    def INPUT_TYPES(s):
+        return {
+            "required": {
+                "model": ("POSEMODEL",),
+                "images": ("IMAGE",),
+                "width": ("INT", {"default": 832, "min": 64, "max": 2048, "step": 1, "tooltip": "Width of the generation"}),
+                "height": ("INT", {"default": 480, "min": 64, "max": 2048, "step": 1, "tooltip": "Height of the generation"}),
+            },
+            "optional": {
+                "retarget_image": ("IMAGE", {"default": None, "tooltip": "Optional reference image for pose retargeting"}),
+                "face_padding": ("INT", {"default": 0, "min": 0, "max": 512, "step": 1, "tooltip": "When > 0, the detected face images are padded and resized to 512x512"}),
+            },
+        }
+
+    RETURN_TYPES = ("POSEDATA", "IMAGE", "STRING", "BBOX", "BBOX")
+    RETURN_NAMES = ("pose_data", "face_images", "key_frame_body_points", "bboxes", "face_bboxes")
+    FUNCTION = "process"
+    CATEGORY = "WanAnimatePreprocess"
+    DESCRIPTION = "Detects human poses and face images from input images. Optionally retargets poses based on a reference image."
+
+    def process(self, model, images, width, height, retarget_image=None, face_padding=0):
+        detector = model["yolo"]
+        pose_model = model["vitpose"]
+        B, H, W, C = images.shape
+
+        shape = np.array([H, W])[None]
+        images_np = images.numpy()
+
+        IMG_NORM_MEAN = np.array([0.485, 0.456, 0.406])
+        IMG_NORM_STD = np.array([0.229, 0.224, 0.225])
+        input_resolution = (256, 192)
+        rescale = 1.25
+
+        detector.reinit()
+        pose_model.reinit()
+        if retarget_image is not None:
+            refer_img = resize_by_area(retarget_image[0].numpy() * 255, width * height, divisor=16) / 255.0
+            ref_bbox = (detector(
+                cv2.resize(refer_img.astype(np.float32), (640, 640)).transpose(2, 0, 1)[None],
+                shape
+            )[0][0]["bbox"])
+
+            if ref_bbox is None or ref_bbox[-1] <= 0 or (ref_bbox[2] - ref_bbox[0]) < 10 or (ref_bbox[3] - ref_bbox[1]) < 10:
+                ref_bbox = np.array([0, 0, refer_img.shape[1], refer_img.shape[0]])
+
+            center, scale = bbox_from_detector(ref_bbox, input_resolution, rescale=rescale)
+            refer_img = crop(refer_img, center, scale, (input_resolution[0], input_resolution[1]))[0]
+
+            img_norm = (refer_img - IMG_NORM_MEAN) / IMG_NORM_STD
+            img_norm = img_norm.transpose(2, 0, 1).astype(np.float32)
+
+            ref_keypoints = pose_model(img_norm[None], np.array(center)[None], np.array(scale)[None])
+            refer_pose_meta = load_pose_metas_from_kp2ds_seq(ref_keypoints, width=retarget_image.shape[2], height=retarget_image.shape[1])[0]
+
+        comfy_pbar = ProgressBar(B*2)
+        progress = 0
+        bboxes = []
+        for img in tqdm(images_np, total=len(images_np), desc="Detecting bboxes"):
+            bboxes.append(detector(
+                cv2.resize(img, (640, 640)).transpose(2, 0, 1)[None],
+                shape
+            )[0][0]["bbox"])
+            progress += 1
+            if progress % 10 == 0:
+                comfy_pbar.update_absolute(progress)
+
+        detector.cleanup()
+
+        kp2ds = []
+        for img, bbox in tqdm(zip(images_np, bboxes), total=len(images_np), desc="Extracting keypoints"):
+            if bbox is None or bbox[-1] <= 0 or (bbox[2] - bbox[0]) < 10 or (bbox[3] - bbox[1]) < 10:
+                bbox = np.array([0, 0, img.shape[1], img.shape[0]])
+
+            bbox_xywh = bbox
+            center, scale = bbox_from_detector(bbox_xywh, input_resolution, rescale=rescale) +
img = crop(img, center, scale, (input_resolution[0], input_resolution[1]))[0] + + img_norm = (img - IMG_NORM_MEAN) / IMG_NORM_STD + img_norm = img_norm.transpose(2, 0, 1).astype(np.float32) + + keypoints = pose_model(img_norm[None], np.array(center)[None], np.array(scale)[None]) + kp2ds.append(keypoints) + progress += 1 + if progress % 10 == 0: + comfy_pbar.update_absolute(progress) + + pose_model.cleanup() + + kp2ds = np.concatenate(kp2ds, 0) + pose_metas = load_pose_metas_from_kp2ds_seq(kp2ds, width=W, height=H) + + face_images = [] + face_bboxes = [] + for idx, meta in enumerate(pose_metas): + face_bbox_for_image = get_face_bboxes(meta['keypoints_face'][:, :2], scale=1.3, image_shape=(H, W)) + x1, x2, y1, y2 = face_bbox_for_image + if face_padding > 0: + x1 = max(0, x1 - face_padding) + y1 = max(0, y1 - face_padding) + x2 = min(W, x2 + face_padding) + y2 = min(H, y2 + face_padding) + face_bboxes.append((x1, y1, x2, y2)) + face_image = images_np[idx][y1:y2, x1:x2] + # Check if face_image is valid before resizing + if face_image.size == 0 or face_image.shape[0] == 0 or face_image.shape[1] == 0: + logging.warning(f"Empty face crop on frame {idx}, creating fallback image.") + # Create a fallback image (black or use center crop) + fallback_size = int(min(H, W) * 0.3) + fallback_x1 = (W - fallback_size) // 2 + fallback_x2 = fallback_x1 + fallback_size + fallback_y1 = int(H * 0.1) + fallback_y2 = fallback_y1 + fallback_size + face_image = images_np[idx][fallback_y1:fallback_y2, fallback_x1:fallback_x2] + + # If still empty, create a black image + if face_image.size == 0: + face_image = np.zeros((fallback_size, fallback_size, C), dtype=images_np.dtype) + face_image = cv2.resize(face_image, (512, 512)) + face_images.append(face_image) + + face_images_np = np.stack(face_images, 0) + face_images_tensor = torch.from_numpy(face_images_np) + + if retarget_image is not None and refer_pose_meta is not None: + retarget_pose_metas = get_retarget_pose(pose_metas[0], refer_pose_meta, pose_metas, None, None) + else: + retarget_pose_metas = [AAPoseMeta.from_humanapi_meta(meta) for meta in pose_metas] + + bbox = np.array(bboxes[0]).flatten() + if bbox.shape[0] >= 4: + bbox_ints = tuple(int(v) for v in bbox[:4]) + else: + bbox_ints = (0, 0, 0, 0) + + key_frame_num = 4 if B >= 4 else 1 + key_frame_step = len(pose_metas) // key_frame_num + key_frame_index_list = list(range(0, len(pose_metas), key_frame_step)) + + key_points_index = [0, 1, 2, 5, 8, 11, 10, 13] + + for key_frame_index in key_frame_index_list: + keypoints_body_list = [] + body_key_points = pose_metas[key_frame_index]['keypoints_body'] + for each_index in key_points_index: + each_keypoint = body_key_points[each_index] + if None is each_keypoint: + continue + keypoints_body_list.append(each_keypoint) + + keypoints_body = np.array(keypoints_body_list)[:, :2] + wh = np.array([[pose_metas[0]['width'], pose_metas[0]['height']]]) + points = (keypoints_body * wh).astype(np.int32) + points_dict_list = [] + for point in points: + points_dict_list.append({"x": int(point[0]), "y": int(point[1])}) + + pose_data = { + "retarget_image": refer_img if retarget_image is not None else None, + "pose_metas": retarget_pose_metas, + "refer_pose_meta": refer_pose_meta if retarget_image is not None else None, + "pose_metas_original": pose_metas, + } + + return (pose_data, face_images_tensor, json.dumps(points_dict_list), [bbox_ints], face_bboxes) + +class DrawViTPose: + @classmethod + def INPUT_TYPES(s): + return { + "required": { + "pose_data": ("POSEDATA",), + "width": 
("INT", {"default": 832, "min": 64, "max": 2048, "step": 1, "tooltip": "Width of the generation"}), + "height": ("INT", {"default": 480, "min": 64, "max": 2048, "step": 1, "tooltip": "Height of the generation"}), + "retarget_padding": ("INT", {"default": 16, "min": 0, "max": 512, "step": 1, "tooltip": "When > 0, the retargeted pose image is padded and resized to the target size"}), + "body_stick_width": ("INT", {"default": -1, "min": -1, "max": 20, "step": 1, "tooltip": "Width of the body sticks. Set to 0 to disable body drawing, -1 for auto"}), + "hand_stick_width": ("INT", {"default": -1, "min": -1, "max": 20, "step": 1, "tooltip": "Width of the hand sticks. Set to 0 to disable hand drawing, -1 for auto"}), + "draw_head": ("BOOLEAN", {"default": "True", "tooltip": "Whether to draw head keypoints"}), + }, + } + + RETURN_TYPES = ("IMAGE", ) + RETURN_NAMES = ("pose_images", ) + FUNCTION = "process" + CATEGORY = "WanAnimatePreprocess" + DESCRIPTION = "Draws pose images from pose data." + + def process(self, pose_data, width, height, body_stick_width, hand_stick_width, draw_head, retarget_padding=64): + + retarget_image = pose_data.get("retarget_image", None) + pose_metas = pose_data["pose_metas"] + + draw_hand = hand_stick_width != 0 + use_retarget_resize = retarget_padding > 0 and retarget_image is not None + + comfy_pbar = ProgressBar(len(pose_metas)) + progress = 0 + crop_target_image = None + pose_images = [] + + for idx, meta in enumerate(tqdm(pose_metas, desc="Drawing pose images")): + canvas = np.zeros((height, width, 3), dtype=np.uint8) + pose_image = draw_aapose_by_meta_new(canvas, meta, draw_hand=draw_hand, draw_head=draw_head, body_stick_width=body_stick_width, hand_stick_width=hand_stick_width) + + if crop_target_image is None: + crop_target_image = pose_image + + if use_retarget_resize: + pose_image = resize_to_bounds(pose_image, height, width, crop_target_image=crop_target_image, extra_padding=retarget_padding) + else: + pose_image = padding_resize(pose_image, height, width) + + pose_images.append(pose_image) + progress += 1 + if progress % 10 == 0: + comfy_pbar.update_absolute(progress) + + pose_images_np = np.stack(pose_images, 0) + pose_images_tensor = torch.from_numpy(pose_images_np).float() / 255.0 + + return (pose_images_tensor, ) + +class PoseRetargetPromptHelper: + @classmethod + def INPUT_TYPES(s): + return { + "required": { + "pose_data": ("POSEDATA",), + }, + } + + RETURN_TYPES = ("STRING", "STRING", ) + RETURN_NAMES = ("prompt", "retarget_prompt", ) + FUNCTION = "process" + CATEGORY = "WanAnimatePreprocess" + DESCRIPTION = "Generates text prompts for pose retargeting based on visibility of arms and legs in the template pose. 
Originally used for Flux Kontext" + + def process(self, pose_data): + refer_pose_meta = pose_data.get("refer_pose_meta", None) + if refer_pose_meta is None: + return ("Change the person to face forward.", "Change the person to face forward.", ) + tpl_pose_metas = pose_data["pose_metas_original"] + arm_visible = False + leg_visible = False + + for tpl_pose_meta in tpl_pose_metas: + tpl_keypoints = tpl_pose_meta['keypoints_body'] + tpl_keypoints = np.array(tpl_keypoints) + if np.any(tpl_keypoints[3]) != 0 or np.any(tpl_keypoints[4]) != 0 or np.any(tpl_keypoints[6]) != 0 or np.any(tpl_keypoints[7]) != 0: + if (tpl_keypoints[3][0] <= 1 and tpl_keypoints[3][1] <= 1 and tpl_keypoints[3][2] >= 0.75) or (tpl_keypoints[4][0] <= 1 and tpl_keypoints[4][1] <= 1 and tpl_keypoints[4][2] >= 0.75) or \ + (tpl_keypoints[6][0] <= 1 and tpl_keypoints[6][1] <= 1 and tpl_keypoints[6][2] >= 0.75) or (tpl_keypoints[7][0] <= 1 and tpl_keypoints[7][1] <= 1 and tpl_keypoints[7][2] >= 0.75): + arm_visible = True + if np.any(tpl_keypoints[9]) != 0 or np.any(tpl_keypoints[12]) != 0 or np.any(tpl_keypoints[10]) != 0 or np.any(tpl_keypoints[13]) != 0: + if (tpl_keypoints[9][0] <= 1 and tpl_keypoints[9][1] <= 1 and tpl_keypoints[9][2] >= 0.75) or (tpl_keypoints[12][0] <= 1 and tpl_keypoints[12][1] <= 1 and tpl_keypoints[12][2] >= 0.75) or \ + (tpl_keypoints[10][0] <= 1 and tpl_keypoints[10][1] <= 1 and tpl_keypoints[10][2] >= 0.75) or (tpl_keypoints[13][0] <= 1 and tpl_keypoints[13][1] <= 1 and tpl_keypoints[13][2] >= 0.75): + leg_visible = True + if arm_visible and leg_visible: + break + + if leg_visible: + if tpl_pose_meta['width'] > tpl_pose_meta['height']: + tpl_prompt = "Change the person to a standard T-pose (facing forward with arms extended). The person is standing. Feet and Hands are visible in the image." + else: + tpl_prompt = "Change the person to a standard pose with the face oriented forward and arms extending straight down by the sides. The person is standing. Feet and Hands are visible in the image." + + if refer_pose_meta['width'] > refer_pose_meta['height']: + refer_prompt = "Change the person to a standard T-pose (facing forward with arms extended). The person is standing. Feet and Hands are visible in the image." + else: + refer_prompt = "Change the person to a standard pose with the face oriented forward and arms extending straight down by the sides. The person is standing. Feet and Hands are visible in the image." + elif arm_visible: + if tpl_pose_meta['width'] > tpl_pose_meta['height']: + tpl_prompt = "Change the person to a standard T-pose (facing forward with arms extended). Hands are visible in the image." + else: + tpl_prompt = "Change the person to a standard pose with the face oriented forward and arms extending straight down by the sides. Hands are visible in the image." + + if refer_pose_meta['width'] > refer_pose_meta['height']: + refer_prompt = "Change the person to a standard T-pose (facing forward with arms extended). Hands are visible in the image." + else: + refer_prompt = "Change the person to a standard pose with the face oriented forward and arms extending straight down by the sides. Hands are visible in the image." + else: + tpl_prompt = "Change the person to face forward." + refer_prompt = "Change the person to face forward." 
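+        # Both prompts steer an image-editing model (originally Flux Kontext, per the
+        # node description) toward a canonical forward-facing pose; the hand/feet
+        # visibility hints are only added when arm or leg keypoints were confidently
+        # detected (score >= 0.75) somewhere in the template sequence.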
+ + return (tpl_prompt, refer_prompt, ) + +class PoseDetectionOneToAllAnimation: + @classmethod + def INPUT_TYPES(s): + return { + "required": { + "model": ("POSEMODEL",), + "images": ("IMAGE",), + "width": ("INT", {"default": 832, "min": 64, "max": 2048, "step": 2, "tooltip": "Width of the generation"}), + "height": ("INT", {"default": 480, "min": 64, "max": 2048, "step": 2, "tooltip": "Height of the generation"}), + "align_to": (["ref", "pose", "none"], {"default": "ref", "tooltip": "Alignment mode for poses"}), + "draw_face_points": (["full", "weak", "none"], {"default": "full", "tooltip": "Whether to draw face keypoints on the pose images"}), + "draw_head": (["full", "weak", "none"], {"default": "full", "tooltip": "Whether to draw head keypoints on the pose images"}), + }, + "optional": { + "ref_image": ("IMAGE", {"default": None, "tooltip": "Optional reference image for pose retargeting"}), + }, + } + + RETURN_TYPES = ("IMAGE", "IMAGE", "IMAGE", "MASK",) + RETURN_NAMES = ("pose_images", "ref_pose_image", "ref_image", "ref_mask") + FUNCTION = "process" + CATEGORY = "WanAnimatePreprocess" + DESCRIPTION = "Specialized pose detection and alignment for OneToAllAnimation model https://github.com/ssj9596/One-to-All-Animation. Detects poses from input images and aligns them based on a reference image if provided." + + def process(self, model, images, width, height, align_to, draw_face_points, draw_head, ref_image=None): + from .onetoall.infer_function import aaposemeta_to_dwpose, align_to_reference, align_to_pose + from .onetoall.utils import draw_pose_aligned, warp_ref_to_pose + detector = model["yolo"] + pose_model = model["vitpose"] + B, H, W, C = images.shape + + shape = np.array([H, W])[None] + images_np = images.numpy() + + IMG_NORM_MEAN = np.array([0.485, 0.456, 0.406]) + IMG_NORM_STD = np.array([0.229, 0.224, 0.225]) + input_resolution=(256, 192) + rescale = 1.25 + + detector.reinit() + pose_model.reinit() + + if ref_image is not None: + refer_img_np = ref_image[0].numpy() * 255 + refer_img = resize_by_area(refer_img_np, width * height, divisor=16) / 255.0 + ref_bbox = (detector( + cv2.resize(refer_img.astype(np.float32), (640, 640)).transpose(2, 0, 1)[None], + shape + )[0][0]["bbox"]) + + if ref_bbox is None or ref_bbox[-1] <= 0 or (ref_bbox[2] - ref_bbox[0]) < 10 or (ref_bbox[3] - ref_bbox[1]) < 10: + ref_bbox = np.array([0, 0, refer_img.shape[1], refer_img.shape[0]]) + + center, scale = bbox_from_detector(ref_bbox, input_resolution, rescale=rescale) + refer_img = crop(refer_img, center, scale, (input_resolution[0], input_resolution[1]))[0] + + img_norm = (refer_img - IMG_NORM_MEAN) / IMG_NORM_STD + img_norm = img_norm.transpose(2, 0, 1).astype(np.float32) + + ref_keypoints = pose_model(img_norm[None], np.array(center)[None], np.array(scale)[None]) + refer_pose_meta = load_pose_metas_from_kp2ds_seq(ref_keypoints, width=ref_image.shape[2], height=ref_image.shape[1])[0] + + ref_dwpose = aaposemeta_to_dwpose(refer_pose_meta) + + comfy_pbar = ProgressBar(B*2) + progress = 0 + bboxes = [] + for img in tqdm(images_np, total=len(images_np), desc="Detecting bboxes"): + bboxes.append(detector( + cv2.resize(img, (640, 640)).transpose(2, 0, 1)[None], + shape + )[0][0]["bbox"]) + progress += 1 + if progress % 10 == 0: + comfy_pbar.update_absolute(progress) + + detector.cleanup() + + kp2ds = [] + for img, bbox in tqdm(zip(images_np, bboxes), total=len(images_np), desc="Extracting keypoints"): + if bbox is None or bbox[-1] <= 0 or (bbox[2] - bbox[0]) < 10 or (bbox[3] - bbox[1]) < 10: + bbox = 
np.array([0, 0, img.shape[1], img.shape[0]]) + + bbox_xywh = bbox + center, scale = bbox_from_detector(bbox_xywh, input_resolution, rescale=rescale) + img = crop(img, center, scale, (input_resolution[0], input_resolution[1]))[0] + + img_norm = (img - IMG_NORM_MEAN) / IMG_NORM_STD + img_norm = img_norm.transpose(2, 0, 1).astype(np.float32) + + keypoints = pose_model(img_norm[None], np.array(center)[None], np.array(scale)[None]) + kp2ds.append(keypoints) + progress += 1 + if progress % 10 == 0: + comfy_pbar.update_absolute(progress) + + pose_model.cleanup() + + kp2ds = np.concatenate(kp2ds, 0) + pose_metas = load_pose_metas_from_kp2ds_seq(kp2ds, width=W, height=H) + tpl_dwposes = [aaposemeta_to_dwpose(meta) for meta in pose_metas] + + ref_pose_image_tensor = None + if ref_image is not None: + if align_to == "ref": + ref_pose_image = draw_pose_aligned(ref_dwpose, height, width, without_face=True) + ref_pose_image_np = np.stack(ref_pose_image, 0) + ref_pose_image_tensor = torch.from_numpy(ref_pose_image_np).unsqueeze(0).float() / 255.0 + tpl_dwposes = align_to_reference(refer_pose_meta, pose_metas, tpl_dwposes, anchor_idx=0) + image_input_tensor = ref_image + image_mask_tensor = torch.zeros(1, ref_image.shape[1], ref_image.shape[2], dtype=torch.float32, device="cpu") + elif align_to == "pose": + image_input, ref_pose_image_np, image_mask = warp_ref_to_pose(refer_img_np, tpl_dwposes[0], ref_dwpose) + ref_pose_image_np = np.stack(ref_pose_image_np, 0) + ref_pose_image_tensor = torch.from_numpy(ref_pose_image_np).unsqueeze(0).float() / 255.0 + tpl_dwposes = align_to_pose(ref_dwpose, tpl_dwposes, anchor_idx=0) + image_input_tensor = torch.from_numpy(image_input).unsqueeze(0).float() / 255.0 + image_mask_tensor = torch.from_numpy(image_mask).unsqueeze(0).float() / 255.0 + elif align_to == "none": + ref_pose_image = draw_pose_aligned(ref_dwpose, height, width, without_face=True) + ref_pose_image_np = np.stack(ref_pose_image, 0) + ref_pose_image_tensor = torch.from_numpy(ref_pose_image_np).unsqueeze(0).float() / 255.0 + image_input_tensor = ref_image + image_mask_tensor = torch.zeros(1, ref_image.shape[1], ref_image.shape[2], dtype=torch.float32, device="cpu") + else: + ref_pose_image_tensor = torch.zeros((1, height, width, 3), dtype=torch.float32, device="cpu") + image_input_tensor = torch.zeros((1, height, width, 3), dtype=torch.float32, device="cpu") + image_mask_tensor = torch.zeros(1, height, width, dtype=torch.float32, device="cpu") + + pose_imgs = [] + for pose_np in tpl_dwposes: + pose_img = draw_pose_aligned(pose_np, height, width, without_face=(draw_face_points=="none"), face_change=(draw_face_points=="weak"), head_strength=draw_head) + pose_img = torch.from_numpy(np.array(pose_img)) + pose_imgs.append(pose_img) + + pose_tensor = torch.stack(pose_imgs).cpu().float() / 255.0 + + return (pose_tensor, ref_pose_image_tensor, image_input_tensor, image_mask_tensor) + +NODE_CLASS_MAPPINGS = { + "OnnxDetectionModelLoader": OnnxDetectionModelLoader, + "PoseAndFaceDetection": PoseAndFaceDetection, + "DrawViTPose": DrawViTPose, + "PoseRetargetPromptHelper": PoseRetargetPromptHelper, + "PoseDetectionOneToAllAnimation": PoseDetectionOneToAllAnimation, +} +NODE_DISPLAY_NAME_MAPPINGS = { + "OnnxDetectionModelLoader": "ONNX Detection Model Loader", + "PoseAndFaceDetection": "Pose and Face Detection", + "DrawViTPose": "Draw ViT Pose", + "PoseRetargetPromptHelper": "Pose Retarget Prompt Helper", + "PoseDetectionOneToAllAnimation": "Pose Detection OneToAll Animation", +} diff --git 
a/zavodik/nodes/ComfyUI-WanAnimatePreprocess/onetoall/infer_function.py b/zavodik/nodes/ComfyUI-WanAnimatePreprocess/onetoall/infer_function.py new file mode 100644 index 0000000000000000000000000000000000000000..6381bdf18207c42a5568c54ea1dad530c5c8b7e1 --- /dev/null +++ b/zavodik/nodes/ComfyUI-WanAnimatePreprocess/onetoall/infer_function.py @@ -0,0 +1,508 @@ +# https://github.com/ssj9596/One-to-All-Animation + +import numpy as np +import copy +from ..retarget_pose import get_retarget_pose + +L_EYE_IDXS = list(range(36, 42)) +R_EYE_IDXS = list(range(42, 48)) +NOSE_TIP = 30 +MOUTH_L = 48 +MOUTH_R = 54 +JAW_LINE = list(range(0, 17)) + + +# ===========================Convert wanpose format into our dwpose-like format====================== +def aaposemeta_to_dwpose(meta): + candidate_body = meta['keypoints_body'][:-2][:, :2] + score_body = meta['keypoints_body'][:-2][:, 2] + subset_body = np.arange(len(candidate_body), dtype=float) + subset_body[score_body <= 0] = -1 + bodies = { + "candidate": candidate_body, + "subset": np.expand_dims(subset_body, axis=0), # shape (1, N) + "score": np.expand_dims(score_body, axis=0) # shape (1, N) + } + hands_coords = np.stack([ + meta['keypoints_right_hand'][:, :2], + meta['keypoints_left_hand'][:, :2] + ]) + hands_score = np.stack([ + meta['keypoints_right_hand'][:, 2], + meta['keypoints_left_hand'][:, 2] + ]) + faces_coords = np.expand_dims(meta['keypoints_face'][1:][:, :2], axis=0) + faces_score = np.expand_dims(meta['keypoints_face'][1:][:, 2], axis=0) + dwpose_format = { + "bodies": bodies, + "hands": hands_coords, + "hands_score": hands_score, + "faces": faces_coords, + "faces_score": faces_score + } + return dwpose_format + +def aaposemeta_obj_to_dwpose(pose_meta): + """ + Convert an AAPoseMeta object into a dwpose-like data structure + Restore coordinates to relative coordinates (divide by width, height) + Only handle None -> fill with zeros + """ + w = pose_meta.width + h = pose_meta.height + + # If None, fill with all zeros + def safe(arr, like_shape): + if arr is None: + return np.zeros(like_shape, dtype=np.float32) + arr_np = np.array(arr, dtype=np.float32) + arr_np = np.nan_to_num(arr_np, nan=0.0) + return arr_np + # body + kps_body = safe(pose_meta.kps_body, (pose_meta.kps_body_p.shape[0], 2)) + candidate_body = kps_body / np.array([w, h]) + score_body = safe(pose_meta.kps_body_p, (candidate_body.shape[0],)) + subset_body = np.arange(len(candidate_body), dtype=float) + subset_body[score_body <= 0] = -1 + bodies = { + "candidate": candidate_body, + "subset": np.expand_dims(subset_body, axis=0), + "score": np.expand_dims(score_body, axis=0) + } + + # hands + kps_rhand = safe(pose_meta.kps_rhand, (pose_meta.kps_rhand_p.shape[0], 2)) + kps_lhand = safe(pose_meta.kps_lhand, (pose_meta.kps_lhand_p.shape[0], 2)) + hands_coords = np.stack([ + kps_rhand / np.array([w, h]), + kps_lhand / np.array([w, h]) + ]) + hands_score = np.stack([ + safe(pose_meta.kps_rhand_p, (kps_rhand.shape[0],)), + safe(pose_meta.kps_lhand_p, (kps_lhand.shape[0],)) + ]) + + dwpose_format = { + "bodies": bodies, + "hands": hands_coords, + "hands_score": hands_score, + "faces": None, + "faces_score": None + } + return dwpose_format + +# ===============================Face Rough alignment====================== + +def _to_68x2(arr): + if arr.shape == (1, 68, 2): + def to_orig(x): + x = np.asarray(x, dtype=np.float64) + if x.shape != (68, 2): + raise ValueError("to_orig expects (68,2)") + return x[np.newaxis, :, :] + return arr[0].astype(np.float64), to_orig + if arr.shape == (68, 
2): + def to_orig(x): + x = np.asarray(x, dtype=np.float64) + if x.shape != (68, 2): + raise ValueError("to_orig expects (68,2)") + return x + return arr.astype(np.float64), to_orig + if arr.shape == (2, 68): + def to_orig(x): + x = np.asarray(x, dtype=np.float64) + if x.shape != (68, 2): + raise ValueError("to_orig expects (68,2)") + return x.T + return arr.T.astype(np.float64), to_orig + raise ValueError(f"faces shape {arr.shape} not supported; expected (1,68,2) or (68,2) or (2,68)") + +def _eye_center(face68, idxs): + return face68[idxs].mean(axis=0) + +def _anchors(face68): + le = _eye_center(face68, L_EYE_IDXS) + re = _eye_center(face68, R_EYE_IDXS) + nose = face68[NOSE_TIP] + lm = face68[MOUTH_L] + rm = face68[MOUTH_R] + if re[0] < le[0]: + le, re = re, le + return np.stack([le, re, nose, lm, rm], axis=0) + +def _face_scale_only(src68, ref68, target_nose_pos, alpha=1.0, anchor_pairs=[[36, 45], [27, 8]]): + """ + Rough alignment - adjust the shape of the source face according to the proportions of the reference, and align the nose tip to target_nose_pos. + anchor_pairs: + - [36, 45] for x + - [27, 8] for y + """ + src = np.asarray(src68, dtype=np.float64) + ref = np.asarray(ref68, dtype=np.float64) + + center = _anchors(src).mean(axis=0) + src_centered = src - center + + src_w = np.linalg.norm(src[anchor_pairs[0][0]] - src[anchor_pairs[0][1]]) + ref_w = np.linalg.norm(ref[anchor_pairs[0][0]] - ref[anchor_pairs[0][1]]) + + src_h = np.linalg.norm(src[anchor_pairs[1][0]] - src[anchor_pairs[1][1]]) + ref_h = np.linalg.norm(ref[anchor_pairs[1][0]] - ref[anchor_pairs[1][1]]) + + scale_x = ref_w / src_w if src_w > 1e-6 else 1.0 + scale_y = ref_h / src_h if src_h > 1e-6 else 1.0 + + scaled_local = src_centered.copy() + scaled_local[:, 0] *= (1 - alpha) + scale_x * alpha + scaled_local[:, 1] *= (1 - alpha) + scale_y * alpha + scaled_global = scaled_local + center + + nose_idx = NOSE_TIP + current_nose = scaled_global[nose_idx] + offset = target_nose_pos - current_nose + scaled_global += offset + + return scaled_global + +# ===============================Reference Img Pre-Process====================== + + +def scale_and_translate_pose(tgt_pose, ref_pose, conf_th=0.9, return_ratio=False): + aligned_pose = copy.deepcopy(tgt_pose) + th = 1e-6 + ref_kpt = ref_pose['bodies']['candidate'].astype(np.float32) + tgt_kpt = aligned_pose['bodies']['candidate'].astype(np.float32) + + ref_sc = ref_pose['bodies'].get('score', np.ones(ref_kpt.shape[0])).astype(np.float32).reshape(-1) + tgt_sc = tgt_pose['bodies'].get('score', np.ones(tgt_kpt.shape[0])).astype(np.float32).reshape(-1) + + ref_shoulder_valid = (ref_sc[2] >= conf_th) and (ref_sc[5] >= conf_th) + tgt_shoulder_valid = (tgt_sc[2] >= conf_th) and (tgt_sc[5] >= conf_th) + shoulder_ok = ref_shoulder_valid and tgt_shoulder_valid + + ref_hip_valid = (ref_sc[8] >= conf_th) and (ref_sc[11] >= conf_th) + tgt_hip_valid = (tgt_sc[8] >= conf_th) and (tgt_sc[11] >= conf_th) + hip_ok = ref_hip_valid and tgt_hip_valid + + if shoulder_ok and hip_ok: + ref_shoulder_w = abs(ref_kpt[5, 0] - ref_kpt[2, 0]) + tgt_shoulder_w = abs(tgt_kpt[5, 0] - tgt_kpt[2, 0]) + x_ratio = ref_shoulder_w / tgt_shoulder_w if tgt_shoulder_w > th else 1.0 + + ref_torso_h = abs(np.mean(ref_kpt[[8, 11], 1]) - np.mean(ref_kpt[[2, 5], 1])) + tgt_torso_h = abs(np.mean(tgt_kpt[[8, 11], 1]) - np.mean(tgt_kpt[[2, 5], 1])) + y_ratio = ref_torso_h / tgt_torso_h if tgt_torso_h > th else 1.0 + scale_ratio = (x_ratio + y_ratio) / 2 + + elif shoulder_ok: + ref_sh_dist = np.linalg.norm(ref_kpt[2] - 
ref_kpt[5]) + tgt_sh_dist = np.linalg.norm(tgt_kpt[2] - tgt_kpt[5]) + scale_ratio = ref_sh_dist / tgt_sh_dist if tgt_sh_dist > th else 1.0 + + else: + ref_ear_dist = np.linalg.norm(ref_kpt[16] - ref_kpt[17]) + tgt_ear_dist = np.linalg.norm(tgt_kpt[16] - tgt_kpt[17]) + scale_ratio = ref_ear_dist / tgt_ear_dist if tgt_ear_dist > th else 1.0 + + if return_ratio: + return scale_ratio + + # scale + anchor_idx = 1 + anchor_pt_before_scale = tgt_kpt[anchor_idx].copy() + def scale(arr): + if arr is not None and arr.size > 0: + arr[..., 0] = anchor_pt_before_scale[0] + (arr[..., 0] - anchor_pt_before_scale[0]) * scale_ratio + arr[..., 1] = anchor_pt_before_scale[1] + (arr[..., 1] - anchor_pt_before_scale[1]) * scale_ratio + scale(tgt_kpt) + scale(aligned_pose.get('faces')) + scale(aligned_pose.get('hands')) + + # offset + offset = ref_kpt[anchor_idx] - tgt_kpt[anchor_idx] + def translate(arr): + if arr is not None and arr.size > 0: + arr += offset + translate(tgt_kpt) + translate(aligned_pose.get('faces')) + translate(aligned_pose.get('hands')) + aligned_pose['bodies']['candidate'] = tgt_kpt + + return aligned_pose, shoulder_ok, hip_ok + +# ===============================Align to Ref Driven Pose Retarget ====================== + +def align_to_reference(ref_pose_meta, tpl_pose_metas, tpl_dwposes, anchor_idx=None): + # pose retarget + face rough align + + ref_pose_dw = aaposemeta_to_dwpose(ref_pose_meta) + best_idx = anchor_idx + tpl_pose_meta_best = tpl_pose_metas[best_idx] + + tpl_retarget_pose_metas = get_retarget_pose( + tpl_pose_meta_best, + ref_pose_meta, + tpl_pose_metas, + None, None + ) + + retarget_dwposes = [aaposemeta_obj_to_dwpose(pm) for pm in tpl_retarget_pose_metas] + + if ref_pose_dw['faces'] is not None: + ref68, _ = _to_68x2(ref_pose_dw['faces']) + for frame_idx, (tpl_dw, rt_dw) in enumerate(zip(tpl_dwposes, retarget_dwposes)): + if tpl_dw['faces'] is None: + continue + src68, to_orig = _to_68x2(tpl_dw['faces']) + target_nose_pos = rt_dw['bodies']['candidate'][0] + scaled68 = _face_scale_only(src68, ref68, target_nose_pos, alpha=1.0) + rt_dw['faces'] = to_orig(scaled68) + rt_dw['faces_score'] = tpl_dw['faces_score'] + + return retarget_dwposes + +# ===============================Rescale-Ref && Change part of pose(Option)====================== + + +def compute_ratios_stepwise(ref_scores, source_scores, ref_pts, src_pts, conf_th=0.9, th=1e-6): + + def keypoint_valid(idx): + return ref_scores[0, idx] >= conf_th and source_scores[0, idx] >= conf_th + + def safe_ratio(p1, p2): + len_ref = np.linalg.norm(ref_pts[p1] - ref_pts[p2]) + len_src = np.linalg.norm(src_pts[p1] - src_pts[p2]) + if len_src > th: + return len_ref / len_src + else: + return 1.0 + + ratio_pairs = [ + (0,1),(1,2),(1,5),(2,3),(3,4),(5,6),(6,7), + (0,14),(0,15),(14,16),(15,17), + (8,9),(9,10),(11,12),(12,13), + (1,8),(1,11) + ] + ratios = {p: 1.0 for p in ratio_pairs} + + parent_map = { + (3, 4): (2, 3), + (6, 7): (5, 6), + (9, 10): (8, 9), + (12, 13): (11, 12) + } + + # Group 1 — head only + if all(keypoint_valid(i) for i in [0,1,14,15,16,17]): + ratios[(0,1)] = safe_ratio(0,1) + ratios[(0,14)] = safe_ratio(0,14) + ratios[(0,15)] = safe_ratio(0,15) + ratios[(14,16)]= safe_ratio(14,16) + ratios[(15,17)]= safe_ratio(15,17) + + # Group 2 — +shoulder + if all(keypoint_valid(i) for i in [0,1,2,5,14,15,16,17]): + ratios[(1,2)] = safe_ratio(1,2) + ratios[(1,5)] = safe_ratio(1,5) + + # Group 3 — +upper arm + if all(keypoint_valid(i) for i in [0,1,2,5,14,15,16,17,3,6]): + ratios[(2,3)] = safe_ratio(2,3) + ratios[(5,6)] = 
safe_ratio(5,6)
+        ratios[(3,4)] = ratios[parent_map[(3,4)]]
+        ratios[(6,7)] = ratios[parent_map[(6,7)]]
+
+    # Group 4 — +hips
+    if all(keypoint_valid(i) for i in [0,1,2,5,14,15,16,17,3,6,8,11]):
+        ratios[(1,8)] = safe_ratio(1,8)
+        ratios[(1,11)] = safe_ratio(1,11)
+
+    # Group 5 — forearm own
+    if all(keypoint_valid(i) for i in [0,1,2,5,14,15,16,17,3,6,8,11,4,7]):
+        ratios[(3,4)] = safe_ratio(3,4)
+        ratios[(6,7)] = safe_ratio(6,7)
+
+    # Group 6 — knees
+    if all(keypoint_valid(i) for i in [0,1,2,5,14,15,16,17,3,6,8,11,4,7,9,12]):
+        ratios[(8,9)] = safe_ratio(8,9)
+        ratios[(11,12)] = safe_ratio(11,12)
+        ratios[(9,10)] = ratios[parent_map[(9,10)]]
+        ratios[(12,13)] = ratios[parent_map[(12,13)]]
+
+    # Full body — all ratios
+    if all(keypoint_valid(i) for i in range(18)):
+        for p in ratio_pairs:
+            ratios[p] = safe_ratio(*p)
+
+    symmetric_pairs = [
+        ((1, 2), (1, 5)),    # shoulders
+        ((2, 3), (5, 6)),    # upper arms
+        ((3, 4), (6, 7)),    # forearms
+        ((8, 9), (11, 12)),  # thighs
+        ((9, 10), (12, 13))  # lower legs
+    ]
+    for left_key, right_key in symmetric_pairs:
+        left_val = ratios.get(left_key)
+        right_val = ratios.get(right_key)
+        if left_val is not None and right_val is not None:
+            avg_val = (left_val + right_val) / 2.0
+            ratios[left_key] = avg_val
+            ratios[right_key] = avg_val
+
+    eye_pairs = [
+        ((14, 16), (15, 17))  # symmetric eye-to-ear segments
+    ]
+    for left_key, right_key in eye_pairs:
+        left_val = ratios.get(left_key)
+        right_val = ratios.get(right_key)
+        if left_val is not None and right_val is not None:
+            avg_val = (left_val + right_val) / 2.0
+            ratios[left_key] = avg_val
+            ratios[right_key] = avg_val
+
+    return ratios
+
+def align_to_pose(ref_dwpose, tpl_dwposes, anchor_idx=None, conf_th=0.9):
+    detected_poses = copy.deepcopy(tpl_dwposes)
+
+    best_pose = tpl_dwposes[anchor_idx]
+    ref_pose_scaled, _, _ = scale_and_translate_pose(ref_dwpose, best_pose, conf_th=conf_th)
+
+    ref_candidate = ref_pose_scaled['bodies']['candidate'].astype(np.float32)
+    ref_scores = ref_pose_scaled['bodies']['score'].astype(np.float32)
+
+    source_candidate = best_pose['bodies']['candidate'].astype(np.float32)
+    source_scores = best_pose['bodies']['score'].astype(np.float32)
+
+    has_ref_face = 'faces' in ref_pose_scaled and ref_pose_scaled['faces'] is not None and ref_pose_scaled['faces'].size > 0
+    if has_ref_face:
+        try:
+            ref68, _ = _to_68x2(ref_pose_scaled['faces'])
+        except Exception as e:
+            print("Reference face conversion failed:", e)
+            has_ref_face = False
+
+    ratios = compute_ratios_stepwise(ref_scores, source_scores, ref_candidate, source_candidate, conf_th=conf_th, th=1e-6)
+
+    for pose in detected_poses:
+        candidate = pose['bodies']['candidate']
+        hands = pose['hands']
+
+        # ===== Neck =====
+        ratio = ratios[(0, 1)]
+        x_offset = (candidate[1][0] - candidate[0][0]) * (1. - ratio)
+        y_offset = (candidate[1][1] - candidate[0][1]) * (1. - ratio)
+        candidate[[0, 14, 15, 16, 17], 0] += x_offset
+        candidate[[0, 14, 15, 16, 17], 1] += y_offset
+
+        # ===== Shoulder Right =====
+        ratio = ratios[(1, 2)]
+        x_offset = (candidate[1][0] - candidate[2][0]) * (1. - ratio)
+        y_offset = (candidate[1][1] - candidate[2][1]) * (1. - ratio)
+        candidate[[2, 3, 4], 0] += x_offset
+        candidate[[2, 3, 4], 1] += y_offset
+        hands[1, :, 0] += x_offset
+        hands[1, :, 1] += y_offset
+
+        # ===== Shoulder Left =====
+        ratio = ratios[(1, 5)]
+        x_offset = (candidate[1][0] - candidate[5][0]) * (1. - ratio)
+        y_offset = (candidate[1][1] - candidate[5][1]) * (1.
- ratio) + candidate[[5, 6, 7], 0] += x_offset + candidate[[5, 6, 7], 1] += y_offset + hands[0, :, 0] += x_offset + hands[0, :, 1] += y_offset + + # ===== Upper Arm Right ===== + ratio = ratios[(2, 3)] + x_offset = (candidate[2][0] - candidate[3][0]) * (1. - ratio) + y_offset = (candidate[2][1] - candidate[3][1]) * (1. - ratio) + candidate[[3, 4], 0] += x_offset + candidate[[3, 4], 1] += y_offset + hands[1, :, 0] += x_offset + hands[1, :, 1] += y_offset + + # ===== Forearm Right ===== + ratio = ratios[(3, 4)] + x_offset = (candidate[3][0] - candidate[4][0]) * (1. - ratio) + y_offset = (candidate[3][1] - candidate[4][1]) * (1. - ratio) + candidate[4, 0] += x_offset + candidate[4, 1] += y_offset + hands[1, :, 0] += x_offset + hands[1, :, 1] += y_offset + + # ===== Upper Arm Left ===== + ratio = ratios[(5, 6)] + x_offset = (candidate[5][0] - candidate[6][0]) * (1. - ratio) + y_offset = (candidate[5][1] - candidate[6][1]) * (1. - ratio) + candidate[[6, 7], 0] += x_offset + candidate[[6, 7], 1] += y_offset + hands[0, :, 0] += x_offset + hands[0, :, 1] += y_offset + + # ===== Forearm Left ===== + ratio = ratios[(6, 7)] + x_offset = (candidate[6][0] - candidate[7][0]) * (1. - ratio) + y_offset = (candidate[6][1] - candidate[7][1]) * (1. - ratio) + candidate[7, 0] += x_offset + candidate[7, 1] += y_offset + hands[0, :, 0] += x_offset + hands[0, :, 1] += y_offset + + # ===== Head parts ===== + for (p1, p2) in [(0,14),(0,15),(14,16),(15,17)]: + ratio = ratios[(p1,p2)] + x_offset = (candidate[p1][0] - candidate[p2][0]) * (1. - ratio) + y_offset = (candidate[p1][1] - candidate[p2][1]) * (1. - ratio) + candidate[p2, 0] += x_offset + candidate[p2, 1] += y_offset + + # ===== Hips (added) ===== + ratio = ratios[(1, 8)] + x_offset = (candidate[1][0] - candidate[8][0]) * (1. - ratio) + y_offset = (candidate[1][1] - candidate[8][1]) * (1. - ratio) + candidate[8, 0] += x_offset + candidate[8, 1] += y_offset + + ratio = ratios[(1, 11)] + x_offset = (candidate[1][0] - candidate[11][0]) * (1. - ratio) + y_offset = (candidate[1][1] - candidate[11][1]) * (1. - ratio) + candidate[11, 0] += x_offset + candidate[11, 1] += y_offset + + # ===== Legs ===== + ratio = ratios[(8, 9)] + x_offset = (candidate[9][0] - candidate[8][0]) * (ratio - 1.) + y_offset = (candidate[9][1] - candidate[8][1]) * (ratio - 1.) + candidate[[9, 10], 0] += x_offset + candidate[[9, 10], 1] += y_offset + + ratio = ratios[(9, 10)] + x_offset = (candidate[10][0] - candidate[9][0]) * (ratio - 1.) + y_offset = (candidate[10][1] - candidate[9][1]) * (ratio - 1.) + candidate[10, 0] += x_offset + candidate[10, 1] += y_offset + + ratio = ratios[(11, 12)] + x_offset = (candidate[12][0] - candidate[11][0]) * (ratio - 1.) + y_offset = (candidate[12][1] - candidate[11][1]) * (ratio - 1.) + candidate[[12, 13], 0] += x_offset + candidate[[12, 13], 1] += y_offset + + ratio = ratios[(12, 13)] + x_offset = (candidate[13][0] - candidate[12][0]) * (ratio - 1.) + y_offset = (candidate[13][1] - candidate[12][1]) * (ratio - 1.) 
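+        # Note: the leg blocks write each offset as (p_child - p_parent) * (ratio - 1.),
+        # which is algebraically the same (p_parent - p_child) * (1. - ratio) form used
+        # for the arm and shoulder blocks above; the ankle shift below applies the
+        # (12, 13) offset just computed.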
+ candidate[13, 0] += x_offset + candidate[13, 1] += y_offset + + # rough align + if has_ref_face and 'faces' in pose and pose['faces'] is not None and pose['faces'].size > 0: + try: + src68, to_orig = _to_68x2(pose['faces']) + scaled68 = _face_scale_only(src68, ref68, candidate[0], alpha=1.0) + pose['faces'] = to_orig(scaled68) + except Exception as e: + print("Reference face conversion failed:", e) + continue + + return detected_poses diff --git a/zavodik/nodes/ComfyUI-WanAnimatePreprocess/onetoall/utils.py b/zavodik/nodes/ComfyUI-WanAnimatePreprocess/onetoall/utils.py new file mode 100644 index 0000000000000000000000000000000000000000..78d81c820bd05860e419f7c507691932ee90870e --- /dev/null +++ b/zavodik/nodes/ComfyUI-WanAnimatePreprocess/onetoall/utils.py @@ -0,0 +1,347 @@ +# https://github.com/ssj9596/One-to-All-Animation + +import cv2 +import numpy as np +import math +import copy + +eps = 0.01 + +DROP_FACE_POINTS = {0, 14, 15, 16, 17} +DROP_UPPER_POINTS = {0, 14, 15, 16, 17, 2, 1, 5, 3, 6} +DROP_LOWER_POINTS = {8, 9, 10, 11, 12, 13} + +def scale_and_translate_pose(tgt_pose, ref_pose, conf_th=0.9, return_ratio=False): + aligned_pose = copy.deepcopy(tgt_pose) + th = 1e-6 + ref_kpt = ref_pose['bodies']['candidate'].astype(np.float32) + tgt_kpt = aligned_pose['bodies']['candidate'].astype(np.float32) + + ref_sc = ref_pose['bodies'].get('score', np.ones(ref_kpt.shape[0])).astype(np.float32).reshape(-1) + tgt_sc = tgt_pose['bodies'].get('score', np.ones(tgt_kpt.shape[0])).astype(np.float32).reshape(-1) + + ref_shoulder_valid = (ref_sc[2] >= conf_th) and (ref_sc[5] >= conf_th) + tgt_shoulder_valid = (tgt_sc[2] >= conf_th) and (tgt_sc[5] >= conf_th) + shoulder_ok = ref_shoulder_valid and tgt_shoulder_valid + + ref_hip_valid = (ref_sc[8] >= conf_th) and (ref_sc[11] >= conf_th) + tgt_hip_valid = (tgt_sc[8] >= conf_th) and (tgt_sc[11] >= conf_th) + hip_ok = ref_hip_valid and tgt_hip_valid + + if shoulder_ok and hip_ok: + ref_shoulder_w = abs(ref_kpt[5, 0] - ref_kpt[2, 0]) + tgt_shoulder_w = abs(tgt_kpt[5, 0] - tgt_kpt[2, 0]) + x_ratio = ref_shoulder_w / tgt_shoulder_w if tgt_shoulder_w > th else 1.0 + + ref_torso_h = abs(np.mean(ref_kpt[[8, 11], 1]) - np.mean(ref_kpt[[2, 5], 1])) + tgt_torso_h = abs(np.mean(tgt_kpt[[8, 11], 1]) - np.mean(tgt_kpt[[2, 5], 1])) + y_ratio = ref_torso_h / tgt_torso_h if tgt_torso_h > th else 1.0 + scale_ratio = (x_ratio + y_ratio) / 2 + + elif shoulder_ok: + ref_sh_dist = np.linalg.norm(ref_kpt[2] - ref_kpt[5]) + tgt_sh_dist = np.linalg.norm(tgt_kpt[2] - tgt_kpt[5]) + scale_ratio = ref_sh_dist / tgt_sh_dist if tgt_sh_dist > th else 1.0 + + else: + ref_ear_dist = np.linalg.norm(ref_kpt[16] - ref_kpt[17]) + tgt_ear_dist = np.linalg.norm(tgt_kpt[16] - tgt_kpt[17]) + scale_ratio = ref_ear_dist / tgt_ear_dist if tgt_ear_dist > th else 1.0 + + if return_ratio: + return scale_ratio + + # scale + anchor_idx = 1 + anchor_pt_before_scale = tgt_kpt[anchor_idx].copy() + def scale(arr): + if arr is not None and arr.size > 0: + arr[..., 0] = anchor_pt_before_scale[0] + (arr[..., 0] - anchor_pt_before_scale[0]) * scale_ratio + arr[..., 1] = anchor_pt_before_scale[1] + (arr[..., 1] - anchor_pt_before_scale[1]) * scale_ratio + scale(tgt_kpt) + scale(aligned_pose.get('faces')) + scale(aligned_pose.get('hands')) + + # offset + offset = ref_kpt[anchor_idx] - tgt_kpt[anchor_idx] + def translate(arr): + if arr is not None and arr.size > 0: + arr += offset + translate(tgt_kpt) + translate(aligned_pose.get('faces')) + translate(aligned_pose.get('hands')) + 
aligned_pose['bodies']['candidate'] = tgt_kpt + + return aligned_pose, shoulder_ok, hip_ok + + +def warp_ref_to_pose(tgt_img, + ref_pose: dict, #driven pose + tgt_pose: dict, + bg_val=(0, 0, 0), + conf_th=0.9, + align_center=False): + + H, W = tgt_img.shape[:2] + img_tgt_pose = draw_pose_aligned(tgt_pose, H, W, without_face=True) + + tgt_kpt = tgt_pose['bodies']['candidate'].astype(np.float32) + ref_kpt = ref_pose['bodies']['candidate'].astype(np.float32) + + scale_ratio = scale_and_translate_pose(tgt_pose, ref_pose, conf_th=conf_th, return_ratio=True) + + anchor_idx = 1 + x0 = tgt_kpt[anchor_idx][0] * W + y0 = tgt_kpt[anchor_idx][1] * H + + ref_x = ref_kpt[anchor_idx][0] * W if not align_center else W/2 + ref_y = ref_kpt[anchor_idx][1] * H + + dx = ref_x - x0 + dy = ref_y - y0 + + # Affine transformation matrix + M = np.array([[scale_ratio, 0, (1-scale_ratio)*x0 + dx], + [0, scale_ratio, (1-scale_ratio)*y0 + dy]], + dtype=np.float32) + img_warp = cv2.warpAffine(tgt_img, M, (W, H), + flags=cv2.INTER_LINEAR, + borderValue=bg_val) + img_tgt_pose_warp = cv2.warpAffine(img_tgt_pose, M, (W, H), + flags=cv2.INTER_LINEAR, + borderValue=bg_val) + zeros = np.zeros((H, W), dtype=np.uint8) + mask_warp = cv2.warpAffine(zeros, M, (W, H), + flags=cv2.INTER_NEAREST, + borderValue=255) + return img_warp, img_tgt_pose_warp, mask_warp + +def hsv_to_rgb(hsv): + hsv = np.asarray(hsv, dtype=np.float32) + in_shape = hsv.shape + hsv = hsv.reshape(-1, 3) + + h, s, v = hsv[:, 0], hsv[:, 1], hsv[:, 2] + + i = (h * 6.0).astype(int) + f = (h * 6.0) - i + i = i % 6 + + p = v * (1.0 - s) + q = v * (1.0 - s * f) + t = v * (1.0 - s * (1.0 - f)) + + rgb = np.zeros_like(hsv) + rgb[i == 0] = np.stack([v[i == 0], t[i == 0], p[i == 0]], axis=1) + rgb[i == 1] = np.stack([q[i == 1], v[i == 1], p[i == 1]], axis=1) + rgb[i == 2] = np.stack([p[i == 2], v[i == 2], t[i == 2]], axis=1) + rgb[i == 3] = np.stack([p[i == 3], q[i == 3], v[i == 3]], axis=1) + rgb[i == 4] = np.stack([t[i == 4], p[i == 4], v[i == 4]], axis=1) + rgb[i == 5] = np.stack([v[i == 5], p[i == 5], q[i == 5]], axis=1) + + gray_mask = s == 0 + rgb[gray_mask] = np.stack([v[gray_mask]] * 3, axis=1) + + return (rgb.reshape(in_shape) * 255) + +def get_stickwidth(W, H, stickwidth=4): + if max(W, H) < 512: + ratio = 1.0 + elif max(W, H) < 1080: + ratio = 1.5 + elif max(W, H) < 2160: + ratio = 2.0 + elif max(W, H) < 3240: + ratio = 2.5 + elif max(W, H) < 4320: + ratio = 3.5 + elif max(W, H) < 5400: + ratio = 4.5 + else: + ratio = 4.0 + return int(stickwidth * ratio) + + +def alpha_blend_color(color, alpha): + return [int(c * alpha) for c in color] + + +def draw_bodypose_aligned(canvas, candidate, subset, score, plan=None): + H, W, C = canvas.shape + candidate = np.array(candidate) + subset = np.array(subset) + stickwidth = get_stickwidth(W, H, stickwidth=3) + + limbSeq = [ + [2, 3], [2, 6], [3, 4], [4, 5], [6, 7], [7, 8], + [2, 9], [9, 10], [10, 11], [2, 12], [12, 13], [13, 14], + [2, 1], [1, 15], [15, 17], [1, 16], [16, 18], [3, 17], [6, 18]] + colors = [ + [255, 0, 0], [255, 85, 0], [255, 170, 0], [255, 255, 0], [170, 255, 0], + [85, 255, 0], [0, 255, 0], [0, 255, 85], [0, 255, 170], [0, 255, 255], + [0, 170, 255], [0, 85, 255], [0, 0, 255], [85, 0, 255], + [170, 0, 255], [255, 0, 255], [255, 0, 170], [255, 0, 85]] + + HIDE_JOINTS = set() + stretch_limb_idx = None + stretch_scale = None + if plan: + if plan["mode"] == "drop_point": + HIDE_JOINTS.add(plan["point_idx"]) + elif plan["mode"] == "drop_region": + HIDE_JOINTS |= set(plan["points"]) + elif plan["mode"] == 
"stretch_limb": + stretch_limb_idx = plan["limb_idx"] + stretch_scale = plan["stretch_scale"] + + hide_joint = np.zeros_like(subset, dtype=bool) + + for i in range(17): + for n in range(len(subset)): + idx_pair = limbSeq[i] + + if any(j in HIDE_JOINTS for j in idx_pair): + continue + + index = subset[n][np.array(idx_pair) - 1] + conf = score[n][np.array(idx_pair) - 1] + if -1 in index: + continue + # color lighten + alpha = max(conf[0] * conf[1], 0) if conf[0]>0 and conf[1]>0 else 0.35 + if conf[0] == 0 or conf[1] == 0: + alpha = 0 + + Y = candidate[index.astype(int), 0] * float(W) + X = candidate[index.astype(int), 1] * float(H) + + if stretch_limb_idx == i: + vec_x = X[1] - X[0] + vec_y = Y[1] - Y[0] + X[1] = X[0] + vec_x * stretch_scale + Y[1] = Y[0] + vec_y * stretch_scale + hide_joint[n, idx_pair[1]-1] = True + + mX = np.mean(X) + mY = np.mean(Y) + length = ((X[0]-X[1])**2 + (Y[0]-Y[1])**2) ** 0.5 + angle = math.degrees(math.atan2(X[0]-X[1], Y[0]-Y[1])) + polygon = cv2.ellipse2Poly((int(mY), int(mX)), + (int(length/2), stickwidth), int(angle), 0, 360, 1) + cv2.fillConvexPoly(canvas, polygon, alpha_blend_color(colors[i], alpha)) + + canvas = (canvas * 0.6).astype(np.uint8) + + for i in range(18): + if i in HIDE_JOINTS: + continue + for n in range(len(subset)): + if hide_joint[n, i]: + continue + index = int(subset[n][i]) + if index == -1: + continue + x, y = candidate[index][0:2] + conf = score[n][i] + + alpha = 0 if conf==-2 else max(conf, 0) + x = int(x * W) + y = int(y * H) + cv2.circle(canvas, (x, y), stickwidth, alpha_blend_color(colors[i], alpha), thickness=-1) + + return canvas + + +def draw_handpose_aligned(canvas, all_hand_peaks, all_hand_scores, draw_th=0.3): + H, W, C = canvas.shape + stickwidth = get_stickwidth(W, H, stickwidth=2) + line_thickness = get_stickwidth(W, H, stickwidth=2) + + edges = [[0, 1], [1, 2], [2, 3], [3, 4], [0, 5], [5, 6], [6, 7], [7, 8], [0, 9], [9, 10], \ + [10, 11], [11, 12], [0, 13], [13, 14], [14, 15], [15, 16], [0, 17], [17, 18], [18, 19], [19, 20]] + + for peaks, scores in zip(all_hand_peaks, all_hand_scores): + for ie, e in enumerate(edges): + if scores[e[0]] < draw_th or scores[e[1]] < draw_th: + continue + x1, y1 = peaks[e[0]] + x2, y2 = peaks[e[1]] + x1 = int(x1 * W) + y1 = int(y1 * H) + x2 = int(x2 * W) + y2 = int(y2 * H) + + score = int(scores[e[0]] * scores[e[1]] * 255) + if x1 > eps and y1 > eps and x2 > eps and y2 > eps: + color = hsv_to_rgb([ie / float(len(edges)), 1.0, 1.0]).flatten() + color = tuple(int(c * score / 255) for c in color) + cv2.line(canvas, (x1, y1), (x2, y2), color, thickness=line_thickness) + + for i, keyponit in enumerate(peaks): + if scores[i] < draw_th: + continue + + x, y = keyponit + x = int(x * W) + y = int(y * H) + score = int(scores[i] * 255) + if x > eps and y > eps: + cv2.circle(canvas, (x, y), stickwidth, (0, 0, score), thickness=-1) + return canvas + + +def draw_facepose_aligned(canvas, all_lmks, all_scores, draw_th=0.3,face_change=False): + H, W, C = canvas.shape + stickwidth = get_stickwidth(W, H, stickwidth=2) + SKIP_IDX = set(range(0, 17)) + SKIP_IDX |= set(range(27, 36)) + + for lmks, scores in zip(all_lmks, all_scores): + for idx, (lmk, score) in enumerate(zip(lmks, scores)): + # skip chin + if idx in SKIP_IDX: + continue + if score < draw_th: + continue + x, y = lmk + x = int(x * W) + y = int(y * H) + conf = int(score * 255) + # color lighten + if face_change: + conf = int(conf * 0.35) + + if x > eps and y > eps: + cv2.circle(canvas, (x, y), stickwidth, (conf, conf, conf), thickness=-1) + return 
canvas + + +def draw_pose_aligned(pose, H, W, ref_w=2160, without_face=False, pose_plan=None, head_strength="full", face_change=False): + bodies = pose['bodies'] + faces = pose['faces'] + hands = pose['hands'] + candidate = bodies['candidate'] + subset = bodies['subset'] + body_score = bodies['score'].copy() + # control color + if head_strength == "weak": + target_joints = [0, 14, 15, 16, 17] + body_score[:, target_joints] = -2 + elif head_strength == "none": + target_joints = [0, 14, 15, 16, 17] + body_score[:, target_joints] = 0 + + sz = min(H, W) + sr = (ref_w / sz) if sz != ref_w else 1 + canvas = np.zeros(shape=(int(H*sr), int(W*sr), 3), dtype=np.uint8) + + canvas = draw_bodypose_aligned(canvas, candidate, subset, + score=body_score, + plan=pose_plan,) + + canvas = draw_handpose_aligned(canvas, hands, pose['hands_score']) + + if not without_face: + canvas = draw_facepose_aligned(canvas, faces, pose['faces_score'],face_change=face_change) + + return cv2.resize(canvas, (W, H)) diff --git a/zavodik/nodes/ComfyUI-WanAnimatePreprocess/pose_utils/__pycache__/human_visualization.cpython-313.pyc b/zavodik/nodes/ComfyUI-WanAnimatePreprocess/pose_utils/__pycache__/human_visualization.cpython-313.pyc new file mode 100644 index 0000000000000000000000000000000000000000..92c28c69b24c2a98692d1aed0932c034e35d6949 Binary files /dev/null and b/zavodik/nodes/ComfyUI-WanAnimatePreprocess/pose_utils/__pycache__/human_visualization.cpython-313.pyc differ diff --git a/zavodik/nodes/ComfyUI-WanAnimatePreprocess/pose_utils/__pycache__/pose2d_utils.cpython-313.pyc b/zavodik/nodes/ComfyUI-WanAnimatePreprocess/pose_utils/__pycache__/pose2d_utils.cpython-313.pyc new file mode 100644 index 0000000000000000000000000000000000000000..9737d41ec9db32fc98be09d83ee248a829c52496 Binary files /dev/null and b/zavodik/nodes/ComfyUI-WanAnimatePreprocess/pose_utils/__pycache__/pose2d_utils.cpython-313.pyc differ diff --git a/zavodik/nodes/ComfyUI-WanAnimatePreprocess/pose_utils/human_visualization.py b/zavodik/nodes/ComfyUI-WanAnimatePreprocess/pose_utils/human_visualization.py new file mode 100644 index 0000000000000000000000000000000000000000..e6f5ad1f5846ba5317872eff0861e474d5410490 --- /dev/null +++ b/zavodik/nodes/ComfyUI-WanAnimatePreprocess/pose_utils/human_visualization.py @@ -0,0 +1,1272 @@ +# Copyright 2024-2025 The Alibaba Wan Team Authors. All rights reserved. +import os +import cv2 +import time +import math +import matplotlib +import matplotlib.pyplot as plt +import numpy as np +from typing import Dict, List +import random +from .pose2d_utils import AAPoseMeta + + +def draw_handpose(canvas, keypoints, hand_score_th=0.6): + """ + Draw keypoints and connections representing hand pose on a given canvas. + + Args: + canvas (np.ndarray): A 3D numpy array representing the canvas (image) on which to draw the hand pose. + keypoints (List[Keypoint]| None): A list of Keypoint objects representing the hand keypoints to be drawn + or None if no keypoints are present. + + Returns: + np.ndarray: A 3D numpy array representing the modified canvas with the drawn hand pose. + + Note: + The function expects the x and y coordinates of the keypoints to be normalized between 0 and 1. 
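+        Keypoints whose confidence (the third component of each entry) falls
+        below ``hand_score_th`` are skipped for both the edges and the dots.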
+ """ + eps = 0.01 + + H, W, C = canvas.shape + stickwidth = max(int(min(H, W) / 200), 1) + + edges = [ + [0, 1], + [1, 2], + [2, 3], + [3, 4], + [0, 5], + [5, 6], + [6, 7], + [7, 8], + [0, 9], + [9, 10], + [10, 11], + [11, 12], + [0, 13], + [13, 14], + [14, 15], + [15, 16], + [0, 17], + [17, 18], + [18, 19], + [19, 20], + ] + + for ie, (e1, e2) in enumerate(edges): + k1 = keypoints[e1] + k2 = keypoints[e2] + if k1 is None or k2 is None: + continue + if k1[2] < hand_score_th or k2[2] < hand_score_th: + continue + + x1 = int(k1[0]) + y1 = int(k1[1]) + x2 = int(k2[0]) + y2 = int(k2[1]) + if x1 > eps and y1 > eps and x2 > eps and y2 > eps: + cv2.line( + canvas, + (x1, y1), + (x2, y2), + matplotlib.colors.hsv_to_rgb([ie / float(len(edges)), 1.0, 1.0]) * 255, + thickness=stickwidth, + ) + + for keypoint in keypoints: + + if keypoint is None: + continue + if keypoint[2] < hand_score_th: + continue + + x, y = keypoint[0], keypoint[1] + x = int(x) + y = int(y) + if x > eps and y > eps: + cv2.circle(canvas, (x, y), stickwidth, (0, 0, 255), thickness=-1) + return canvas + + +def draw_handpose_new(canvas, keypoints, stickwidth_type='v2', hand_score_th=0.6, hand_stick_width=4): + """ + Draw keypoints and connections representing hand pose on a given canvas. + + Args: + canvas (np.ndarray): A 3D numpy array representing the canvas (image) on which to draw the hand pose. + keypoints (List[Keypoint]| None): A list of Keypoint objects representing the hand keypoints to be drawn + or None if no keypoints are present. + + Returns: + np.ndarray: A 3D numpy array representing the modified canvas with the drawn hand pose. + + Note: + The function expects the x and y coordinates of the keypoints to be normalized between 0 and 1. + """ + eps = 0.01 + + H, W, C = canvas.shape + # if stickwidth_type == 'v1': + # stickwidth = max(int(min(H, W) / 200), 1) + # elif stickwidth_type == 'v2': + # stickwidth = max(max(int(min(H, W) / 200) - 1, 1) // 2, 1) + if hand_stick_width == -1: + stickwidth = max(max(int(min(H, W) / 200) - 1, 1) // 2, 1) + else: + stickwidth = hand_stick_width + + edges = [ + [0, 1], + [1, 2], + [2, 3], + [3, 4], + [0, 5], + [5, 6], + [6, 7], + [7, 8], + [0, 9], + [9, 10], + [10, 11], + [11, 12], + [0, 13], + [13, 14], + [14, 15], + [15, 16], + [0, 17], + [17, 18], + [18, 19], + [19, 20], + ] + + for ie, (e1, e2) in enumerate(edges): + k1 = keypoints[e1] + k2 = keypoints[e2] + if k1 is None or k2 is None: + continue + if k1[2] < hand_score_th or k2[2] < hand_score_th: + continue + + x1 = int(k1[0]) + y1 = int(k1[1]) + x2 = int(k2[0]) + y2 = int(k2[1]) + if x1 > eps and y1 > eps and x2 > eps and y2 > eps: + cv2.line( + canvas, + (x1, y1), + (x2, y2), + matplotlib.colors.hsv_to_rgb([ie / float(len(edges)), 1.0, 1.0]) * 255, + thickness=stickwidth, + ) + + for keypoint in keypoints: + + if keypoint is None: + continue + if keypoint[2] < hand_score_th: + continue + + x, y = keypoint[0], keypoint[1] + x = int(x) + y = int(y) + if x > eps and y > eps: + cv2.circle(canvas, (x, y), stickwidth, (0, 0, 255), thickness=-1) + return canvas + + +def draw_ellipse_by_2kp(img, keypoint1, keypoint2, color, threshold=0.6): + H, W, C = img.shape + stickwidth = max(int(min(H, W) / 200), 1) + + if keypoint1[-1] < threshold or keypoint2[-1] < threshold: + return img + + Y = np.array([keypoint1[0], keypoint2[0]]) + X = np.array([keypoint1[1], keypoint2[1]]) + mX = np.mean(X) + mY = np.mean(Y) + length = ((X[0] - X[1]) ** 2 + (Y[0] - Y[1]) ** 2) ** 0.5 + angle = math.degrees(math.atan2(X[0] - X[1], Y[0] - Y[1])) + polygon 
= cv2.ellipse2Poly((int(mY), int(mX)), (int(length / 2), stickwidth), int(angle), 0, 360, 1) + cv2.fillConvexPoly(img, polygon, [int(float(c) * 0.6) for c in color]) + return img + + +def split_pose2d_kps_to_aa(kp2ds: np.ndarray) -> List[np.ndarray]: + """Convert the 133 keypoints from pose2d to body and hands keypoints. + + Args: + kp2ds (np.ndarray): [133, 2] + + Returns: + List[np.ndarray]: _description_ + """ + kp2ds_body = ( + kp2ds[[0, 6, 6, 8, 10, 5, 7, 9, 12, 14, 16, 11, 13, 15, 2, 1, 4, 3, 17, 20]] + + kp2ds[[0, 5, 6, 8, 10, 5, 7, 9, 12, 14, 16, 11, 13, 15, 2, 1, 4, 3, 18, 21]] + ) / 2 + kp2ds_lhand = kp2ds[91:112] + kp2ds_rhand = kp2ds[112:133] + return kp2ds_body.copy(), kp2ds_lhand.copy(), kp2ds_rhand.copy() + + +def draw_aapose_by_meta(img, meta: AAPoseMeta, threshold=0.5, stick_width_norm=200, draw_hand=True, draw_head=True): + kp2ds = np.concatenate([meta.kps_body, meta.kps_body_p[:, None]], axis=1) + kp2ds_lhand = np.concatenate([meta.kps_lhand, meta.kps_lhand_p[:, None]], axis=1) + kp2ds_rhand = np.concatenate([meta.kps_rhand, meta.kps_rhand_p[:, None]], axis=1) + pose_img = draw_aapose(img, kp2ds, threshold, kp2ds_lhand=kp2ds_lhand, kp2ds_rhand=kp2ds_rhand, stick_width_norm=stick_width_norm, draw_hand=draw_hand, draw_head=draw_head) + return pose_img + +def draw_aapose_by_meta_new(img, meta: AAPoseMeta, threshold=0.5, stickwidth_type='v2', body_stick_width=-1, draw_hand=True, draw_head=True, hand_stick_width=4): + kp2ds = np.concatenate([meta.kps_body, meta.kps_body_p[:, None]], axis=1) + kp2ds_lhand = np.concatenate([meta.kps_lhand, meta.kps_lhand_p[:, None]], axis=1) + kp2ds_rhand = np.concatenate([meta.kps_rhand, meta.kps_rhand_p[:, None]], axis=1) + pose_img = draw_aapose_new(img, kp2ds, threshold, kp2ds_lhand=kp2ds_lhand, kp2ds_rhand=kp2ds_rhand, body_stick_width=body_stick_width, + stickwidth_type=stickwidth_type, draw_hand=draw_hand, draw_head=draw_head, hand_stick_width=hand_stick_width) + return pose_img + +def draw_hand_by_meta(img, meta: AAPoseMeta, threshold=0.5, stick_width_norm=200): + kp2ds = np.concatenate([meta.kps_body, meta.kps_body_p[:, None] * 0], axis=1) + kp2ds_lhand = np.concatenate([meta.kps_lhand, meta.kps_lhand_p[:, None]], axis=1) + kp2ds_rhand = np.concatenate([meta.kps_rhand, meta.kps_rhand_p[:, None]], axis=1) + pose_img = draw_aapose(img, kp2ds, threshold, kp2ds_lhand=kp2ds_lhand, kp2ds_rhand=kp2ds_rhand, stick_width_norm=stick_width_norm, draw_hand=True, draw_head=False) + return pose_img + + +def draw_aaface_by_meta(img, meta: AAPoseMeta, threshold=0.5, stick_width_norm=200, draw_hand=False, draw_head=True): + kp2ds = np.concatenate([meta.kps_body, meta.kps_body_p[:, None]], axis=1) + # kp2ds_lhand = np.concatenate([meta.kps_lhand, meta.kps_lhand_p[:, None]], axis=1) + # kp2ds_rhand = np.concatenate([meta.kps_rhand, meta.kps_rhand_p[:, None]], axis=1) + pose_img = draw_M(img, kp2ds, threshold, kp2ds_lhand=None, kp2ds_rhand=None, stick_width_norm=stick_width_norm, draw_hand=draw_hand, draw_head=draw_head) + return pose_img + + +def draw_aanose_by_meta(img, meta: AAPoseMeta, threshold=0.5, stick_width_norm=100, draw_hand=False): + kp2ds = np.concatenate([meta.kps_body, meta.kps_body_p[:, None]], axis=1) + # kp2ds_lhand = np.concatenate([meta.kps_lhand, meta.kps_lhand_p[:, None]], axis=1) + # kp2ds_rhand = np.concatenate([meta.kps_rhand, meta.kps_rhand_p[:, None]], axis=1) + pose_img = draw_nose(img, kp2ds, threshold, kp2ds_lhand=None, kp2ds_rhand=None, stick_width_norm=stick_width_norm, draw_hand=draw_hand) + return pose_img + + +def 
gen_face_motion_seq(img, metas: List[AAPoseMeta], threshold=0.5, stick_width_norm=200): + + return + + +def draw_M( + img, + kp2ds, + threshold=0.6, + data_to_json=None, + idx=-1, + kp2ds_lhand=None, + kp2ds_rhand=None, + draw_hand=False, + stick_width_norm=200, + draw_head=True +): + """ + Draw keypoints and connections representing hand pose on a given canvas. + + Args: + canvas (np.ndarray): A 3D numpy array representing the canvas (image) on which to draw the hand pose. + keypoints (List[Keypoint]| None): A list of Keypoint objects representing the hand keypoints to be drawn + or None if no keypoints are present. + + Returns: + np.ndarray: A 3D numpy array representing the modified canvas with the drawn hand pose. + + Note: + The function expects the x and y coordinates of the keypoints to be normalized between 0 and 1. + """ + + new_kep_list = [ + "Nose", + "Neck", + "RShoulder", + "RElbow", + "RWrist", # No.4 + "LShoulder", + "LElbow", + "LWrist", # No.7 + "RHip", + "RKnee", + "RAnkle", # No.10 + "LHip", + "LKnee", + "LAnkle", # No.13 + "REye", + "LEye", + "REar", + "LEar", + "LToe", + "RToe", + ] + # kp2ds_body = (kp2ds.copy()[[0, 6, 6, 8, 10, 5, 7, 9, 12, 14, 16, 11, 13, 15, 2, 1, 4, 3, 17, 20]] + \ + # kp2ds.copy()[[0, 5, 6, 8, 10, 5, 7, 9, 12, 14, 16, 11, 13, 15, 2, 1, 4, 3, 18, 21]]) / 2 + kp2ds = kp2ds.copy() + # import ipdb; ipdb.set_trace() + kp2ds[[1,2,3,4,5,6,7,8,9,10,11,12,13,18,19], 2] = 0 + if not draw_head: + kp2ds[[0,14,15,16,17], 2] = 0 + kp2ds_body = kp2ds + # kp2ds_body = kp2ds_body[:18] + + # kp2ds_lhand = kp2ds.copy()[91:112] + # kp2ds_rhand = kp2ds.copy()[112:133] + + limbSeq = [ + # [2, 3], + # [2, 6], # shoulders + # [3, 4], + # [4, 5], # left arm + # [6, 7], + # [7, 8], # right arm + # [2, 9], + # [9, 10], + # [10, 11], # right leg + # [2, 12], + # [12, 13], + # [13, 14], # left leg + # [2, 1], + [1, 15], + [15, 17], + [1, 16], + [16, 18], # face (nose, eyes, ears) + # [14, 19], + # [11, 20], # foot + ] + + colors = [ + # [255, 0, 0], + # [255, 85, 0], + # [255, 170, 0], + # [255, 255, 0], + # [170, 255, 0], + # [85, 255, 0], + # [0, 255, 0], + # [0, 255, 85], + # [0, 255, 170], + # [0, 255, 255], + # [0, 170, 255], + # [0, 85, 255], + # [0, 0, 255], + # [85, 0, 255], + [170, 0, 255], + [255, 0, 255], + [255, 0, 170], + [255, 0, 85], + # foot + # [200, 200, 0], + # [100, 100, 0], + ] + + H, W, C = img.shape + stickwidth = max(int(min(H, W) / stick_width_norm), 1) + + for _idx, ((k1_index, k2_index), color) in enumerate(zip(limbSeq, colors)): + keypoint1 = kp2ds_body[k1_index - 1] + keypoint2 = kp2ds_body[k2_index - 1] + + if keypoint1[-1] < threshold or keypoint2[-1] < threshold: + continue + + Y = np.array([keypoint1[0], keypoint2[0]]) + X = np.array([keypoint1[1], keypoint2[1]]) + mX = np.mean(X) + mY = np.mean(Y) + length = ((X[0] - X[1]) ** 2 + (Y[0] - Y[1]) ** 2) ** 0.5 + angle = math.degrees(math.atan2(X[0] - X[1], Y[0] - Y[1])) + polygon = cv2.ellipse2Poly((int(mY), int(mX)), (int(length / 2), stickwidth), int(angle), 0, 360, 1) + cv2.fillConvexPoly(img, polygon, [int(float(c) * 0.6) for c in color]) + + for _idx, (keypoint, color) in enumerate(zip(kp2ds_body, colors)): + if keypoint[-1] < threshold: + continue + x, y = keypoint[0], keypoint[1] + # cv2.circle(canvas, (int(x), int(y)), 4, color, thickness=-1) + cv2.circle(img, (int(x), int(y)), stickwidth, color, thickness=-1) + + if draw_hand: + img = draw_handpose(img, kp2ds_lhand, hand_score_th=threshold) + img = draw_handpose(img, kp2ds_rhand, hand_score_th=threshold) + + kp2ds_body[:, 0] /= W + 
kp2ds_body[:, 1] /= H + + if data_to_json is not None: + if idx == -1: + data_to_json.append( + { + "image_id": "frame_{:05d}.jpg".format(len(data_to_json) + 1), + "height": H, + "width": W, + "category_id": 1, + "keypoints_body": kp2ds_body.tolist(), + "keypoints_left_hand": kp2ds_lhand.tolist(), + "keypoints_right_hand": kp2ds_rhand.tolist(), + } + ) + else: + data_to_json[idx] = { + "image_id": "frame_{:05d}.jpg".format(idx + 1), + "height": H, + "width": W, + "category_id": 1, + "keypoints_body": kp2ds_body.tolist(), + "keypoints_left_hand": kp2ds_lhand.tolist(), + "keypoints_right_hand": kp2ds_rhand.tolist(), + } + return img + + +def draw_nose( + img, + kp2ds, + threshold=0.6, + data_to_json=None, + idx=-1, + kp2ds_lhand=None, + kp2ds_rhand=None, + draw_hand=False, + stick_width_norm=200, +): + """ + Draw keypoints and connections representing hand pose on a given canvas. + + Args: + canvas (np.ndarray): A 3D numpy array representing the canvas (image) on which to draw the hand pose. + keypoints (List[Keypoint]| None): A list of Keypoint objects representing the hand keypoints to be drawn + or None if no keypoints are present. + + Returns: + np.ndarray: A 3D numpy array representing the modified canvas with the drawn hand pose. + + Note: + The function expects the x and y coordinates of the keypoints to be normalized between 0 and 1. + """ + + new_kep_list = [ + "Nose", + "Neck", + "RShoulder", + "RElbow", + "RWrist", # No.4 + "LShoulder", + "LElbow", + "LWrist", # No.7 + "RHip", + "RKnee", + "RAnkle", # No.10 + "LHip", + "LKnee", + "LAnkle", # No.13 + "REye", + "LEye", + "REar", + "LEar", + "LToe", + "RToe", + ] + # kp2ds_body = (kp2ds.copy()[[0, 6, 6, 8, 10, 5, 7, 9, 12, 14, 16, 11, 13, 15, 2, 1, 4, 3, 17, 20]] + \ + # kp2ds.copy()[[0, 5, 6, 8, 10, 5, 7, 9, 12, 14, 16, 11, 13, 15, 2, 1, 4, 3, 18, 21]]) / 2 + kp2ds = kp2ds.copy() + kp2ds[1:, 2] = 0 + # kp2ds[0, 2] = 1 + kp2ds_body = kp2ds + # kp2ds_body = kp2ds_body[:18] + + # kp2ds_lhand = kp2ds.copy()[91:112] + # kp2ds_rhand = kp2ds.copy()[112:133] + + limbSeq = [ + # [2, 3], + # [2, 6], # shoulders + # [3, 4], + # [4, 5], # left arm + # [6, 7], + # [7, 8], # right arm + # [2, 9], + # [9, 10], + # [10, 11], # right leg + # [2, 12], + # [12, 13], + # [13, 14], # left leg + # [2, 1], + [1, 15], + [15, 17], + [1, 16], + [16, 18], # face (nose, eyes, ears) + # [14, 19], + # [11, 20], # foot + ] + + colors = [ + # [255, 0, 0], + # [255, 85, 0], + # [255, 170, 0], + # [255, 255, 0], + # [170, 255, 0], + # [85, 255, 0], + # [0, 255, 0], + # [0, 255, 85], + # [0, 255, 170], + # [0, 255, 255], + # [0, 170, 255], + # [0, 85, 255], + # [0, 0, 255], + # [85, 0, 255], + [170, 0, 255], + # [255, 0, 255], + # [255, 0, 170], + # [255, 0, 85], + # foot + # [200, 200, 0], + # [100, 100, 0], + ] + + H, W, C = img.shape + stickwidth = max(int(min(H, W) / stick_width_norm), 1) + + # for _idx, ((k1_index, k2_index), color) in enumerate(zip(limbSeq, colors)): + # keypoint1 = kp2ds_body[k1_index - 1] + # keypoint2 = kp2ds_body[k2_index - 1] + + # if keypoint1[-1] < threshold or keypoint2[-1] < threshold: + # continue + + # Y = np.array([keypoint1[0], keypoint2[0]]) + # X = np.array([keypoint1[1], keypoint2[1]]) + # mX = np.mean(X) + # mY = np.mean(Y) + # length = ((X[0] - X[1]) ** 2 + (Y[0] - Y[1]) ** 2) ** 0.5 + # angle = math.degrees(math.atan2(X[0] - X[1], Y[0] - Y[1])) + # polygon = cv2.ellipse2Poly((int(mY), int(mX)), (int(length / 2), stickwidth), int(angle), 0, 360, 1) + # cv2.fillConvexPoly(img, polygon, [int(float(c) * 0.6) for c in color]) + + 
for _idx, (keypoint, color) in enumerate(zip(kp2ds_body, colors)): + if keypoint[-1] < threshold: + continue + x, y = keypoint[0], keypoint[1] + # cv2.circle(canvas, (int(x), int(y)), 4, color, thickness=-1) + cv2.circle(img, (int(x), int(y)), stickwidth, color, thickness=-1) + + if draw_hand: + img = draw_handpose(img, kp2ds_lhand, hand_score_th=threshold) + img = draw_handpose(img, kp2ds_rhand, hand_score_th=threshold) + + kp2ds_body[:, 0] /= W + kp2ds_body[:, 1] /= H + + if data_to_json is not None: + if idx == -1: + data_to_json.append( + { + "image_id": "frame_{:05d}.jpg".format(len(data_to_json) + 1), + "height": H, + "width": W, + "category_id": 1, + "keypoints_body": kp2ds_body.tolist(), + "keypoints_left_hand": kp2ds_lhand.tolist(), + "keypoints_right_hand": kp2ds_rhand.tolist(), + } + ) + else: + data_to_json[idx] = { + "image_id": "frame_{:05d}.jpg".format(idx + 1), + "height": H, + "width": W, + "category_id": 1, + "keypoints_body": kp2ds_body.tolist(), + "keypoints_left_hand": kp2ds_lhand.tolist(), + "keypoints_right_hand": kp2ds_rhand.tolist(), + } + return img + + +def draw_aapose( + img, + kp2ds, + threshold=0.6, + data_to_json=None, + idx=-1, + kp2ds_lhand=None, + kp2ds_rhand=None, + draw_hand=False, + stick_width_norm=200, + draw_head=True +): + """ + Draw keypoints and connections representing hand pose on a given canvas. + + Args: + canvas (np.ndarray): A 3D numpy array representing the canvas (image) on which to draw the hand pose. + keypoints (List[Keypoint]| None): A list of Keypoint objects representing the hand keypoints to be drawn + or None if no keypoints are present. + + Returns: + np.ndarray: A 3D numpy array representing the modified canvas with the drawn hand pose. + + Note: + The function expects the x and y coordinates of the keypoints to be normalized between 0 and 1. 
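+        Body keypoints with confidence below ``threshold`` are skipped; limb and
+        point radii scale as min(H, W) / ``stick_width_norm``, and ``draw_head=False``
+        zeroes the nose/eye/ear confidences so the head is not drawn.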
+ """ + + new_kep_list = [ + "Nose", + "Neck", + "RShoulder", + "RElbow", + "RWrist", # No.4 + "LShoulder", + "LElbow", + "LWrist", # No.7 + "RHip", + "RKnee", + "RAnkle", # No.10 + "LHip", + "LKnee", + "LAnkle", # No.13 + "REye", + "LEye", + "REar", + "LEar", + "LToe", + "RToe", + ] + # kp2ds_body = (kp2ds.copy()[[0, 6, 6, 8, 10, 5, 7, 9, 12, 14, 16, 11, 13, 15, 2, 1, 4, 3, 17, 20]] + \ + # kp2ds.copy()[[0, 5, 6, 8, 10, 5, 7, 9, 12, 14, 16, 11, 13, 15, 2, 1, 4, 3, 18, 21]]) / 2 + kp2ds = kp2ds.copy() + if not draw_head: + kp2ds[[0,14,15,16,17], 2] = 0 + kp2ds_body = kp2ds + + # kp2ds_lhand = kp2ds.copy()[91:112] + # kp2ds_rhand = kp2ds.copy()[112:133] + + limbSeq = [ + [2, 3], + [2, 6], # shoulders + [3, 4], + [4, 5], # left arm + [6, 7], + [7, 8], # right arm + [2, 9], + [9, 10], + [10, 11], # right leg + [2, 12], + [12, 13], + [13, 14], # left leg + [2, 1], + [1, 15], + [15, 17], + [1, 16], + [16, 18], # face (nose, eyes, ears) + [14, 19], + [11, 20], # foot + ] + + colors = [ + [255, 0, 0], + [255, 85, 0], + [255, 170, 0], + [255, 255, 0], + [170, 255, 0], + [85, 255, 0], + [0, 255, 0], + [0, 255, 85], + [0, 255, 170], + [0, 255, 255], + [0, 170, 255], + [0, 85, 255], + [0, 0, 255], + [85, 0, 255], + [170, 0, 255], + [255, 0, 255], + [255, 0, 170], + [255, 0, 85], + # foot + [200, 200, 0], + [100, 100, 0], + ] + + H, W, C = img.shape + stickwidth = max(int(min(H, W) / stick_width_norm), 1) + + for _idx, ((k1_index, k2_index), color) in enumerate(zip(limbSeq, colors)): + keypoint1 = kp2ds_body[k1_index - 1] + keypoint2 = kp2ds_body[k2_index - 1] + + if keypoint1[-1] < threshold or keypoint2[-1] < threshold: + continue + + Y = np.array([keypoint1[0], keypoint2[0]]) + X = np.array([keypoint1[1], keypoint2[1]]) + mX = np.mean(X) + mY = np.mean(Y) + length = ((X[0] - X[1]) ** 2 + (Y[0] - Y[1]) ** 2) ** 0.5 + angle = math.degrees(math.atan2(X[0] - X[1], Y[0] - Y[1])) + polygon = cv2.ellipse2Poly((int(mY), int(mX)), (int(length / 2), stickwidth), int(angle), 0, 360, 1) + cv2.fillConvexPoly(img, polygon, [int(float(c) * 0.6) for c in color]) + + for _idx, (keypoint, color) in enumerate(zip(kp2ds_body, colors)): + if keypoint[-1] < threshold: + continue + x, y = keypoint[0], keypoint[1] + # cv2.circle(canvas, (int(x), int(y)), 4, color, thickness=-1) + cv2.circle(img, (int(x), int(y)), stickwidth, color, thickness=-1) + + if draw_hand: + img = draw_handpose(img, kp2ds_lhand, hand_score_th=threshold) + img = draw_handpose(img, kp2ds_rhand, hand_score_th=threshold) + + kp2ds_body[:, 0] /= W + kp2ds_body[:, 1] /= H + + if data_to_json is not None: + if idx == -1: + data_to_json.append( + { + "image_id": "frame_{:05d}.jpg".format(len(data_to_json) + 1), + "height": H, + "width": W, + "category_id": 1, + "keypoints_body": kp2ds_body.tolist(), + "keypoints_left_hand": kp2ds_lhand.tolist(), + "keypoints_right_hand": kp2ds_rhand.tolist(), + } + ) + else: + data_to_json[idx] = { + "image_id": "frame_{:05d}.jpg".format(idx + 1), + "height": H, + "width": W, + "category_id": 1, + "keypoints_body": kp2ds_body.tolist(), + "keypoints_left_hand": kp2ds_lhand.tolist(), + "keypoints_right_hand": kp2ds_rhand.tolist(), + } + return img + + +def draw_aapose_new( + img, + kp2ds, + threshold=0.6, + data_to_json=None, + idx=-1, + kp2ds_lhand=None, + kp2ds_rhand=None, + draw_hand=False, + stickwidth_type='v2', + body_stick_width=-1, + hand_stick_width=-1, + draw_head=True +): + """ + Draw keypoints and connections representing hand pose on a given canvas. 
+ + Args: + canvas (np.ndarray): A 3D numpy array representing the canvas (image) on which to draw the hand pose. + keypoints (List[Keypoint]| None): A list of Keypoint objects representing the hand keypoints to be drawn + or None if no keypoints are present. + + Returns: + np.ndarray: A 3D numpy array representing the modified canvas with the drawn hand pose. + + Note: + The function expects the x and y coordinates of the keypoints to be normalized between 0 and 1. + """ + + new_kep_list = [ + "Nose", + "Neck", + "RShoulder", + "RElbow", + "RWrist", # No.4 + "LShoulder", + "LElbow", + "LWrist", # No.7 + "RHip", + "RKnee", + "RAnkle", # No.10 + "LHip", + "LKnee", + "LAnkle", # No.13 + "REye", + "LEye", + "REar", + "LEar", + "LToe", + "RToe", + ] + # kp2ds_body = (kp2ds.copy()[[0, 6, 6, 8, 10, 5, 7, 9, 12, 14, 16, 11, 13, 15, 2, 1, 4, 3, 17, 20]] + \ + # kp2ds.copy()[[0, 5, 6, 8, 10, 5, 7, 9, 12, 14, 16, 11, 13, 15, 2, 1, 4, 3, 18, 21]]) / 2 + kp2ds = kp2ds.copy() + if not draw_head: + kp2ds[[0,14,15,16,17], 2] = 0 + kp2ds_body = kp2ds + + # kp2ds_lhand = kp2ds.copy()[91:112] + # kp2ds_rhand = kp2ds.copy()[112:133] + + limbSeq = [ + [2, 3], + [2, 6], # shoulders + [3, 4], + [4, 5], # left arm + [6, 7], + [7, 8], # right arm + [2, 9], + [9, 10], + [10, 11], # right leg + [2, 12], + [12, 13], + [13, 14], # left leg + [2, 1], + [1, 15], + [15, 17], + [1, 16], + [16, 18], # face (nose, eyes, ears) + [14, 19], + [11, 20], # foot + ] + + colors = [ + [255, 0, 0], + [255, 85, 0], + [255, 170, 0], + [255, 255, 0], + [170, 255, 0], + [85, 255, 0], + [0, 255, 0], + [0, 255, 85], + [0, 255, 170], + [0, 255, 255], + [0, 170, 255], + [0, 85, 255], + [0, 0, 255], + [85, 0, 255], + [170, 0, 255], + [255, 0, 255], + [255, 0, 170], + [255, 0, 85], + # foot + [200, 200, 0], + [100, 100, 0], + ] + + H, W, C = img.shape + H, W, C = img.shape + + #if stickwidth_type == 'v1': + # stickwidth = max(int(min(H, W) / 200), 1) + #elif stickwidth_type == 'v2': + if body_stick_width == -1: + stickwidth = max(int(min(H, W) / 200) - 1, 1) + else: + stickwidth = body_stick_width + + for _idx, ((k1_index, k2_index), color) in enumerate(zip(limbSeq, colors)): + keypoint1 = kp2ds_body[k1_index - 1] + keypoint2 = kp2ds_body[k2_index - 1] + + if keypoint1[-1] < threshold or keypoint2[-1] < threshold: + continue + + Y = np.array([keypoint1[0], keypoint2[0]]) + X = np.array([keypoint1[1], keypoint2[1]]) + mX = np.mean(X) + mY = np.mean(Y) + length = ((X[0] - X[1]) ** 2 + (Y[0] - Y[1]) ** 2) ** 0.5 + angle = math.degrees(math.atan2(X[0] - X[1], Y[0] - Y[1])) + polygon = cv2.ellipse2Poly((int(mY), int(mX)), (int(length / 2), stickwidth), int(angle), 0, 360, 1) + cv2.fillConvexPoly(img, polygon, [int(float(c) * 0.6) for c in color]) + + for _idx, (keypoint, color) in enumerate(zip(kp2ds_body, colors)): + if keypoint[-1] < threshold: + continue + x, y = keypoint[0], keypoint[1] + # cv2.circle(canvas, (int(x), int(y)), 4, color, thickness=-1) + cv2.circle(img, (int(x), int(y)), stickwidth, color, thickness=-1) + + if draw_hand: + img = draw_handpose_new(img, kp2ds_lhand, stickwidth_type=stickwidth_type, hand_score_th=threshold, hand_stick_width=hand_stick_width) + img = draw_handpose_new(img, kp2ds_rhand, stickwidth_type=stickwidth_type, hand_score_th=threshold, hand_stick_width=hand_stick_width) + + kp2ds_body[:, 0] /= W + kp2ds_body[:, 1] /= H + + if data_to_json is not None: + if idx == -1: + data_to_json.append( + { + "image_id": "frame_{:05d}.jpg".format(len(data_to_json) + 1), + "height": H, + "width": W, + "category_id": 1, + 
"keypoints_body": kp2ds_body.tolist(), + "keypoints_left_hand": kp2ds_lhand.tolist(), + "keypoints_right_hand": kp2ds_rhand.tolist(), + } + ) + else: + data_to_json[idx] = { + "image_id": "frame_{:05d}.jpg".format(idx + 1), + "height": H, + "width": W, + "category_id": 1, + "keypoints_body": kp2ds_body.tolist(), + "keypoints_left_hand": kp2ds_lhand.tolist(), + "keypoints_right_hand": kp2ds_rhand.tolist(), + } + return img + + +def draw_bbox(img, bbox, color=(255, 0, 0)): + img = load_image(img) + bbox = [int(bbox_tmp) for bbox_tmp in bbox] + cv2.rectangle(img, (bbox[0], bbox[1]), (bbox[2], bbox[3]), color, 2) + return img + + +def draw_kp2ds(img, kp2ds, threshold=0, color=(255, 0, 0), skeleton=None, reverse=False): + img = load_image(img, reverse) + + if skeleton is not None: + if skeleton == "coco17": + skeleton_list = [ + [6, 8], + [8, 10], + [5, 7], + [7, 9], + [11, 13], + [13, 15], + [12, 14], + [14, 16], + [5, 6], + [6, 12], + [12, 11], + [11, 5], + ] + color_list = [ + (255, 0, 0), + (0, 255, 0), + (0, 0, 255), + (255, 255, 0), + (255, 0, 255), + (0, 255, 255), + ] + elif skeleton == "cocowholebody": + skeleton_list = [ + [6, 8], + [8, 10], + [5, 7], + [7, 9], + [11, 13], + [13, 15], + [12, 14], + [14, 16], + [5, 6], + [6, 12], + [12, 11], + [11, 5], + [15, 17], + [15, 18], + [15, 19], + [16, 20], + [16, 21], + [16, 22], + [91, 92, 93, 94, 95], + [91, 96, 97, 98, 99], + [91, 100, 101, 102, 103], + [91, 104, 105, 106, 107], + [91, 108, 109, 110, 111], + [112, 113, 114, 115, 116], + [112, 117, 118, 119, 120], + [112, 121, 122, 123, 124], + [112, 125, 126, 127, 128], + [112, 129, 130, 131, 132], + ] + color_list = [ + (255, 0, 0), + (0, 255, 0), + (0, 0, 255), + (255, 255, 0), + (255, 0, 255), + (0, 255, 255), + ] + else: + color_list = [color] + for _idx, _skeleton in enumerate(skeleton_list): + for i in range(len(_skeleton) - 1): + cv2.line( + img, + (int(kp2ds[_skeleton[i], 0]), int(kp2ds[_skeleton[i], 1])), + (int(kp2ds[_skeleton[i + 1], 0]), int(kp2ds[_skeleton[i + 1], 1])), + color_list[_idx % len(color_list)], + 3, + ) + + for _idx, kp2d in enumerate(kp2ds): + if kp2d[2] > threshold: + cv2.circle(img, (int(kp2d[0]), int(kp2d[1])), 3, color, -1) + # cv2.putText(img, + # str(_idx), + # (int(kp2d[0, i, 0])*1, + # int(kp2d[0, i, 1])*1), + # cv2.FONT_HERSHEY_SIMPLEX, + # 0.75, + # color, + # 2 + # ) + + return img + + +def draw_pcd(pcd_list, save_path=None): + fig = plt.figure() + ax = fig.add_subplot(111, projection="3d") + + color_list = ["r", "g", "b", "y", "p"] + + for _idx, _pcd in enumerate(pcd_list): + ax.scatter(_pcd[:, 0], _pcd[:, 1], _pcd[:, 2], c=color_list[_idx], marker="o") + + ax.set_xlabel("X") + ax.set_ylabel("Y") + ax.set_zlabel("Z") + + if save_path is not None: + plt.savefig(save_path) + else: + plt.savefig("tmp.png") + + +def load_image(img, reverse=False): + if type(img) == str: + img = cv2.imread(img) + if reverse: + img = img.astype(np.float32) + img = img[:, :, ::-1] + img = img.astype(np.uint8) + return img + + +def draw_skeleten(meta): + kps = [] + for i, kp in enumerate(meta["keypoints_body"]): + if kp is None: + # if kp is None: + kps.append([0, 0, 0]) + else: + kps.append([*kp, 1]) + kps = np.array(kps) + + kps[:, 0] *= meta["width"] + kps[:, 1] *= meta["height"] + pose_img = np.zeros([meta["height"], meta["width"], 3], dtype=np.uint8) + + pose_img = draw_aapose( + pose_img, + kps, + draw_hand=True, + kp2ds_lhand=meta["keypoints_left_hand"], + kp2ds_rhand=meta["keypoints_right_hand"], + ) + return pose_img + + +def draw_skeleten_with_pncc(pncc: np.ndarray, 
meta: Dict) -> np.ndarray: + """ + Args: + pncc: [H,W,3] + meta: required keys: keypoints_body: [N, 3] keypoints_left_hand, keypoints_right_hand + Return: + np.ndarray [H, W, 3] + """ + # preprocess keypoints + kps = [] + for i, kp in enumerate(meta["keypoints_body"]): + if kp is None: + # if kp is None: + kps.append([0, 0, 0]) + elif i in [14, 15, 16, 17]: + kps.append([0, 0, 0]) + else: + kps.append([*kp]) + kps = np.stack(kps) + + kps[:, 0] *= pncc.shape[1] + kps[:, 1] *= pncc.shape[0] + + # draw neck + canvas = np.zeros_like(pncc) + if kps[0][2] > 0.6 and kps[1][2] > 0.6: + canvas = draw_ellipse_by_2kp(canvas, kps[0], kps[1], [0, 0, 255]) + + # draw pncc + mask = (pncc > 0).max(axis=2) + canvas[mask] = pncc[mask] + pncc = canvas + + # draw other skeleten + kps[0] = 0 + + meta["keypoints_left_hand"][:, 0] *= meta["width"] + meta["keypoints_left_hand"][:, 1] *= meta["height"] + + meta["keypoints_right_hand"][:, 0] *= meta["width"] + meta["keypoints_right_hand"][:, 1] *= meta["height"] + pose_img = draw_aapose( + pncc, + kps, + draw_hand=True, + kp2ds_lhand=meta["keypoints_left_hand"], + kp2ds_rhand=meta["keypoints_right_hand"], + ) + return pose_img + + +FACE_CUSTOM_STYLE = { + "eyeball": {"indexs": [68, 69], "color": [255, 255, 255], "connect": False}, + "left_eyebrow": {"indexs": [17, 18, 19, 20, 21], "color": [0, 255, 0]}, + "right_eyebrow": {"indexs": [22, 23, 24, 25, 26], "color": [0, 0, 255]}, + "left_eye": {"indexs": [36, 37, 38, 39, 40, 41], "color": [255, 255, 0], "close": True}, + "right_eye": {"indexs": [42, 43, 44, 45, 46, 47], "color": [255, 0, 255], "close": True}, + "mouth_outside": {"indexs": list(range(48, 60)), "color": [100, 255, 50], "close": True}, + "mouth_inside": {"indexs": [60, 61, 62, 63, 64, 65, 66, 67], "color": [255, 100, 50], "close": True}, +} + + +def draw_face_kp(img, kps, thickness=2, style=FACE_CUSTOM_STYLE): + """ + Args: + img: [H, W, 3] + kps: [70, 2] + """ + img = img.copy() + for key, item in style.items(): + pts = np.array(kps[item["indexs"]]).astype(np.int32) + connect = item.get("connect", True) + color = item["color"] + close = item.get("close", False) + if connect: + cv2.polylines(img, [pts], close, color, thickness=thickness) + else: + for kp in pts: + kp = np.array(kp).astype(np.int32) + cv2.circle(img, kp, thickness * 2, color=color, thickness=-1) + return img + + +def draw_traj(metas: List[AAPoseMeta], threshold=0.6): + + colors = [[255, 0, 0], [255, 85, 0], [255, 170, 0], [255, 255, 0], [170, 255, 0], [85, 255, 0], [0, 255, 0], \ + [0, 255, 85], [0, 255, 170], [0, 255, 255], [0, 170, 255], [0, 85, 255], [0, 0, 255], [85, 0, 255], \ + [170, 0, 255], [255, 0, 255], [255, 0, 170], [255, 0, 85], [100, 255, 50], [255, 100, 50], + # foot + [200, 200, 0], + [100, 100, 0] + ] + limbSeq = [ + [1, 2], [1, 5], # shoulders + [2, 3], [3, 4], # left arm + [5, 6], [6, 7], # right arm + [1, 8], [8, 9], [9, 10], # right leg + [1, 11], [11, 12], [12, 13], # left leg + # face (nose, eyes, ears) + [13, 18], [10, 19] # foot + ] + + face_seq = [[1, 0], [0, 14], [14, 16], [0, 15], [15, 17]] + kp_body = np.array([meta.kps_body for meta in metas]) + kp_body_p = np.array([meta.kps_body_p for meta in metas]) + + + face_seq = random.sample(face_seq, 2) + + kp_lh = np.array([meta.kps_lhand for meta in metas]) + kp_rh = np.array([meta.kps_rhand for meta in metas]) + + kp_lh_p = np.array([meta.kps_lhand_p for meta in metas]) + kp_rh_p = np.array([meta.kps_rhand_p for meta in metas]) + + # kp_lh = np.concatenate([kp_lh, kp_lh_p], axis=-1) + # kp_rh = 
np.concatenate([kp_rh, kp_rh_p], axis=-1)
+
+    new_limbSeq = []
+    key_point_list = []
+    for _idx, ((k1_index, k2_index)) in enumerate(limbSeq):
+
+        vis = (kp_body_p[:, k1_index] > threshold) * (kp_body_p[:, k2_index] > threshold) * 1
+        if vis.sum() * 1.0 / vis.shape[0] > 0.4:
+            new_limbSeq.append([k1_index, k2_index])
+
+    for _idx, ((k1_index, k2_index)) in enumerate(limbSeq):
+
+        keypoint1 = kp_body[:, k1_index - 1]
+        keypoint2 = kp_body[:, k2_index - 1]
+        interleave = random.randint(4, 7)
+        randind = random.randint(0, interleave - 1)
+
+        Y = np.array([keypoint1[:, 0], keypoint2[:, 0]])
+        X = np.array([keypoint1[:, 1], keypoint2[:, 1]])
+
+        vis = (keypoint1[:, -1] > threshold) * (keypoint2[:, -1] > threshold) * 1
+
+        # sample one point at parameter t along the limb segment in every frame
+        t = randind / interleave
+        x = (1-t)*Y[0, :] + t*Y[1, :]
+        y = (1-t)*X[0, :] + t*X[1, :]
+
+        x = x.astype(int)
+        y = y.astype(int)
+
+        new_array = np.array([x, y, vis]).T
+
+        key_point_list.append(new_array)
+
+    indx_lh = random.randint(0, kp_lh.shape[1] - 1)
+    lh = kp_lh[:, indx_lh, :]
+    lh_p = kp_lh_p[:, indx_lh:indx_lh+1]
+    lh = np.concatenate([lh, lh_p], axis=-1)
+
+    indx_rh = random.randint(0, kp_rh.shape[1] - 1)
+    rh = kp_rh[:, indx_rh, :]
+    rh_p = kp_rh_p[:, indx_rh:indx_rh+1]
+    rh = np.concatenate([rh, rh_p], axis=-1)
+
+    # binarize the confidence column against the threshold
+    lh[:, -1] = (lh[:, -1] > threshold) * 1
+    rh[:, -1] = (rh[:, -1] > threshold) * 1
+
+    key_point_list.append(lh.astype(int))
+    key_point_list.append(rh.astype(int))
+
+    key_points_list = np.stack(key_point_list)
+    num_points = len(key_points_list)
+    sample_colors = random.sample(colors, num_points)
+
+    stickwidth = max(int(min(metas[0].width, metas[0].height) / 150), 2)
+
+    image_list_ori = []
+    for i in range(key_points_list.shape[-2]):
+        _image_vis = np.zeros((metas[0].height, metas[0].width, 3))
+        points = key_points_list[:, i, :]
+        for idx, point in enumerate(points):
+            x, y, vis = point
+            if vis == 1:
+                cv2.circle(_image_vis, (x, y), stickwidth, sample_colors[idx], thickness=-1)
+
+        image_list_ori.append(_image_vis)
+
+    return image_list_ori diff --git a/zavodik/nodes/ComfyUI-WanAnimatePreprocess/pose_utils/pose2d_utils.py b/zavodik/nodes/ComfyUI-WanAnimatePreprocess/pose_utils/pose2d_utils.py new file mode 100644 index 0000000000000000000000000000000000000000..da16cac4c3fdbb547ece3f4195ae5fef8fe4ca24 --- /dev/null +++ b/zavodik/nodes/ComfyUI-WanAnimatePreprocess/pose_utils/pose2d_utils.py @@ -0,0 +1,1110 @@ +# Copyright 2024-2025 The Alibaba Wan Team Authors. All rights reserved.
+import warnings +import cv2 +import numpy as np +from typing import List + +def box_convert_simple(box, convert_type='xyxy2xywh'): + if convert_type == 'xyxy2xywh': + return [box[0], box[1], box[2] - box[0], box[3] - box[1]] + elif convert_type == 'xywh2xyxy': + return [box[0], box[1], box[2] + box[0], box[3] + box[1]] + elif convert_type == 'xyxy2ctwh': + return [(box[0] + box[2]) / 2, (box[1] + box[3]) / 2, box[2] - box[0], box[3] - box[1]] + elif convert_type == 'ctwh2xyxy': + return [box[0] - box[2] // 2, box[1] - box[3] // 2, box[0] + (box[2] - box[2] // 2), box[1] + (box[3] - box[3] // 2)] + +class AAPoseMeta: + def __init__(self, meta=None, kp2ds=None): + self.image_id = "" + self.height = 0 + self.width = 0 + + self.kps_body: np.ndarray = None + self.kps_lhand: np.ndarray = None + self.kps_rhand: np.ndarray = None + self.kps_face: np.ndarray = None + self.kps_body_p: np.ndarray = None + self.kps_lhand_p: np.ndarray = None + self.kps_rhand_p: np.ndarray = None + self.kps_face_p: np.ndarray = None + + + if meta is not None: + self.load_from_meta(meta) + elif kp2ds is not None: + self.load_from_kp2ds(kp2ds) + + def is_valid(self, kp, p, threshold): + x, y = kp + if x < 0 or y < 0 or x > self.width or y > self.height or p < threshold: + return False + else: + return True + + def get_bbox(self, kp, kp_p, threshold=0.5): + kps = kp[kp_p > threshold] + if kps.size == 0: + return 0, 0, 0, 0 + x0, y0 = kps.min(axis=0) + x1, y1 = kps.max(axis=0) + return x0, y0, x1, y1 + + def crop(self, x0, y0, x1, y1): + all_kps = [self.kps_body, self.kps_lhand, self.kps_rhand, self.kps_face] + for kps in all_kps: + if kps is not None: + kps[:, 0] -= x0 + kps[:, 1] -= y0 + self.width = x1 - x0 + self.height = y1 - y0 + return self + + def resize(self, width, height): + scale_x = width / self.width + scale_y = height / self.height + all_kps = [self.kps_body, self.kps_lhand, self.kps_rhand, self.kps_face] + for kps in all_kps: + if kps is not None: + kps[:, 0] *= scale_x + kps[:, 1] *= scale_y + self.width = width + self.height = height + return self + + + def get_kps_body_with_p(self, normalize=False): + kps_body = self.kps_body.copy() + if normalize: + kps_body = kps_body / np.array([self.width, self.height]) + + return np.concatenate([kps_body, self.kps_body_p[:, None]]) + + @staticmethod + def from_kps_face(kps_face: np.ndarray, height: int, width: int): + + pose_meta = AAPoseMeta() + pose_meta.kps_face = kps_face[:, :2] + if kps_face.shape[1] == 3: + pose_meta.kps_face_p = kps_face[:, 2] + else: + pose_meta.kps_face_p = kps_face[:, 0] * 0 + 1 + pose_meta.height = height + pose_meta.width = width + return pose_meta + + @staticmethod + def from_kps_body(kps_body: np.ndarray, height: int, width: int): + + pose_meta = AAPoseMeta() + pose_meta.kps_body = kps_body[:, :2] + pose_meta.kps_body_p = kps_body[:, 2] + pose_meta.height = height + pose_meta.width = width + return pose_meta + @staticmethod + def from_humanapi_meta(meta): + pose_meta = AAPoseMeta() + width, height = meta["width"], meta["height"] + pose_meta.width = width + pose_meta.height = height + pose_meta.kps_body = meta["keypoints_body"][:, :2] * (width, height) + pose_meta.kps_body_p = meta["keypoints_body"][:, 2] + pose_meta.kps_lhand = meta["keypoints_left_hand"][:, :2] * (width, height) + pose_meta.kps_lhand_p = meta["keypoints_left_hand"][:, 2] + pose_meta.kps_rhand = meta["keypoints_right_hand"][:, :2] * (width, height) + pose_meta.kps_rhand_p = meta["keypoints_right_hand"][:, 2] + if 'keypoints_face' in meta: + pose_meta.kps_face = 
meta["keypoints_face"][:, :2] * (width, height) + pose_meta.kps_face_p = meta["keypoints_face"][:, 2] + return pose_meta + + def load_from_meta(self, meta, norm_body=True, norm_hand=False): + + self.image_id = meta.get("image_id", "00000.png") + self.height = meta["height"] + self.width = meta["width"] + kps_body_p = [] + kps_body = [] + for kp in meta["keypoints_body"]: + if kp is None: + kps_body.append([0, 0]) + kps_body_p.append(0) + else: + kps_body.append(kp) + kps_body_p.append(1) + + self.kps_body = np.array(kps_body) + self.kps_body[:, 0] *= self.width + self.kps_body[:, 1] *= self.height + self.kps_body_p = np.array(kps_body_p) + + self.kps_lhand = np.array(meta["keypoints_left_hand"])[:, :2] + self.kps_lhand_p = np.array(meta["keypoints_left_hand"])[:, 2] + self.kps_rhand = np.array(meta["keypoints_right_hand"])[:, :2] + self.kps_rhand_p = np.array(meta["keypoints_right_hand"])[:, 2] + + @staticmethod + def load_from_kp2ds(kp2ds: List[np.ndarray], width: int, height: int): + """input 133x3 numpy keypoints and output AAPoseMeta + + Args: + kp2ds (List[np.ndarray]): _description_ + width (int): _description_ + height (int): _description_ + + Returns: + _type_: _description_ + """ + pose_meta = AAPoseMeta() + pose_meta.width = width + pose_meta.height = height + kps_body = (kp2ds[[0, 6, 6, 8, 10, 5, 7, 9, 12, 14, 16, 11, 13, 15, 2, 1, 4, 3, 17, 20]] + kp2ds[[0, 5, 6, 8, 10, 5, 7, 9, 12, 14, 16, 11, 13, 15, 2, 1, 4, 3, 18, 21]]) / 2 + kps_lhand = kp2ds[91:112] + kps_rhand = kp2ds[112:133] + kps_face = np.concatenate([kp2ds[23:23+68], kp2ds[1:3]], axis=0) + pose_meta.kps_body = kps_body[:, :2] + pose_meta.kps_body_p = kps_body[:, 2] + pose_meta.kps_lhand = kps_lhand[:, :2] + pose_meta.kps_lhand_p = kps_lhand[:, 2] + pose_meta.kps_rhand = kps_rhand[:, :2] + pose_meta.kps_rhand_p = kps_rhand[:, 2] + pose_meta.kps_face = kps_face[:, :2] + pose_meta.kps_face_p = kps_face[:, 2] + return pose_meta + + @staticmethod + def from_dwpose(dwpose_det_res, height, width): + pose_meta = AAPoseMeta() + pose_meta.kps_body = dwpose_det_res["bodies"]["candidate"] + pose_meta.kps_body_p = dwpose_det_res["bodies"]["score"] + pose_meta.kps_body[:, 0] *= width + pose_meta.kps_body[:, 1] *= height + + pose_meta.kps_lhand, pose_meta.kps_rhand = dwpose_det_res["hands"] + pose_meta.kps_lhand[:, 0] *= width + pose_meta.kps_lhand[:, 1] *= height + pose_meta.kps_rhand[:, 0] *= width + pose_meta.kps_rhand[:, 1] *= height + pose_meta.kps_lhand_p, pose_meta.kps_rhand_p = dwpose_det_res["hands_score"] + + pose_meta.kps_face = dwpose_det_res["faces"][0] + pose_meta.kps_face[:, 0] *= width + pose_meta.kps_face[:, 1] *= height + pose_meta.kps_face_p = dwpose_det_res["faces_score"][0] + return pose_meta + + def save_json(self): + pass + + def draw_aapose(self, img, threshold=0.5, stick_width_norm=200, draw_hand=True, draw_head=True): + from .human_visualization import draw_aapose_by_meta + return draw_aapose_by_meta(img, self, threshold, stick_width_norm, draw_hand, draw_head) + + + def translate(self, x0, y0): + all_kps = [self.kps_body, self.kps_lhand, self.kps_rhand, self.kps_face] + for kps in all_kps: + if kps is not None: + kps[:, 0] -= x0 + kps[:, 1] -= y0 + + def scale(self, sx, sy): + all_kps = [self.kps_body, self.kps_lhand, self.kps_rhand, self.kps_face] + for kps in all_kps: + if kps is not None: + kps[:, 0] *= sx + kps[:, 1] *= sy + + def padding_resize2(self, height=512, width=512): + """kps will be changed inplace + + """ + + all_kps = [self.kps_body, self.kps_lhand, self.kps_rhand, self.kps_face] + + 
ori_height, ori_width = self.height, self.width
+
+        if (ori_height / ori_width) > (height / width):
+            new_width = int(height / ori_height * ori_width)
+            padding = int((width - new_width) / 2)
+            padding_width = padding
+            padding_height = 0
+            scale = height / ori_height
+
+            for kps in all_kps:
+                if kps is not None:
+                    kps[:, 0] = kps[:, 0] * scale + padding
+                    kps[:, 1] = kps[:, 1] * scale
+
+        else:
+            new_height = int(width / ori_width * ori_height)
+            padding = int((height - new_height) / 2)
+            padding_width = 0
+            padding_height = padding
+            scale = width / ori_width
+            for kps in all_kps:
+                if kps is not None:
+                    kps[:, 1] = kps[:, 1] * scale + padding
+                    kps[:, 0] = kps[:, 0] * scale
+
+
+        self.width = width
+        self.height = height
+        return self
+
+
+def transform_preds(coords, center, scale, output_size, use_udp=False):
+    """Get final keypoint predictions from heatmaps and apply scaling and
+    translation to map them back to the image.
+
+    Note:
+        num_keypoints: K
+
+    Args:
+        coords (np.ndarray[K, ndims]):
+
+            * If ndims=2, coords are predicted keypoint location.
+            * If ndims=4, coords are composed of (x, y, scores, tags)
+            * If ndims=5, coords are composed of (x, y, scores, tags,
+              flipped_tags)
+
+        center (np.ndarray[2, ]): Center of the bounding box (x, y).
+        scale (np.ndarray[2, ]): Scale of the bounding box
+            wrt [width, height].
+        output_size (np.ndarray[2, ] | list(2,)): Size of the
+            destination heatmaps.
+        use_udp (bool): Use unbiased data processing.
+
+    Returns:
+        np.ndarray: Predicted coordinates in the images.
+    """
+    assert coords.shape[1] in (2, 4, 5)
+    assert len(center) == 2
+    assert len(scale) == 2
+    assert len(output_size) == 2
+
+    # Recover the scale which is normalized by a factor of 200.
+    # scale = scale * 200.0
+
+    if use_udp:
+        scale_x = scale[0] / (output_size[0] - 1.0)
+        scale_y = scale[1] / (output_size[1] - 1.0)
+    else:
+        scale_x = scale[0] / output_size[0]
+        scale_y = scale[1] / output_size[1]
+
+    target_coords = np.ones_like(coords)
+    target_coords[:, 0] = coords[:, 0] * scale_x + center[0] - scale[0] * 0.5
+    target_coords[:, 1] = coords[:, 1] * scale_y + center[1] - scale[1] * 0.5
+
+    return target_coords
+
+
+def _calc_distances(preds, targets, mask, normalize):
+    """Calculate the normalized distances between preds and target.
+
+    Note:
+        batch_size: N
+        num_keypoints: K
+        dimension of keypoints: D (normally, D=2 or D=3)
+
+    Args:
+        preds (np.ndarray[N, K, D]): Predicted keypoint location.
+        targets (np.ndarray[N, K, D]): Groundtruth keypoint location.
+        mask (np.ndarray[N, K]): Visibility of the target. False for invisible
+            joints, and True for visible. Invisible joints will be ignored for
+            accuracy calculation.
+        normalize (np.ndarray[N, D]): Typical value is heatmap_size
+
+    Returns:
+        np.ndarray[K, N]: The normalized distances. \
+            If target keypoints are missing, the distance is -1.
+    """
+    N, K, _ = preds.shape
+    # set mask=0 when normalize==0
+    _mask = mask.copy()
+    _mask[np.where((normalize == 0).sum(1))[0], :] = False
+    distances = np.full((N, K), -1, dtype=np.float32)
+    # handle invalid values
+    normalize[np.where(normalize <= 0)] = 1e6
+    distances[_mask] = np.linalg.norm(
+        ((preds - targets) / normalize[:, None, :])[_mask], axis=-1)
+    return distances.T
+
+
+def _distance_acc(distances, thr=0.5):
+    """Return the percentage below the distance threshold, while ignoring
+    distance values of -1.
+
+    Note:
+        batch_size: N
+    Args:
+        distances (np.ndarray[N, ]): The normalized distances.
+        thr (float): Threshold of the distances.
+ + Returns: + float: Percentage of distances below the threshold. \ + If all target keypoints are missing, return -1. + """ + distance_valid = distances != -1 + num_distance_valid = distance_valid.sum() + if num_distance_valid > 0: + return (distances[distance_valid] < thr).sum() / num_distance_valid + return -1 + + +def _get_max_preds(heatmaps): + """Get keypoint predictions from score maps. + + Note: + batch_size: N + num_keypoints: K + heatmap height: H + heatmap width: W + + Args: + heatmaps (np.ndarray[N, K, H, W]): model predicted heatmaps. + + Returns: + tuple: A tuple containing aggregated results. + + - preds (np.ndarray[N, K, 2]): Predicted keypoint location. + - maxvals (np.ndarray[N, K, 1]): Scores (confidence) of the keypoints. + """ + assert isinstance(heatmaps, + np.ndarray), ('heatmaps should be numpy.ndarray') + assert heatmaps.ndim == 4, 'batch_images should be 4-ndim' + + N, K, _, W = heatmaps.shape + heatmaps_reshaped = heatmaps.reshape((N, K, -1)) + idx = np.argmax(heatmaps_reshaped, 2).reshape((N, K, 1)) + maxvals = np.amax(heatmaps_reshaped, 2).reshape((N, K, 1)) + + preds = np.tile(idx, (1, 1, 2)).astype(np.float32) + preds[:, :, 0] = preds[:, :, 0] % W + preds[:, :, 1] = preds[:, :, 1] // W + + preds = np.where(np.tile(maxvals, (1, 1, 2)) > 0.0, preds, -1) + return preds, maxvals + + +def _get_max_preds_3d(heatmaps): + """Get keypoint predictions from 3D score maps. + + Note: + batch size: N + num keypoints: K + heatmap depth size: D + heatmap height: H + heatmap width: W + + Args: + heatmaps (np.ndarray[N, K, D, H, W]): model predicted heatmaps. + + Returns: + tuple: A tuple containing aggregated results. + + - preds (np.ndarray[N, K, 3]): Predicted keypoint location. + - maxvals (np.ndarray[N, K, 1]): Scores (confidence) of the keypoints. + """ + assert isinstance(heatmaps, np.ndarray), \ + ('heatmaps should be numpy.ndarray') + assert heatmaps.ndim == 5, 'heatmaps should be 5-ndim' + + N, K, D, H, W = heatmaps.shape + heatmaps_reshaped = heatmaps.reshape((N, K, -1)) + idx = np.argmax(heatmaps_reshaped, 2).reshape((N, K, 1)) + maxvals = np.amax(heatmaps_reshaped, 2).reshape((N, K, 1)) + + preds = np.zeros((N, K, 3), dtype=np.float32) + _idx = idx[..., 0] + preds[..., 2] = _idx // (H * W) + preds[..., 1] = (_idx // W) % H + preds[..., 0] = _idx % W + + preds = np.where(maxvals > 0.0, preds, -1) + return preds, maxvals + + +def pose_pck_accuracy(output, target, mask, thr=0.05, normalize=None): + """Calculate the pose accuracy of PCK for each individual keypoint and the + averaged accuracy across all keypoints from heatmaps. + + Note: + PCK metric measures accuracy of the localization of the body joints. + The distances between predicted positions and the ground-truth ones + are typically normalized by the bounding box size. + The threshold (thr) of the normalized distance is commonly set + as 0.05, 0.1 or 0.2 etc. + + - batch_size: N + - num_keypoints: K + - heatmap height: H + - heatmap width: W + + Args: + output (np.ndarray[N, K, H, W]): Model output heatmaps. + target (np.ndarray[N, K, H, W]): Groundtruth heatmaps. + mask (np.ndarray[N, K]): Visibility of the target. False for invisible + joints, and True for visible. Invisible joints will be ignored for + accuracy calculation. + thr (float): Threshold of PCK calculation. Default 0.05. + normalize (np.ndarray[N, 2]): Normalization factor for H&W. + + Returns: + tuple: A tuple containing keypoint accuracy. + + - np.ndarray[K]: Accuracy of each keypoint. + - float: Averaged accuracy across all keypoints. 
+ - int: Number of valid keypoints. + """ + N, K, H, W = output.shape + if K == 0: + return None, 0, 0 + if normalize is None: + normalize = np.tile(np.array([[H, W]]), (N, 1)) + + pred, _ = _get_max_preds(output) + gt, _ = _get_max_preds(target) + return keypoint_pck_accuracy(pred, gt, mask, thr, normalize) + + +def keypoint_pck_accuracy(pred, gt, mask, thr, normalize): + """Calculate the pose accuracy of PCK for each individual keypoint and the + averaged accuracy across all keypoints for coordinates. + + Note: + PCK metric measures accuracy of the localization of the body joints. + The distances between predicted positions and the ground-truth ones + are typically normalized by the bounding box size. + The threshold (thr) of the normalized distance is commonly set + as 0.05, 0.1 or 0.2 etc. + + - batch_size: N + - num_keypoints: K + + Args: + pred (np.ndarray[N, K, 2]): Predicted keypoint location. + gt (np.ndarray[N, K, 2]): Groundtruth keypoint location. + mask (np.ndarray[N, K]): Visibility of the target. False for invisible + joints, and True for visible. Invisible joints will be ignored for + accuracy calculation. + thr (float): Threshold of PCK calculation. + normalize (np.ndarray[N, 2]): Normalization factor for H&W. + + Returns: + tuple: A tuple containing keypoint accuracy. + + - acc (np.ndarray[K]): Accuracy of each keypoint. + - avg_acc (float): Averaged accuracy across all keypoints. + - cnt (int): Number of valid keypoints. + """ + distances = _calc_distances(pred, gt, mask, normalize) + + acc = np.array([_distance_acc(d, thr) for d in distances]) + valid_acc = acc[acc >= 0] + cnt = len(valid_acc) + avg_acc = valid_acc.mean() if cnt > 0 else 0 + return acc, avg_acc, cnt + + +def keypoint_auc(pred, gt, mask, normalize, num_step=20): + """Calculate the pose accuracy of PCK for each individual keypoint and the + averaged accuracy across all keypoints for coordinates. + + Note: + - batch_size: N + - num_keypoints: K + + Args: + pred (np.ndarray[N, K, 2]): Predicted keypoint location. + gt (np.ndarray[N, K, 2]): Groundtruth keypoint location. + mask (np.ndarray[N, K]): Visibility of the target. False for invisible + joints, and True for visible. Invisible joints will be ignored for + accuracy calculation. + normalize (float): Normalization factor. + + Returns: + float: Area under curve. + """ + nor = np.tile(np.array([[normalize, normalize]]), (pred.shape[0], 1)) + x = [1.0 * i / num_step for i in range(num_step)] + y = [] + for thr in x: + _, avg_acc, _ = keypoint_pck_accuracy(pred, gt, mask, thr, nor) + y.append(avg_acc) + + auc = 0 + for i in range(num_step): + auc += 1.0 / num_step * y[i] + return auc + + +def keypoint_nme(pred, gt, mask, normalize_factor): + """Calculate the normalized mean error (NME). + + Note: + - batch_size: N + - num_keypoints: K + + Args: + pred (np.ndarray[N, K, 2]): Predicted keypoint location. + gt (np.ndarray[N, K, 2]): Groundtruth keypoint location. + mask (np.ndarray[N, K]): Visibility of the target. False for invisible + joints, and True for visible. Invisible joints will be ignored for + accuracy calculation. + normalize_factor (np.ndarray[N, 2]): Normalization factor. + + Returns: + float: normalized mean error + """ + distances = _calc_distances(pred, gt, mask, normalize_factor) + distance_valid = distances[distances != -1] + return distance_valid.sum() / max(1, len(distance_valid)) + + +def keypoint_epe(pred, gt, mask): + """Calculate the end-point error. 
+
+    Note:
+        - batch_size: N
+        - num_keypoints: K
+
+    Args:
+        pred (np.ndarray[N, K, 2]): Predicted keypoint location.
+        gt (np.ndarray[N, K, 2]): Groundtruth keypoint location.
+        mask (np.ndarray[N, K]): Visibility of the target. False for invisible
+            joints, and True for visible. Invisible joints will be ignored for
+            accuracy calculation.
+
+    Returns:
+        float: Average end-point error.
+    """
+
+    distances = _calc_distances(
+        pred, gt, mask,
+        np.ones((pred.shape[0], pred.shape[2]), dtype=np.float32))
+    distance_valid = distances[distances != -1]
+    return distance_valid.sum() / max(1, len(distance_valid))
+
+
+def _taylor(heatmap, coord):
+    """Distribution aware coordinate decoding method.
+
+    Note:
+        - heatmap height: H
+        - heatmap width: W
+
+    Args:
+        heatmap (np.ndarray[H, W]): Heatmap of a particular joint type.
+        coord (np.ndarray[2,]): Coordinates of the predicted keypoints.
+
+    Returns:
+        np.ndarray[2,]: Updated coordinates.
+    """
+    H, W = heatmap.shape[:2]
+    px, py = int(coord[0]), int(coord[1])
+    if 1 < px < W - 2 and 1 < py < H - 2:
+        dx = 0.5 * (heatmap[py][px + 1] - heatmap[py][px - 1])
+        dy = 0.5 * (heatmap[py + 1][px] - heatmap[py - 1][px])
+        dxx = 0.25 * (
+            heatmap[py][px + 2] - 2 * heatmap[py][px] + heatmap[py][px - 2])
+        dxy = 0.25 * (
+            heatmap[py + 1][px + 1] - heatmap[py - 1][px + 1] -
+            heatmap[py + 1][px - 1] + heatmap[py - 1][px - 1])
+        dyy = 0.25 * (
+            heatmap[py + 2][px] - 2 * heatmap[py][px] +
+            heatmap[py - 2][px])
+        derivative = np.array([[dx], [dy]])
+        hessian = np.array([[dxx, dxy], [dxy, dyy]])
+        if dxx * dyy - dxy**2 != 0:
+            hessianinv = np.linalg.inv(hessian)
+            offset = -hessianinv @ derivative
+            offset = np.squeeze(np.array(offset.T), axis=0)
+            coord += offset
+    return coord
+
+
+def post_dark_udp(coords, batch_heatmaps, kernel=3):
+    """DARK post-processing, implemented by UDP. Paper ref: Huang et al. The
+    Devil is in the Details: Delving into Unbiased Data Processing for Human
+    Pose Estimation (CVPR 2020). Zhang et al. Distribution-Aware Coordinate
+    Representation for Human Pose Estimation (CVPR 2020).
+
+    Note:
+        - batch size: B
+        - num keypoints: K
+        - num persons: N
+        - height of heatmaps: H
+        - width of heatmaps: W
+
+        B=1 for bottom_up paradigm where all persons share the same heatmap.
+        B=N for top_down paradigm where each person has its own heatmaps.
+
+    Args:
+        coords (np.ndarray[N, K, 2]): Initial coordinates of human pose.
+        batch_heatmaps (np.ndarray[B, K, H, W]): Model predicted heatmaps.
+        kernel (int): Gaussian kernel size (K) for modulation.
+
+    Returns:
+        np.ndarray([N, K, 2]): Refined coordinates.
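+
+    Example:
+        A minimal sketch with toy values (a single 8x8 map with one peak;
+        values chosen only for illustration):
+
+        >>> heatmaps = np.zeros((1, 1, 8, 8), dtype=np.float32)
+        >>> heatmaps[0, 0, 4, 3] = 1.0              # peak at (x=3, y=4)
+        >>> coords = np.array([[[3.0, 4.0]]])       # initial integer guess
+        >>> post_dark_udp(coords, heatmaps, kernel=3).shape
+        (1, 1, 2)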
+ """ + if not isinstance(batch_heatmaps, np.ndarray): + batch_heatmaps = batch_heatmaps.cpu().numpy() + B, K, H, W = batch_heatmaps.shape + N = coords.shape[0] + assert (B == 1 or B == N) + for heatmaps in batch_heatmaps: + for heatmap in heatmaps: + cv2.GaussianBlur(heatmap, (kernel, kernel), 0, heatmap) + np.clip(batch_heatmaps, 0.001, 50, batch_heatmaps) + np.log(batch_heatmaps, batch_heatmaps) + + batch_heatmaps_pad = np.pad( + batch_heatmaps, ((0, 0), (0, 0), (1, 1), (1, 1)), + mode='edge').flatten() + + index = coords[..., 0] + 1 + (coords[..., 1] + 1) * (W + 2) + index += (W + 2) * (H + 2) * np.arange(0, B * K).reshape(-1, K) + index = index.astype(int).reshape(-1, 1) + i_ = batch_heatmaps_pad[index] + ix1 = batch_heatmaps_pad[index + 1] + iy1 = batch_heatmaps_pad[index + W + 2] + ix1y1 = batch_heatmaps_pad[index + W + 3] + ix1_y1_ = batch_heatmaps_pad[index - W - 3] + ix1_ = batch_heatmaps_pad[index - 1] + iy1_ = batch_heatmaps_pad[index - 2 - W] + + dx = 0.5 * (ix1 - ix1_) + dy = 0.5 * (iy1 - iy1_) + derivative = np.concatenate([dx, dy], axis=1) + derivative = derivative.reshape(N, K, 2, 1) + dxx = ix1 - 2 * i_ + ix1_ + dyy = iy1 - 2 * i_ + iy1_ + dxy = 0.5 * (ix1y1 - ix1 - iy1 + i_ + i_ - ix1_ - iy1_ + ix1_y1_) + hessian = np.concatenate([dxx, dxy, dxy, dyy], axis=1) + hessian = hessian.reshape(N, K, 2, 2) + hessian = np.linalg.inv(hessian + np.finfo(np.float32).eps * np.eye(2)) + coords -= np.einsum('ijmn,ijnk->ijmk', hessian, derivative).squeeze() + return coords + + +def _gaussian_blur(heatmaps, kernel=11): + """Modulate heatmap distribution with Gaussian. + sigma = 0.3*((kernel_size-1)*0.5-1)+0.8 + sigma~=3 if k=17 + sigma=2 if k=11; + sigma~=1.5 if k=7; + sigma~=1 if k=3; + + Note: + - batch_size: N + - num_keypoints: K + - heatmap height: H + - heatmap width: W + + Args: + heatmaps (np.ndarray[N, K, H, W]): model predicted heatmaps. + kernel (int): Gaussian kernel size (K) for modulation, which should + match the heatmap gaussian sigma when training. + K=17 for sigma=3 and k=11 for sigma=2. + + Returns: + np.ndarray ([N, K, H, W]): Modulated heatmap distribution. + """ + assert kernel % 2 == 1 + + border = (kernel - 1) // 2 + batch_size = heatmaps.shape[0] + num_joints = heatmaps.shape[1] + height = heatmaps.shape[2] + width = heatmaps.shape[3] + for i in range(batch_size): + for j in range(num_joints): + origin_max = np.max(heatmaps[i, j]) + dr = np.zeros((height + 2 * border, width + 2 * border), + dtype=np.float32) + dr[border:-border, border:-border] = heatmaps[i, j].copy() + dr = cv2.GaussianBlur(dr, (kernel, kernel), 0) + heatmaps[i, j] = dr[border:-border, border:-border].copy() + heatmaps[i, j] *= origin_max / np.max(heatmaps[i, j]) + return heatmaps + + +def keypoints_from_regression(regression_preds, center, scale, img_size): + """Get final keypoint predictions from regression vectors and transform + them back to the image. + + Note: + - batch_size: N + - num_keypoints: K + + Args: + regression_preds (np.ndarray[N, K, 2]): model prediction. + center (np.ndarray[N, 2]): Center of the bounding box (x, y). + scale (np.ndarray[N, 2]): Scale of the bounding box + wrt height/width. + img_size (list(img_width, img_height)): model input image size. + + Returns: + tuple: + + - preds (np.ndarray[N, K, 2]): Predicted keypoint location in images. + - maxvals (np.ndarray[N, K, 1]): Scores (confidence) of the keypoints. 
+ """ + N, K, _ = regression_preds.shape + preds, maxvals = regression_preds, np.ones((N, K, 1), dtype=np.float32) + + preds = preds * img_size + + # Transform back to the image + for i in range(N): + preds[i] = transform_preds(preds[i], center[i], scale[i], img_size) + + return preds, maxvals + + +def keypoints_from_heatmaps(heatmaps, + center, + scale, + unbiased=False, + post_process='default', + kernel=11, + valid_radius_factor=0.0546875, + use_udp=False, + target_type='GaussianHeatmap'): + """Get final keypoint predictions from heatmaps and transform them back to + the image. + + Note: + - batch size: N + - num keypoints: K + - heatmap height: H + - heatmap width: W + + Args: + heatmaps (np.ndarray[N, K, H, W]): model predicted heatmaps. + center (np.ndarray[N, 2]): Center of the bounding box (x, y). + scale (np.ndarray[N, 2]): Scale of the bounding box + wrt height/width. + post_process (str/None): Choice of methods to post-process + heatmaps. Currently supported: None, 'default', 'unbiased', + 'megvii'. + unbiased (bool): Option to use unbiased decoding. Mutually + exclusive with megvii. + Note: this arg is deprecated and unbiased=True can be replaced + by post_process='unbiased' + Paper ref: Zhang et al. Distribution-Aware Coordinate + Representation for Human Pose Estimation (CVPR 2020). + kernel (int): Gaussian kernel size (K) for modulation, which should + match the heatmap gaussian sigma when training. + K=17 for sigma=3 and k=11 for sigma=2. + valid_radius_factor (float): The radius factor of the positive area + in classification heatmap for UDP. + use_udp (bool): Use unbiased data processing. + target_type (str): 'GaussianHeatmap' or 'CombinedTarget'. + GaussianHeatmap: Classification target with gaussian distribution. + CombinedTarget: The combination of classification target + (response map) and regression target (offset map). + Paper ref: Huang et al. The Devil is in the Details: Delving into + Unbiased Data Processing for Human Pose Estimation (CVPR 2020). + + Returns: + tuple: A tuple containing keypoint predictions and scores. + + - preds (np.ndarray[N, K, 2]): Predicted keypoint location in images. + - maxvals (np.ndarray[N, K, 1]): Scores (confidence) of the keypoints. 
+ """ + # Avoid being affected + heatmaps = heatmaps.copy() + + # detect conflicts + if unbiased: + assert post_process not in [False, None, 'megvii'] + if post_process in ['megvii', 'unbiased']: + assert kernel > 0 + if use_udp: + assert not post_process == 'megvii' + + # normalize configs + if post_process is False: + warnings.warn( + 'post_process=False is deprecated, ' + 'please use post_process=None instead', DeprecationWarning) + post_process = None + elif post_process is True: + if unbiased is True: + warnings.warn( + 'post_process=True, unbiased=True is deprecated,' + " please use post_process='unbiased' instead", + DeprecationWarning) + post_process = 'unbiased' + else: + warnings.warn( + 'post_process=True, unbiased=False is deprecated, ' + "please use post_process='default' instead", + DeprecationWarning) + post_process = 'default' + elif post_process == 'default': + if unbiased is True: + warnings.warn( + 'unbiased=True is deprecated, please use ' + "post_process='unbiased' instead", DeprecationWarning) + post_process = 'unbiased' + + # start processing + if post_process == 'megvii': + heatmaps = _gaussian_blur(heatmaps, kernel=kernel) + + N, K, H, W = heatmaps.shape + if use_udp: + if target_type.lower() == 'GaussianHeatMap'.lower(): + preds, maxvals = _get_max_preds(heatmaps) + preds = post_dark_udp(preds, heatmaps, kernel=kernel) + elif target_type.lower() == 'CombinedTarget'.lower(): + for person_heatmaps in heatmaps: + for i, heatmap in enumerate(person_heatmaps): + kt = 2 * kernel + 1 if i % 3 == 0 else kernel + cv2.GaussianBlur(heatmap, (kt, kt), 0, heatmap) + # valid radius is in direct proportion to the height of heatmap. + valid_radius = valid_radius_factor * H + offset_x = heatmaps[:, 1::3, :].flatten() * valid_radius + offset_y = heatmaps[:, 2::3, :].flatten() * valid_radius + heatmaps = heatmaps[:, ::3, :] + preds, maxvals = _get_max_preds(heatmaps) + index = preds[..., 0] + preds[..., 1] * W + index += W * H * np.arange(0, N * K / 3) + index = index.astype(int).reshape(N, K // 3, 1) + preds += np.concatenate((offset_x[index], offset_y[index]), axis=2) + else: + raise ValueError('target_type should be either ' + "'GaussianHeatmap' or 'CombinedTarget'") + else: + preds, maxvals = _get_max_preds(heatmaps) + if post_process == 'unbiased': # alleviate biased coordinate + # apply Gaussian distribution modulation. + heatmaps = np.log( + np.maximum(_gaussian_blur(heatmaps, kernel), 1e-10)) + for n in range(N): + for k in range(K): + preds[n][k] = _taylor(heatmaps[n][k], preds[n][k]) + elif post_process is not None: + # add +/-0.25 shift to the predicted locations for higher acc. + for n in range(N): + for k in range(K): + heatmap = heatmaps[n][k] + px = int(preds[n][k][0]) + py = int(preds[n][k][1]) + if 1 < px < W - 1 and 1 < py < H - 1: + diff = np.array([ + heatmap[py][px + 1] - heatmap[py][px - 1], + heatmap[py + 1][px] - heatmap[py - 1][px] + ]) + preds[n][k] += np.sign(diff) * .25 + if post_process == 'megvii': + preds[n][k] += 0.5 + + # Transform back to the image + for i in range(N): + preds[i] = transform_preds( + preds[i], center[i], scale[i], [W, H], use_udp=use_udp) + + if post_process == 'megvii': + maxvals = maxvals / 255.0 + 0.5 + + return preds, maxvals + + +def keypoints_from_heatmaps3d(heatmaps, center, scale): + """Get final keypoint predictions from 3d heatmaps and transform them back + to the image. 
+ + Note: + - batch size: N + - num keypoints: K + - heatmap depth size: D + - heatmap height: H + - heatmap width: W + + Args: + heatmaps (np.ndarray[N, K, D, H, W]): model predicted heatmaps. + center (np.ndarray[N, 2]): Center of the bounding box (x, y). + scale (np.ndarray[N, 2]): Scale of the bounding box + wrt height/width. + + Returns: + tuple: A tuple containing keypoint predictions and scores. + + - preds (np.ndarray[N, K, 3]): Predicted 3d keypoint location \ + in images. + - maxvals (np.ndarray[N, K, 1]): Scores (confidence) of the keypoints. + """ + N, K, D, H, W = heatmaps.shape + preds, maxvals = _get_max_preds_3d(heatmaps) + # Transform back to the image + for i in range(N): + preds[i, :, :2] = transform_preds(preds[i, :, :2], center[i], scale[i], + [W, H]) + return preds, maxvals + + +def multilabel_classification_accuracy(pred, gt, mask, thr=0.5): + """Get multi-label classification accuracy. + + Note: + - batch size: N + - label number: L + + Args: + pred (np.ndarray[N, L, 2]): model predicted labels. + gt (np.ndarray[N, L, 2]): ground-truth labels. + mask (np.ndarray[N, 1] or np.ndarray[N, L] ): reliability of + ground-truth labels. + + Returns: + float: multi-label classification accuracy. + """ + # we only compute accuracy on the samples with ground-truth of all labels. + valid = (mask > 0).min(axis=1) if mask.ndim == 2 else (mask > 0) + pred, gt = pred[valid], gt[valid] + + if pred.shape[0] == 0: + acc = 0.0 # when no sample is with gt labels, set acc to 0. + else: + # The classification of a sample is regarded as correct + # only if it's correct for all labels. + acc = (((pred - thr) * (gt - thr)) > 0).all(axis=1).mean() + return acc + + + +def get_transform(center, scale, res, rot=0): + """Generate transformation matrix.""" + # res: (height, width), (rows, cols) + crop_aspect_ratio = res[0] / float(res[1]) + h = 200 * scale + w = h / crop_aspect_ratio + t = np.zeros((3, 3)) + t[0, 0] = float(res[1]) / w + t[1, 1] = float(res[0]) / h + t[0, 2] = res[1] * (-float(center[0]) / w + .5) + t[1, 2] = res[0] * (-float(center[1]) / h + .5) + t[2, 2] = 1 + if not rot == 0: + rot = -rot # To match direction of rotation from cropping + rot_mat = np.zeros((3, 3)) + rot_rad = rot * np.pi / 180 + sn, cs = np.sin(rot_rad), np.cos(rot_rad) + rot_mat[0, :2] = [cs, -sn] + rot_mat[1, :2] = [sn, cs] + rot_mat[2, 2] = 1 + # Need to rotate around center + t_mat = np.eye(3) + t_mat[0, 2] = -res[1] / 2 + t_mat[1, 2] = -res[0] / 2 + t_inv = t_mat.copy() + t_inv[:2, 2] *= -1 + t = np.dot(t_inv, np.dot(rot_mat, np.dot(t_mat, t))) + return t + + +def transform(pt, center, scale, res, invert=0, rot=0): + """Transform pixel location to different reference.""" + t = get_transform(center, scale, res, rot=rot) + if invert: + t = np.linalg.inv(t) + new_pt = np.array([pt[0] - 1, pt[1] - 1, 1.]).T + new_pt = np.dot(t, new_pt) + return np.array([round(new_pt[0]), round(new_pt[1])], dtype=int) + 1 + + +def bbox_from_detector(bbox, input_resolution=(224, 224), rescale=1.25): + """ + Get center and scale of bounding box from bounding box. + The expected format is [min_x, min_y, max_x, max_y]. 
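+
+    Example:
+        A quick sanity check with an arbitrary toy box at the default
+        224x224 crop resolution:
+
+        >>> center, scale = bbox_from_detector([0, 0, 100, 200])
+        >>> center.tolist(), scale.tolist()
+        ([50.0, 100.0], [1.25, 1.25])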
+ """ + CROP_IMG_HEIGHT, CROP_IMG_WIDTH = input_resolution + CROP_ASPECT_RATIO = CROP_IMG_HEIGHT / float(CROP_IMG_WIDTH) + + # center + center_x = (bbox[0] + bbox[2]) / 2.0 + center_y = (bbox[1] + bbox[3]) / 2.0 + center = np.array([center_x, center_y]) + + # scale + bbox_w = bbox[2] - bbox[0] + bbox_h = bbox[3] - bbox[1] + bbox_size = max(bbox_w * CROP_ASPECT_RATIO, bbox_h) + + scale = np.array([bbox_size / CROP_ASPECT_RATIO, bbox_size]) / 200.0 + # scale = bbox_size / 200.0 + # adjust bounding box tightness + scale *= rescale + return center, scale + + +def crop(img, center, scale, res): + """ + Crop image according to the supplied bounding box. + res: [rows, cols] + """ + # Upper left point + ul = np.array(transform([1, 1], center, max(scale), res, invert=1)) - 1 + # Bottom right point + br = np.array(transform([res[1] + 1, res[0] + 1], center, max(scale), res, invert=1)) - 1 + + new_shape = [br[1] - ul[1], br[0] - ul[0]] + if len(img.shape) > 2: + new_shape += [img.shape[2]] + new_img = np.zeros(new_shape, dtype=np.float32) + + # Range to fill new array + new_x = max(0, -ul[0]), min(br[0], len(img[0])) - ul[0] + new_y = max(0, -ul[1]), min(br[1], len(img)) - ul[1] + # Range to sample from original image + old_x = max(0, ul[0]), min(len(img[0]), br[0]) + old_y = max(0, ul[1]), min(len(img), br[1]) + try: + new_img[new_y[0]:new_y[1], new_x[0]:new_x[1]] = img[old_y[0]:old_y[1], old_x[0]:old_x[1]] + except Exception as e: + print(e) + + new_img = cv2.resize(new_img, (res[1], res[0])) # (cols, rows) + return new_img, new_shape, (old_x, old_y), (new_x, new_y) # , ul, br + + +def split_kp2ds_for_aa(kp2ds, ret_face=False): + kp2ds_body = (kp2ds[[0, 6, 6, 8, 10, 5, 7, 9, 12, 14, 16, 11, 13, 15, 2, 1, 4, 3, 17, 20]] + kp2ds[[0, 5, 6, 8, 10, 5, 7, 9, 12, 14, 16, 11, 13, 15, 2, 1, 4, 3, 18, 21]]) / 2 + kp2ds_lhand = kp2ds[91:112] + kp2ds_rhand = kp2ds[112:133] + kp2ds_face = kp2ds[22:91] + if ret_face: + return kp2ds_body.copy(), kp2ds_lhand.copy(), kp2ds_rhand.copy(), kp2ds_face.copy() + return kp2ds_body.copy(), kp2ds_lhand.copy(), kp2ds_rhand.copy() + + +def load_pose_metas_from_kp2ds_seq(kp2ds_seq, width, height): + metas = [] + last_kp2ds_body = None + for kps in kp2ds_seq: + kps = kps.copy() + kps[:, 0] /= width + kps[:, 1] /= height + kp2ds_body, kp2ds_lhand, kp2ds_rhand, kp2ds_face = split_kp2ds_for_aa(kps, ret_face=True) + + # Exclude cases where all values are less than 0 + if last_kp2ds_body is not None and kp2ds_body[:, :2].min(axis=1).max() < 0: + kp2ds_body = last_kp2ds_body + last_kp2ds_body = kp2ds_body + + meta = { + "width": width, + "height": height, + "keypoints_body": kp2ds_body, + "keypoints_left_hand": kp2ds_lhand, + "keypoints_right_hand": kp2ds_rhand, + "keypoints_face": kp2ds_face, + } + metas.append(meta) + return metas \ No newline at end of file diff --git a/zavodik/nodes/ComfyUI-WanAnimatePreprocess/pyproject.toml b/zavodik/nodes/ComfyUI-WanAnimatePreprocess/pyproject.toml new file mode 100644 index 0000000000000000000000000000000000000000..7364186aedf9e3f31921262ffd7d059e4781953e --- /dev/null +++ b/zavodik/nodes/ComfyUI-WanAnimatePreprocess/pyproject.toml @@ -0,0 +1,15 @@ +[project] +name = "ComfyUI-WanAnimatePreprocess" +description = "ComfyUI nodes for WanAnimate input processing" +version = "1.0.2" +license = {file = "LICENSE"} +dependencies = ["opencv-python", "onnxruntime-gpu", "onnx"] + +[project.urls] +Repository = "https://github.com/kijai/ComfyUI-WanAnimatePreprocess" +# Used by Comfy Registry https://comfyregistry.org + +[tool.comfy] +PublisherId = "kijai" 
+DisplayName = "ComfyUI-WanAnimatePreprocess"
+Icon = ""
diff --git a/zavodik/nodes/ComfyUI-WanAnimatePreprocess/readme.md b/zavodik/nodes/ComfyUI-WanAnimatePreprocess/readme.md
new file mode 100644
index 0000000000000000000000000000000000000000..9e9c0994239661d1abe1220d6c510fe47696e25e
--- /dev/null
+++ b/zavodik/nodes/ComfyUI-WanAnimatePreprocess/readme.md
@@ -0,0 +1,29 @@
+## ComfyUI helper nodes for [Wan video 2.2 Animate preprocessing](https://github.com/Wan-Video/Wan2.2/tree/main/wan/modules/animate/preprocess)
+
+
+Nodes to run the ViTPose model, get face crops and a keypoint list for SAM2 segmentation.
+
+Models:
+
+Place them in `ComfyUI/models/detection` (subject to change in the future).
+
+YOLO:
+
+https://huggingface.co/Wan-AI/Wan2.2-Animate-14B/blob/main/process_checkpoint/det/yolov10m.onnx
+
+ViTPose ONNX:
+
+Use either the Large model from here:
+
+https://huggingface.co/JunkyByte/easy_ViTPose/tree/main/onnx/wholebody
+
+Or the Huge model as in the original code; it is split into two files due to the ONNX file size limit:
+
+Both files need to be in the same directory, with the .onnx file selected in the model loader:
+
+`vitpose_h_wholebody_data.bin` and `vitpose_h_wholebody_model.onnx`
+
+https://huggingface.co/Kijai/vitpose_comfy/tree/main/onnx
+
+
+
diff --git a/zavodik/nodes/ComfyUI-WanAnimatePreprocess/requirements.txt b/zavodik/nodes/ComfyUI-WanAnimatePreprocess/requirements.txt
new file mode 100644
index 0000000000000000000000000000000000000000..16d01a358f186c4e920096b5115ded9a956e7d50
--- /dev/null
+++ b/zavodik/nodes/ComfyUI-WanAnimatePreprocess/requirements.txt
@@ -0,0 +1,3 @@
+onnx
+onnxruntime-gpu
+opencv-python
\ No newline at end of file
diff --git a/zavodik/nodes/ComfyUI-WanAnimatePreprocess/retarget_pose.py b/zavodik/nodes/ComfyUI-WanAnimatePreprocess/retarget_pose.py
new file mode 100644
index 0000000000000000000000000000000000000000..714fda616d3eb462ed3f01eac8a0de11cf696137
--- /dev/null
+++ b/zavodik/nodes/ComfyUI-WanAnimatePreprocess/retarget_pose.py
@@ -0,0 +1,843 @@
+# Copyright 2024-2025 The Alibaba Wan Team Authors. All rights reserved.
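+#
+# Pose retargeting: bone lengths are measured per limb on a source (template)
+# skeleton and a destination (reference) skeleton, the length ratios rescale
+# the template motion, and the result is re-anchored at the feet (full body)
+# or at the neck (half body). A rough sketch of the per-bone ratio idea, with
+# made-up values for a single neck->hip bone:
+#
+#     src_len = 0.4               # bone length on the template skeleton
+#     dst_len = 0.6               # same bone on the reference skeleton
+#     ratio = dst_len / src_len   # = 1.5, applied when redrawing the template
+#
+# get_length() and retarget_pose() below generalize this over all of limbSeq.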
+import numpy as np
+from tqdm import tqdm
+import math
+from typing import NamedTuple
+import copy
+from .pose_utils.pose2d_utils import AAPoseMeta
+
+# skeleton keypoint names and bone connections
+# (limbSeq entries are 1-based indices into keypoint_list)
+keypoint_list = [
+    "Nose",
+    "Neck",
+    "RShoulder",
+    "RElbow",
+    "RWrist", # No.4
+    "LShoulder",
+    "LElbow",
+    "LWrist", # No.7
+    "RHip",
+    "RKnee",
+    "RAnkle", # No.10
+    "LHip",
+    "LKnee",
+    "LAnkle", # No.13
+    "REye",
+    "LEye",
+    "REar",
+    "LEar",
+    "LToe",
+    "RToe",
+]
+
+
+limbSeq = [
+    [2, 3], [2, 6], # shoulders
+    [3, 4], [4, 5], # right arm (RShoulder -> RElbow -> RWrist)
+    [6, 7], [7, 8], # left arm (LShoulder -> LElbow -> LWrist)
+    [2, 9], [9, 10], [10, 11], # right leg
+    [2, 12], [12, 13], [13, 14], # left leg
+    [2, 1], [1, 15], [15, 17], [1, 16], [16, 18], # face (nose, eyes, ears)
+    [14, 19], # left foot
+    [11, 20] # right foot
+]
+
+eps = 0.01
+
+class Keypoint(NamedTuple):
+    x: float
+    y: float
+    score: float = 1.0
+    id: int = -1
+
+
+# measure a bone's length on one skeleton; the src and dst lengths
+# are later combined into per-limb ratios
+def get_length(skeleton, limb):
+
+    k1_index, k2_index = limb
+
+    H, W = skeleton['height'], skeleton['width']
+    keypoints = skeleton['keypoints_body']
+    keypoint1 = keypoints[k1_index - 1]
+    keypoint2 = keypoints[k2_index - 1]
+
+    if keypoint1 is None or keypoint2 is None:
+        return None, None, None
+
+    X = np.array([keypoint1[0], keypoint2[0]]) * float(W)
+    Y = np.array([keypoint1[1], keypoint2[1]]) * float(H)
+    length = ((X[0] - X[1]) ** 2 + (Y[0] - Y[1]) ** 2) ** 0.5
+
+    return X, Y, length
+
+
+
+def get_handpose_meta(keypoints, delta, src_H, src_W):
+
+    new_keypoints = []
+
+    for idx, keypoint in enumerate(keypoints):
+        if keypoint is None:
+            new_keypoints.append(None)
+            continue
+        if keypoint.score == 0:
+            new_keypoints.append(None)
+            continue
+
+        x, y = keypoint.x, keypoint.y
+        x = int(x * src_W + delta[0])
+        y = int(y * src_H + delta[1])
+
+        new_keypoints.append(
+            Keypoint(
+                x=x,
+                y=y,
+                score=keypoint.score,
+            ))
+
+    return new_keypoints
+
+
+def deal_hand_keypoints(hand_res, r_ratio, l_ratio, hand_score_th = 0.5):
+
+    left_hand = []
+    right_hand = []
+
+    left_delta_x = hand_res['left'][0][0] * (l_ratio - 1)
+    left_delta_y = hand_res['left'][0][1] * (l_ratio - 1)
+
+    right_delta_x = hand_res['right'][0][0] * (r_ratio - 1)
+    right_delta_y = hand_res['right'][0][1] * (r_ratio - 1)
+
+    length = len(hand_res['left'])
+
+    for i in range(length):
+        # left hand
+        if hand_res['left'][i][2] < hand_score_th:
+            left_hand.append(
+                Keypoint(
+                    x=-1,
+                    y=-1,
+                    score=0,
+                )
+            )
+        else:
+            left_hand.append(
+                Keypoint(
+                    x=hand_res['left'][i][0] * l_ratio - left_delta_x,
+                    y=hand_res['left'][i][1] * l_ratio - left_delta_y,
+                    score = hand_res['left'][i][2]
+                )
+            )
+
+        # right hand
+        if hand_res['right'][i][2] < hand_score_th:
+            right_hand.append(
+                Keypoint(
+                    x=-1,
+                    y=-1,
+                    score=0,
+                )
+            )
+        else:
+            right_hand.append(
+                Keypoint(
+                    x=hand_res['right'][i][0] * r_ratio - right_delta_x,
+                    y=hand_res['right'][i][1] * r_ratio - right_delta_y,
+                    score = hand_res['right'][i][2]
+                )
+            )
+
+    # note: returns the hands in (right, left) order
+    return right_hand, left_hand
+
+
+def get_scaled_pose(canvas, src_canvas, keypoints, keypoints_hand, bone_ratio_list, delta_ground_x, delta_ground_y,
+                    rescaled_src_ground_x, body_flag, id, scale_min, threshold = 0.4):
+
+    H, W = canvas
+    src_H, src_W = src_canvas
+
+    new_length_list = [ ]
+    angle_list = [ ]
+
+    # keypoints from 0-1 to H/W range
+    for idx in range(len(keypoints)):
+        if keypoints[idx] is None or len(keypoints[idx]) == 0:
+            continue
+
+        keypoints[idx] = [keypoints[idx][0] * src_W, keypoints[idx][1] * src_H, keypoints[idx][2]]
+
+    # first traverse, 
get new_length_list and angle_list + for idx, (k1_index, k2_index) in enumerate(limbSeq): + keypoint1 = keypoints[k1_index - 1] + keypoint2 = keypoints[k2_index - 1] + + if keypoint1 is None or keypoint2 is None or len(keypoint1) == 0 or len(keypoint2) == 0: + new_length_list.append(None) + angle_list.append(None) + continue + + Y = np.array([keypoint1[0], keypoint2[0]]) #* float(W) + X = np.array([keypoint1[1], keypoint2[1]]) #* float(H) + + length = ((X[0] - X[1]) ** 2 + (Y[0] - Y[1]) ** 2) ** 0.5 + + new_length = length * bone_ratio_list[idx] + angle = math.degrees(math.atan2(X[0] - X[1], Y[0] - Y[1])) + + new_length_list.append(new_length) + angle_list.append(angle) + + # Keep foot length within 0.5x calf length + foot_lower_leg_ratio = 0.5 + if new_length_list[8] != None and new_length_list[18] != None: + if new_length_list[18] > new_length_list[8] * foot_lower_leg_ratio: + new_length_list[18] = new_length_list[8] * foot_lower_leg_ratio + + if new_length_list[11] != None and new_length_list[17] != None: + if new_length_list[17] > new_length_list[11] * foot_lower_leg_ratio: + new_length_list[17] = new_length_list[11] * foot_lower_leg_ratio + + # second traverse, calculate new keypoints + rescale_keypoints = keypoints.copy() + + for idx, (k1_index, k2_index) in enumerate(limbSeq): + # update dst_keypoints + start_keypoint = rescale_keypoints[k1_index - 1] + new_length = new_length_list[idx] + angle = angle_list[idx] + + if rescale_keypoints[k1_index - 1] is None or rescale_keypoints[k2_index - 1] is None or \ + len(rescale_keypoints[k1_index - 1]) == 0 or len(rescale_keypoints[k2_index - 1]) == 0: + continue + + # calculate end_keypoint + delta_x = new_length * math.cos(math.radians(angle)) + delta_y = new_length * math.sin(math.radians(angle)) + + end_keypoint_x = start_keypoint[0] - delta_x + end_keypoint_y = start_keypoint[1] - delta_y + + # update keypoints + rescale_keypoints[k2_index - 1] = [end_keypoint_x, end_keypoint_y, rescale_keypoints[k2_index - 1][2]] + + if id == 0: + if body_flag == 'full_body' and rescale_keypoints[8] != None and rescale_keypoints[11] != None: + delta_ground_x_offset_first_frame = (rescale_keypoints[8][0] + rescale_keypoints[11][0]) / 2 - rescaled_src_ground_x + delta_ground_x += delta_ground_x_offset_first_frame + elif body_flag == 'half_body' and rescale_keypoints[1] != None: + delta_ground_x_offset_first_frame = rescale_keypoints[1][0] - rescaled_src_ground_x + delta_ground_x += delta_ground_x_offset_first_frame + + # offset all keypoints + for idx in range(len(rescale_keypoints)): + if rescale_keypoints[idx] is None or len(rescale_keypoints[idx]) == 0 : + continue + rescale_keypoints[idx][0] -= delta_ground_x + rescale_keypoints[idx][1] -= delta_ground_y + + # rescale keypoints to original size + rescale_keypoints[idx][0] /= scale_min + rescale_keypoints[idx][1] /= scale_min + + # Scale hand proportions based on body skeletal ratios + r_ratio = max(bone_ratio_list[0], bone_ratio_list[1]) / scale_min + l_ratio = max(bone_ratio_list[0], bone_ratio_list[1]) / scale_min + left_hand, right_hand = deal_hand_keypoints(keypoints_hand, r_ratio, l_ratio, hand_score_th = threshold) + + left_hand_new = left_hand.copy() + right_hand_new = right_hand.copy() + + if rescale_keypoints[4] == None and rescale_keypoints[7] == None: + pass + + elif rescale_keypoints[4] == None and rescale_keypoints[7] != None: + right_hand_delta = np.array(rescale_keypoints[7][:2]) - np.array(keypoints[7][:2]) + right_hand_new = get_handpose_meta(right_hand, right_hand_delta, src_H, 
src_W) + + elif rescale_keypoints[4] != None and rescale_keypoints[7] == None: + left_hand_delta = np.array(rescale_keypoints[4][:2]) - np.array(keypoints[4][:2]) + left_hand_new = get_handpose_meta(left_hand, left_hand_delta, src_H, src_W) + + else: + # get left_hand and right_hand offset + left_hand_delta = np.array(rescale_keypoints[4][:2]) - np.array(keypoints[4][:2]) + right_hand_delta = np.array(rescale_keypoints[7][:2]) - np.array(keypoints[7][:2]) + + if keypoints[4][0] != None and left_hand[0].x != -1: + left_hand_root_offset = np.array( ( keypoints[4][0] - left_hand[0].x * src_W, keypoints[4][1] - left_hand[0].y * src_H)) + left_hand_delta += left_hand_root_offset + + if keypoints[7][0] != None and right_hand[0].x != -1: + right_hand_root_offset = np.array( ( keypoints[7][0] - right_hand[0].x * src_W, keypoints[7][1] - right_hand[0].y * src_H)) + right_hand_delta += right_hand_root_offset + + dis_left_hand = ((keypoints[4][0] - left_hand[0].x * src_W) ** 2 + (keypoints[4][1] - left_hand[0].y * src_H) ** 2) ** 0.5 + dis_right_hand = ((keypoints[7][0] - left_hand[0].x * src_W) ** 2 + (keypoints[7][1] - left_hand[0].y * src_H) ** 2) ** 0.5 + + if dis_left_hand > dis_right_hand: + right_hand_new = get_handpose_meta(left_hand, right_hand_delta, src_H, src_W) + left_hand_new = get_handpose_meta(right_hand, left_hand_delta, src_H, src_W) + else: + left_hand_new = get_handpose_meta(left_hand, left_hand_delta, src_H, src_W) + right_hand_new = get_handpose_meta(right_hand, right_hand_delta, src_H, src_W) + + # get normalized keypoints_body + norm_body_keypoints = [ ] + for body_keypoint in rescale_keypoints: + if body_keypoint != None: + norm_body_keypoints.append([body_keypoint[0] / W , body_keypoint[1] / H, body_keypoint[2]]) + else: + norm_body_keypoints.append(None) + + frame_info = { + 'height': H, + 'width': W, + 'keypoints_body': norm_body_keypoints, + 'keypoints_left_hand' : left_hand_new, + 'keypoints_right_hand' : right_hand_new, + } + + return frame_info + + +def rescale_skeleton(H, W, keypoints, bone_ratio_list): + + rescale_keypoints = keypoints.copy() + + new_length_list = [ ] + angle_list = [ ] + + # keypoints from 0-1 to H/W range + for idx in range(len(rescale_keypoints)): + if rescale_keypoints[idx] is None or len(rescale_keypoints[idx]) == 0: + continue + + rescale_keypoints[idx] = [rescale_keypoints[idx][0] * W, rescale_keypoints[idx][1] * H] + + # first traverse, get new_length_list and angle_list + for idx, (k1_index, k2_index) in enumerate(limbSeq): + keypoint1 = rescale_keypoints[k1_index - 1] + keypoint2 = rescale_keypoints[k2_index - 1] + + if keypoint1 is None or keypoint2 is None or len(keypoint1) == 0 or len(keypoint2) == 0: + new_length_list.append(None) + angle_list.append(None) + continue + + Y = np.array([keypoint1[0], keypoint2[0]]) #* float(W) + X = np.array([keypoint1[1], keypoint2[1]]) #* float(H) + + length = ((X[0] - X[1]) ** 2 + (Y[0] - Y[1]) ** 2) ** 0.5 + + + new_length = length * bone_ratio_list[idx] + angle = math.degrees(math.atan2(X[0] - X[1], Y[0] - Y[1])) + + new_length_list.append(new_length) + angle_list.append(angle) + + # # second traverse, calculate new keypoints + for idx, (k1_index, k2_index) in enumerate(limbSeq): + # update dst_keypoints + start_keypoint = rescale_keypoints[k1_index - 1] + new_length = new_length_list[idx] + angle = angle_list[idx] + + if rescale_keypoints[k1_index - 1] is None or rescale_keypoints[k2_index - 1] is None or \ + len(rescale_keypoints[k1_index - 1]) == 0 or len(rescale_keypoints[k2_index - 1]) == 0: + 
continue + + # calculate end_keypoint + delta_x = new_length * math.cos(math.radians(angle)) + delta_y = new_length * math.sin(math.radians(angle)) + + end_keypoint_x = start_keypoint[0] - delta_x + end_keypoint_y = start_keypoint[1] - delta_y + + # update keypoints + rescale_keypoints[k2_index - 1] = [end_keypoint_x, end_keypoint_y] + + return rescale_keypoints + + +def fix_lack_keypoints_use_sym(skeleton): + + keypoints = skeleton['keypoints_body'] + H, W = skeleton['height'], skeleton['width'] + + limb_points_list = [ + [3, 4, 5], + [6, 7, 8], + [12, 13, 14, 19], + [9, 10, 11, 20], + ] + + for limb_points in limb_points_list: + miss_flag = False + for point in limb_points: + if keypoints[point - 1] is None: + miss_flag = True + continue + if miss_flag: + skeleton['keypoints_body'][point - 1] = None + + repair_limb_seq_left = [ + [3, 4], [4, 5], # left arm + [12, 13], [13, 14], # left leg + [14, 19] # left foot + ] + + repair_limb_seq_right = [ + [6, 7], [7, 8], # right arm + [9, 10], [10, 11], # right leg + [11, 20] # right foot + ] + + repair_limb_seq = [repair_limb_seq_left, repair_limb_seq_right] + + for idx_part, part in enumerate(repair_limb_seq): + for idx, limb in enumerate(part): + + k1_index, k2_index = limb + keypoint1 = keypoints[k1_index - 1] + keypoint2 = keypoints[k2_index - 1] + + if keypoint1 != None and keypoint2 is None: + # reference to symmetric limb + sym_limb = repair_limb_seq[1-idx_part][idx] + k1_index_sym, k2_index_sym = sym_limb + keypoint1_sym = keypoints[k1_index_sym - 1] + keypoint2_sym = keypoints[k2_index_sym - 1] + ref_length = 0 + + if keypoint1_sym != None and keypoint2_sym != None: + X = np.array([keypoint1_sym[0], keypoint2_sym[0]]) * float(W) + Y = np.array([keypoint1_sym[1], keypoint2_sym[1]]) * float(H) + ref_length = ((X[0] - X[1]) ** 2 + (Y[0] - Y[1]) ** 2) ** 0.5 + else: + ref_length_left, ref_length_right = 0, 0 + if keypoints[1] != None and keypoints[8] != None: + X = np.array([keypoints[1][0], keypoints[8][0]]) * float(W) + Y = np.array([keypoints[1][1], keypoints[8][1]]) * float(H) + ref_length_left = ((X[0] - X[1]) ** 2 + (Y[0] - Y[1]) ** 2) ** 0.5 + if idx <= 1: # arms + ref_length_left /= 2 + + if keypoints[1] != None and keypoints[11] != None: + X = np.array([keypoints[1][0], keypoints[11][0]]) * float(W) + Y = np.array([keypoints[1][1], keypoints[11][1]]) * float(H) + ref_length_right = ((X[0] - X[1]) ** 2 + (Y[0] - Y[1]) ** 2) ** 0.5 + if idx <= 1: # arms + ref_length_right /= 2 + elif idx == 4: # foot + ref_length_right /= 5 + + ref_length = max(ref_length_left, ref_length_right) + + if ref_length != 0: + skeleton['keypoints_body'][k2_index - 1] = [0, 0] #init + skeleton['keypoints_body'][k2_index - 1][0] = skeleton['keypoints_body'][k1_index - 1][0] + skeleton['keypoints_body'][k2_index - 1][1] = skeleton['keypoints_body'][k1_index - 1][1] + ref_length / H + return skeleton + + +def rescale_shorten_skeleton(ratio_list, src_length_list, dst_length_list): + + modify_bone_list = [ + [0, 1], + [2, 4], + [3, 5], + [6, 9], + [7, 10], + [8, 11], + [17, 18] + ] + + for modify_bone in modify_bone_list: + new_ratio = max(ratio_list[modify_bone[0]], ratio_list[modify_bone[1]]) + ratio_list[modify_bone[0]] = new_ratio + ratio_list[modify_bone[1]] = new_ratio + + if ratio_list[13]!= None and ratio_list[15]!= None: + ratio_eye_avg = (ratio_list[13] + ratio_list[15]) / 2 + ratio_list[13] = ratio_eye_avg + ratio_list[15] = ratio_eye_avg + + if ratio_list[14]!= None and ratio_list[16]!= None: + ratio_eye_avg = (ratio_list[14] + ratio_list[16]) / 2 + 
ratio_list[14] = ratio_eye_avg + ratio_list[16] = ratio_eye_avg + + return ratio_list, src_length_list, dst_length_list + + + +def check_full_body(keypoints, threshold = 0.4): + + body_flag = 'half_body' + + # 1. If ankle points exist, confidence is greater than the threshold, and points do not exceed the frame, return full_body + if keypoints[10] != None and keypoints[13] != None and keypoints[8] != None and keypoints[11] != None: + if (keypoints[10][1] <= 1 and keypoints[13][1] <= 1) and (keypoints[10][2] >= threshold and keypoints[13][2] >= threshold) and \ + (keypoints[8][1] <= 1 and keypoints[11][1] <= 1) and (keypoints[8][2] >= threshold and keypoints[11][2] >= threshold): + body_flag = 'full_body' + return body_flag + + # 2. If hip points exist, return three_quarter_body + if (keypoints[8] != None and keypoints[11] != None): + if (keypoints[8][1] <= 1 and keypoints[11][1] <= 1) and (keypoints[8][2] >= threshold and keypoints[11][2] >= threshold): + body_flag = 'three_quarter_body' + return body_flag + + return body_flag + + +def check_full_body_both(flag1, flag2): + body_flag_dict = { + 'full_body': 2, + 'three_quarter_body' : 1, + 'half_body': 0 + } + + body_flag_dict_reverse = { + 2: 'full_body', + 1: 'three_quarter_body', + 0: 'half_body' + } + + flag1_num = body_flag_dict[flag1] + flag2_num = body_flag_dict[flag2] + flag_both_num = min(flag1_num, flag2_num) + return body_flag_dict_reverse[flag_both_num] + + +def write_to_poses(data_to_json, none_idx, dst_shape, bone_ratio_list, delta_ground_x, delta_ground_y, rescaled_src_ground_x, body_flag, scale_min): + outputs = [] + length = len(data_to_json) + for id in tqdm(range(length)): + + src_height, src_width = data_to_json[id]['height'], data_to_json[id]['width'] + width, height = dst_shape + keypoints = data_to_json[id]['keypoints_body'] + for idx in range(len(keypoints)): + if idx in none_idx: + keypoints[idx] = None + new_keypoints = keypoints.copy() + + # get hand keypoints + keypoints_hand = {'left' : data_to_json[id]['keypoints_left_hand'], 'right' : data_to_json[id]['keypoints_right_hand']} + # Normalize hand coordinates to 0-1 range + for hand_idx in range(len(data_to_json[id]['keypoints_left_hand'])): + data_to_json[id]['keypoints_left_hand'][hand_idx][0] = data_to_json[id]['keypoints_left_hand'][hand_idx][0] / src_width + data_to_json[id]['keypoints_left_hand'][hand_idx][1] = data_to_json[id]['keypoints_left_hand'][hand_idx][1] / src_height + + for hand_idx in range(len(data_to_json[id]['keypoints_right_hand'])): + data_to_json[id]['keypoints_right_hand'][hand_idx][0] = data_to_json[id]['keypoints_right_hand'][hand_idx][0] / src_width + data_to_json[id]['keypoints_right_hand'][hand_idx][1] = data_to_json[id]['keypoints_right_hand'][hand_idx][1] / src_height + + + frame_info = get_scaled_pose((height, width), (src_height, src_width), new_keypoints, keypoints_hand, bone_ratio_list, delta_ground_x, delta_ground_y, rescaled_src_ground_x, body_flag, id, scale_min) + outputs.append(frame_info) + + return outputs + + +def calculate_scale_ratio(skeleton, skeleton_edit, scale_ratio_flag): + if scale_ratio_flag: + + headw = max(skeleton['keypoints_body'][0][0], skeleton['keypoints_body'][14][0], skeleton['keypoints_body'][15][0], skeleton['keypoints_body'][16][0], skeleton['keypoints_body'][17][0]) - \ + min(skeleton['keypoints_body'][0][0], skeleton['keypoints_body'][14][0], skeleton['keypoints_body'][15][0], skeleton['keypoints_body'][16][0], skeleton['keypoints_body'][17][0]) + headw_edit = 
max(skeleton_edit['keypoints_body'][0][0], skeleton_edit['keypoints_body'][14][0], skeleton_edit['keypoints_body'][15][0], skeleton_edit['keypoints_body'][16][0], skeleton_edit['keypoints_body'][17][0]) - \ + min(skeleton_edit['keypoints_body'][0][0], skeleton_edit['keypoints_body'][14][0], skeleton_edit['keypoints_body'][15][0], skeleton_edit['keypoints_body'][16][0], skeleton_edit['keypoints_body'][17][0]) + headw_ratio = headw / headw_edit + + _, _, shoulder = get_length(skeleton, [6,3]) + _, _, shoulder_edit = get_length(skeleton_edit, [6,3]) + shoulder_ratio = shoulder / shoulder_edit + + return max(headw_ratio, shoulder_ratio) + + else: + return 1 + + + +def retarget_pose(src_skeleton, dst_skeleton, all_src_skeleton, src_skeleton_edit, dst_skeleton_edit, threshold=0.4): + + if src_skeleton_edit is not None and dst_skeleton_edit is not None: + use_edit_for_base = True + else: + use_edit_for_base = False + + src_skeleton_ori = copy.deepcopy(src_skeleton) + + dst_skeleton_ori_h, dst_skeleton_ori_w = dst_skeleton['height'], dst_skeleton['width'] + if src_skeleton['keypoints_body'][0] != None and src_skeleton['keypoints_body'][10] != None and src_skeleton['keypoints_body'][13] != None and \ + dst_skeleton['keypoints_body'][0] != None and dst_skeleton['keypoints_body'][10] != None and dst_skeleton['keypoints_body'][13] != None and \ + src_skeleton['keypoints_body'][0][2] > 0.5 and src_skeleton['keypoints_body'][10][2] > 0.5 and src_skeleton['keypoints_body'][13][2] > 0.5 and \ + dst_skeleton['keypoints_body'][0][2] > 0.5 and dst_skeleton['keypoints_body'][10][2] > 0.5 and dst_skeleton['keypoints_body'][13][2] > 0.5: + + src_height = src_skeleton['height'] * abs( + (src_skeleton['keypoints_body'][10][1] + src_skeleton['keypoints_body'][13][1]) / 2 - + src_skeleton['keypoints_body'][0][1]) + dst_height = dst_skeleton['height'] * abs( + (dst_skeleton['keypoints_body'][10][1] + dst_skeleton['keypoints_body'][13][1]) / 2 - + dst_skeleton['keypoints_body'][0][1]) + scale_min = 1.0 * src_height / dst_height + elif src_skeleton['keypoints_body'][0] != None and src_skeleton['keypoints_body'][8] != None and src_skeleton['keypoints_body'][11] != None and \ + dst_skeleton['keypoints_body'][0] != None and dst_skeleton['keypoints_body'][8] != None and dst_skeleton['keypoints_body'][11] != None and \ + src_skeleton['keypoints_body'][0][2] > 0.5 and src_skeleton['keypoints_body'][8][2] > 0.5 and src_skeleton['keypoints_body'][11][2] > 0.5 and \ + dst_skeleton['keypoints_body'][0][2] > 0.5 and dst_skeleton['keypoints_body'][8][2] > 0.5 and dst_skeleton['keypoints_body'][11][2] > 0.5: + + src_height = src_skeleton['height'] * abs( + (src_skeleton['keypoints_body'][8][1] + src_skeleton['keypoints_body'][11][1]) / 2 - + src_skeleton['keypoints_body'][0][1]) + dst_height = dst_skeleton['height'] * abs( + (dst_skeleton['keypoints_body'][8][1] + dst_skeleton['keypoints_body'][11][1]) / 2 - + dst_skeleton['keypoints_body'][0][1]) + scale_min = 1.0 * src_height / dst_height + else: + scale_min = np.sqrt(src_skeleton['height'] * src_skeleton['width']) / np.sqrt(dst_skeleton['height'] * dst_skeleton['width']) + + if use_edit_for_base: + scale_ratio_flag = False + if src_skeleton_edit['keypoints_body'][0] != None and src_skeleton_edit['keypoints_body'][10] != None and src_skeleton_edit['keypoints_body'][13] != None and \ + dst_skeleton_edit['keypoints_body'][0] != None and dst_skeleton_edit['keypoints_body'][10] != None and dst_skeleton_edit['keypoints_body'][13] != None and \ + 
src_skeleton_edit['keypoints_body'][0][2] > 0.5 and src_skeleton_edit['keypoints_body'][10][2] > 0.5 and src_skeleton_edit['keypoints_body'][13][2] > 0.5 and \ + dst_skeleton_edit['keypoints_body'][0][2] > 0.5 and dst_skeleton_edit['keypoints_body'][10][2] > 0.5 and dst_skeleton_edit['keypoints_body'][13][2] > 0.5: + + src_height_edit = src_skeleton_edit['height'] * abs( + (src_skeleton_edit['keypoints_body'][10][1] + src_skeleton_edit['keypoints_body'][13][1]) / 2 - + src_skeleton_edit['keypoints_body'][0][1]) + dst_height_edit = dst_skeleton_edit['height'] * abs( + (dst_skeleton_edit['keypoints_body'][10][1] + dst_skeleton_edit['keypoints_body'][13][1]) / 2 - + dst_skeleton_edit['keypoints_body'][0][1]) + scale_min_edit = 1.0 * src_height_edit / dst_height_edit + elif src_skeleton_edit['keypoints_body'][0] != None and src_skeleton_edit['keypoints_body'][8] != None and src_skeleton_edit['keypoints_body'][11] != None and \ + dst_skeleton_edit['keypoints_body'][0] != None and dst_skeleton_edit['keypoints_body'][8] != None and dst_skeleton_edit['keypoints_body'][11] != None and \ + src_skeleton_edit['keypoints_body'][0][2] > 0.5 and src_skeleton_edit['keypoints_body'][8][2] > 0.5 and src_skeleton_edit['keypoints_body'][11][2] > 0.5 and \ + dst_skeleton_edit['keypoints_body'][0][2] > 0.5 and dst_skeleton_edit['keypoints_body'][8][2] > 0.5 and dst_skeleton_edit['keypoints_body'][11][2] > 0.5: + + src_height_edit = src_skeleton_edit['height'] * abs( + (src_skeleton_edit['keypoints_body'][8][1] + src_skeleton_edit['keypoints_body'][11][1]) / 2 - + src_skeleton_edit['keypoints_body'][0][1]) + dst_height_edit = dst_skeleton_edit['height'] * abs( + (dst_skeleton_edit['keypoints_body'][8][1] + dst_skeleton_edit['keypoints_body'][11][1]) / 2 - + dst_skeleton_edit['keypoints_body'][0][1]) + scale_min_edit = 1.0 * src_height_edit / dst_height_edit + else: + scale_min_edit = np.sqrt(src_skeleton_edit['height'] * src_skeleton_edit['width']) / np.sqrt(dst_skeleton_edit['height'] * dst_skeleton_edit['width']) + scale_ratio_flag = True + + # Flux may change the scale, compensate for it here + ratio_src = calculate_scale_ratio(src_skeleton, src_skeleton_edit, scale_ratio_flag) + ratio_dst = calculate_scale_ratio(dst_skeleton, dst_skeleton_edit, scale_ratio_flag) + + dst_skeleton_edit['height'] = int(dst_skeleton_edit['height'] * scale_min_edit) + dst_skeleton_edit['width'] = int(dst_skeleton_edit['width'] * scale_min_edit) + for idx in range(len(dst_skeleton_edit['keypoints_left_hand'])): + dst_skeleton_edit['keypoints_left_hand'][idx][0] *= scale_min_edit + dst_skeleton_edit['keypoints_left_hand'][idx][1] *= scale_min_edit + for idx in range(len(dst_skeleton_edit['keypoints_right_hand'])): + dst_skeleton_edit['keypoints_right_hand'][idx][0] *= scale_min_edit + dst_skeleton_edit['keypoints_right_hand'][idx][1] *= scale_min_edit + + + dst_skeleton['height'] = int(dst_skeleton['height'] * scale_min) + dst_skeleton['width'] = int(dst_skeleton['width'] * scale_min) + for idx in range(len(dst_skeleton['keypoints_left_hand'])): + dst_skeleton['keypoints_left_hand'][idx][0] *= scale_min + dst_skeleton['keypoints_left_hand'][idx][1] *= scale_min + for idx in range(len(dst_skeleton['keypoints_right_hand'])): + dst_skeleton['keypoints_right_hand'][idx][0] *= scale_min + dst_skeleton['keypoints_right_hand'][idx][1] *= scale_min + + + dst_body_flag = check_full_body(dst_skeleton['keypoints_body'], threshold) + src_body_flag = check_full_body(src_skeleton_ori['keypoints_body'], threshold) + body_flag = 
check_full_body_both(dst_body_flag, src_body_flag) + #print('body_flag: ', body_flag) + + if use_edit_for_base: + src_skeleton_edit = fix_lack_keypoints_use_sym(src_skeleton_edit) + dst_skeleton_edit = fix_lack_keypoints_use_sym(dst_skeleton_edit) + else: + src_skeleton = fix_lack_keypoints_use_sym(src_skeleton) + dst_skeleton = fix_lack_keypoints_use_sym(dst_skeleton) + + none_idx = [] + for idx in range(len(dst_skeleton['keypoints_body'])): + if dst_skeleton['keypoints_body'][idx] == None or src_skeleton['keypoints_body'][idx] == None: + src_skeleton['keypoints_body'][idx] = None + dst_skeleton['keypoints_body'][idx] = None + none_idx.append(idx) + + # get bone ratio list + ratio_list, src_length_list, dst_length_list = [], [], [] + for idx, limb in enumerate(limbSeq): + if use_edit_for_base: + src_X, src_Y, src_length = get_length(src_skeleton_edit, limb) + dst_X, dst_Y, dst_length = get_length(dst_skeleton_edit, limb) + + if src_X is None or src_Y is None or dst_X is None or dst_Y is None: + ratio = -1 + else: + ratio = 1.0 * dst_length * ratio_dst / src_length / ratio_src + + else: + src_X, src_Y, src_length = get_length(src_skeleton, limb) + dst_X, dst_Y, dst_length = get_length(dst_skeleton, limb) + + if src_X is None or src_Y is None or dst_X is None or dst_Y is None: + ratio = -1 + else: + ratio = 1.0 * dst_length / src_length + + ratio_list.append(ratio) + src_length_list.append(src_length) + dst_length_list.append(dst_length) + + for idx, ratio in enumerate(ratio_list): + if ratio == -1: + if ratio_list[0] != -1 and ratio_list[1] != -1: + ratio_list[idx] = (ratio_list[0] + ratio_list[1]) / 2 + + # Consider adding constraints when Flux fails to correct head pose, causing neck issues. + # if ratio_list[12] > (ratio_list[0]+ratio_list[1])/2*1.25: + # ratio_list[12] = (ratio_list[0]+ratio_list[1])/2*1.25 + + ratio_list, src_length_list, dst_length_list = rescale_shorten_skeleton(ratio_list, src_length_list, dst_length_list) + + rescaled_src_skeleton_ori = rescale_skeleton(src_skeleton_ori['height'], src_skeleton_ori['width'], + src_skeleton_ori['keypoints_body'], ratio_list) + + # get global translation offset_x and offset_y + if body_flag == 'full_body': + #print('use foot mark.') + dst_ground_y = max(dst_skeleton['keypoints_body'][10][1], dst_skeleton['keypoints_body'][13][1]) * dst_skeleton[ + 'height'] + # The midpoint between toe and ankle + if dst_skeleton['keypoints_body'][18] != None and dst_skeleton['keypoints_body'][19] != None: + right_foot_mid = (dst_skeleton['keypoints_body'][10][1] + dst_skeleton['keypoints_body'][19][1]) / 2 + left_foot_mid = (dst_skeleton['keypoints_body'][13][1] + dst_skeleton['keypoints_body'][18][1]) / 2 + dst_ground_y = max(left_foot_mid, right_foot_mid) * dst_skeleton['height'] + + rescaled_src_ground_y = max(rescaled_src_skeleton_ori[10][1], rescaled_src_skeleton_ori[13][1]) + delta_ground_y = rescaled_src_ground_y - dst_ground_y + + dst_ground_x = (dst_skeleton['keypoints_body'][8][0] + dst_skeleton['keypoints_body'][11][0]) * dst_skeleton[ + 'width'] / 2 + rescaled_src_ground_x = (rescaled_src_skeleton_ori[8][0] + rescaled_src_skeleton_ori[11][0]) / 2 + delta_ground_x = rescaled_src_ground_x - dst_ground_x + delta_x, delta_y = delta_ground_x, delta_ground_y + + else: + #print('use neck mark.') + # use neck keypoint as mark + src_neck_y = rescaled_src_skeleton_ori[1][1] + dst_neck_y = dst_skeleton['keypoints_body'][1][1] + delta_neck_y = src_neck_y - dst_neck_y * dst_skeleton['height'] + + src_neck_x = rescaled_src_skeleton_ori[1][0] + 
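+        # Neck-anchored fallback: when the pose is not full-body, align the destination
+        # neck with the rescaled source neck (destination keypoints are normalized, so
+        # they are scaled by width/height before taking the difference).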
dst_neck_x = dst_skeleton['keypoints_body'][1][0] + delta_neck_x = src_neck_x - dst_neck_x * dst_skeleton['width'] + delta_x, delta_y = delta_neck_x, delta_neck_y + rescaled_src_ground_x = src_neck_x + + + dst_shape = (dst_skeleton_ori_w, dst_skeleton_ori_h) + output = write_to_poses(all_src_skeleton, none_idx, dst_shape, ratio_list, delta_x, delta_y, + rescaled_src_ground_x, body_flag, scale_min) + return output + + +def get_retarget_pose(tpl_pose_meta0, refer_pose_meta, tpl_pose_metas, tql_edit_pose_meta0, refer_edit_pose_meta): + + for key, value in tpl_pose_meta0.items(): + if type(value) is np.ndarray: + if key in ['keypoints_left_hand', 'keypoints_right_hand']: + value = value * np.array([[tpl_pose_meta0["width"], tpl_pose_meta0["height"], 1.0]]) + if not isinstance(value, list): + value = value.tolist() + tpl_pose_meta0[key] = value + + for key, value in refer_pose_meta.items(): + if type(value) is np.ndarray: + if key in ['keypoints_left_hand', 'keypoints_right_hand']: + value = value * np.array([[refer_pose_meta["width"], refer_pose_meta["height"], 1.0]]) + if not isinstance(value, list): + value = value.tolist() + refer_pose_meta[key] = value + + tpl_pose_metas_new = [] + for meta in tpl_pose_metas: + for key, value in meta.items(): + if type(value) is np.ndarray: + if key in ['keypoints_left_hand', 'keypoints_right_hand']: + value = value * np.array([[meta["width"], meta["height"], 1.0]]) + if not isinstance(value, list): + value = value.tolist() + meta[key] = value + tpl_pose_metas_new.append(meta) + + if tql_edit_pose_meta0 is not None: + for key, value in tql_edit_pose_meta0.items(): + if type(value) is np.ndarray: + if key in ['keypoints_left_hand', 'keypoints_right_hand']: + value = value * np.array([[tql_edit_pose_meta0["width"], tql_edit_pose_meta0["height"], 1.0]]) + if not isinstance(value, list): + value = value.tolist() + tql_edit_pose_meta0[key] = value + + if refer_edit_pose_meta is not None: + for key, value in refer_edit_pose_meta.items(): + if type(value) is np.ndarray: + if key in ['keypoints_left_hand', 'keypoints_right_hand']: + value = value * np.array([[refer_edit_pose_meta["width"], refer_edit_pose_meta["height"], 1.0]]) + if not isinstance(value, list): + value = value.tolist() + refer_edit_pose_meta[key] = value + + retarget_tpl_pose_metas = retarget_pose(tpl_pose_meta0, refer_pose_meta, tpl_pose_metas_new, tql_edit_pose_meta0, refer_edit_pose_meta) + + pose_metas = [] + for meta in retarget_tpl_pose_metas: + pose_meta = AAPoseMeta() + width, height = meta["width"], meta["height"] + pose_meta.width = width + pose_meta.height = height + pose_meta.kps_body = np.array(meta["keypoints_body"])[:, :2] * (width, height) + pose_meta.kps_body_p = np.array(meta["keypoints_body"])[:, 2] + + kps_lhand = [] + kps_lhand_p = [] + for each_kps_lhand in meta["keypoints_left_hand"]: + if each_kps_lhand is not None: + kps_lhand.append([each_kps_lhand.x, each_kps_lhand.y]) + kps_lhand_p.append(each_kps_lhand.score) + else: + kps_lhand.append([None, None]) + kps_lhand_p.append(0.0) + + pose_meta.kps_lhand = np.array(kps_lhand) + pose_meta.kps_lhand_p = np.array(kps_lhand_p) + + kps_rhand = [] + kps_rhand_p = [] + for each_kps_rhand in meta["keypoints_right_hand"]: + if each_kps_rhand is not None: + kps_rhand.append([each_kps_rhand.x, each_kps_rhand.y]) + kps_rhand_p.append(each_kps_rhand.score) + else: + kps_rhand.append([None, None]) + kps_rhand_p.append(0.0) + + pose_meta.kps_rhand = np.array(kps_rhand) + pose_meta.kps_rhand_p = np.array(kps_rhand_p) + + 
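+        # Missing hand keypoints were stored as [None, None] with confidence 0.0 above,
+        # so every frame's arrays stay index-aligned with the hand skeleton.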
pose_metas.append(pose_meta) + + return pose_metas + diff --git a/zavodik/nodes/ComfyUI-WanAnimatePreprocess/utils.py b/zavodik/nodes/ComfyUI-WanAnimatePreprocess/utils.py new file mode 100644 index 0000000000000000000000000000000000000000..1412f3e12aaafb9ba4b71e35a5c7e00789ffd5a5 --- /dev/null +++ b/zavodik/nodes/ComfyUI-WanAnimatePreprocess/utils.py @@ -0,0 +1,317 @@ +# Copyright 2024-2025 The Alibaba Wan Team Authors. All rights reserved. +import os +import cv2 +import math +import random +import numpy as np + +def get_mask_boxes(mask): + y_coords, x_coords = np.nonzero(mask) + x_min = x_coords.min() + x_max = x_coords.max() + y_min = y_coords.min() + y_max = y_coords.max() + bbox = np.array([x_min, y_min, x_max, y_max]).astype(np.int32) + return bbox + + +def get_aug_mask(body_mask, w_len=10, h_len=20): + body_bbox = get_mask_boxes(body_mask) + + bbox_wh = body_bbox[2:4] - body_bbox[0:2] + w_slice = np.int32(bbox_wh[0] / w_len) + h_slice = np.int32(bbox_wh[1] / h_len) + + for each_w in range(body_bbox[0], body_bbox[2], w_slice): + w_start = min(each_w, body_bbox[2]) + w_end = min((each_w + w_slice), body_bbox[2]) + for each_h in range(body_bbox[1], body_bbox[3], h_slice): + h_start = min(each_h, body_bbox[3]) + h_end = min((each_h + h_slice), body_bbox[3]) + if body_mask[h_start:h_end, w_start:w_end].sum() > 0: + body_mask[h_start:h_end, w_start:w_end] = 1 + + return body_mask + +def get_mask_body_img(img_copy, hand_mask, k=7, iterations=1): + kernel = np.ones((k, k), np.uint8) + dilation = cv2.dilate(hand_mask, kernel, iterations=iterations) + mask_hand_img = img_copy * (1 - dilation[:, :, None]) + + return mask_hand_img, dilation + + +def get_face_bboxes(kp2ds, scale, image_shape, ratio_aug): + h, w = image_shape + kp2ds_face = kp2ds.copy()[23:91, :2] + + min_x, min_y = np.min(kp2ds_face, axis=0) + max_x, max_y = np.max(kp2ds_face, axis=0) + + + initial_width = max_x - min_x + initial_height = max_y - min_y + + initial_area = initial_width * initial_height + + expanded_area = initial_area * scale + + new_width = np.sqrt(expanded_area * (initial_width / initial_height)) + new_height = np.sqrt(expanded_area * (initial_height / initial_width)) + + delta_width = (new_width - initial_width) / 2 + delta_height = (new_height - initial_height) / 4 + + if ratio_aug: + if random.random() > 0.5: + delta_width += random.uniform(0, initial_width // 10) + else: + delta_height += random.uniform(0, initial_height // 10) + + expanded_min_x = max(min_x - delta_width, 0) + expanded_max_x = min(max_x + delta_width, w) + expanded_min_y = max(min_y - 3 * delta_height, 0) + expanded_max_y = min(max_y + delta_height, h) + + return [int(expanded_min_x), int(expanded_max_x), int(expanded_min_y), int(expanded_max_y)] + + +def calculate_new_size(orig_w, orig_h, target_area, divisor=64): + + target_ratio = orig_w / orig_h + + def check_valid(w, h): + + if w <= 0 or h <= 0: + return False + return (w * h <= target_area and + w % divisor == 0 and + h % divisor == 0) + + def get_ratio_diff(w, h): + + return abs(w / h - target_ratio) + + def round_to_64(value, round_up=False, divisor=64): + + if round_up: + return divisor * ((value + (divisor - 1)) // divisor) + return divisor * (value // divisor) + + possible_sizes = [] + + max_area_h = int(np.sqrt(target_area / target_ratio)) + max_area_w = int(max_area_h * target_ratio) + + max_h = round_to_64(max_area_h, round_up=True, divisor=divisor) + max_w = round_to_64(max_area_w, round_up=True, divisor=divisor) + + for h in range(divisor, max_h + divisor, divisor): + 
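+        # For each candidate height, try the nearest multiples of `divisor` on either
+        # side of the aspect-preserving ideal width.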
+        ideal_w = h * target_ratio
+
+        w_down = round_to_64(ideal_w, divisor=divisor)
+        w_up = round_to_64(ideal_w, round_up=True, divisor=divisor)
+
+        for w in [w_down, w_up]:
+            # check_valid closes over target_area and divisor from the enclosing scope
+            if check_valid(w, h):
+                possible_sizes.append((w, h, get_ratio_diff(w, h)))
+
+    if not possible_sizes:
+        raise ValueError("Cannot find a suitable size")
+
+    # Prefer the largest area; break ties by closeness to the original aspect ratio
+    possible_sizes.sort(key=lambda x: (-x[0] * x[1], x[2]))
+
+    best_w, best_h, _ = possible_sizes[0]
+    return int(best_w), int(best_h)
+
+
+def resize_by_area(image, target_area, keep_aspect_ratio=True, divisor=64, padding_color=(0, 0, 0)):
+    h, w = image.shape[:2]
+    try:
+        new_w, new_h = calculate_new_size(w, h, target_area, divisor)
+    except Exception:
+        # Fall back to a simple aspect-ratio fit if no divisor-aligned size is found
+        aspect_ratio = w / h
+
+        if keep_aspect_ratio:
+            new_h = math.sqrt(target_area / aspect_ratio)
+            new_w = target_area / new_h
+        else:
+            new_w = new_h = math.sqrt(target_area)
+
+        new_w, new_h = int((new_w // divisor) * divisor), int((new_h // divisor) * divisor)
+
+    interpolation = cv2.INTER_AREA if (new_w * new_h < w * h) else cv2.INTER_LINEAR
+
+    resized_image = padding_resize(image, height=new_h, width=new_w, padding_color=padding_color,
+                                   interpolation=interpolation)
+    return resized_image
+
+
+def padding_resize(img_ori, height=512, width=512, padding_color=(0, 0, 0), interpolation=cv2.INTER_LINEAR):
+    ori_height = img_ori.shape[0]
+    ori_width = img_ori.shape[1]
+    channel = img_ori.shape[2]
+
+    img_pad = np.zeros((height, width, channel), dtype=img_ori.dtype)
+    if channel == 1:
+        img_pad[:, :, 0] = padding_color[0]
+    else:
+        img_pad[:, :, 0] = padding_color[0]
+        img_pad[:, :, 1] = padding_color[1]
+        img_pad[:, :, 2] = padding_color[2]
+
+    if (ori_height / ori_width) > (height / width):
+        new_width = int(height / ori_height * ori_width)
+        img = cv2.resize(img_ori, (new_width, height), interpolation=interpolation)
+        padding = int((width - new_width) / 2)
+        if len(img.shape) == 2:
+            img = img[:, :, np.newaxis]
+        img_pad[:, padding: padding + new_width, :] = img
+    else:
+        new_height = int(width / ori_width * ori_height)
+        img = cv2.resize(img_ori, (width, new_height), interpolation=interpolation)
+        padding = int((height - new_height) / 2)
+        if len(img.shape) == 2:
+            img = img[:, :, np.newaxis]
+        img_pad[padding: padding + new_height, :, :] = img
+
+    return img_pad
+
+def resize_to_bounds(img_ori, height=512, width=512, padding_color=(0, 0, 0), interpolation=cv2.INTER_LINEAR, extra_padding=64, crop_target_image=None):
+    # Find non-black pixel bounds
+    if crop_target_image is not None:
+        ref = crop_target_image
+        if ref.ndim == 2:
+            mask = ref > 0
+        else:
+            mask = np.any(ref != 0, axis=2)
+        coords = np.argwhere(mask)
+        if coords.size == 0:
+            # All black, fall back to the full image
+            y0, x0 = 0, 0
+            y1, x1 = img_ori.shape[0], img_ori.shape[1]
+        else:
+            y0, x0 = coords.min(axis=0)
+            y1, x1 = coords.max(axis=0) + 1
+        # Intended crop bounds with padding
+        pad_y0 = y0 - extra_padding
+        pad_x0 = x0 - extra_padding
+        pad_y1 = y1 + extra_padding
+        pad_x1 = x1 + extra_padding
+        # Actual crop bounds clipped to image
+        crop_y0 = max(pad_y0, 0)
+        crop_x0 = max(pad_x0, 0)
+        crop_y1 = min(pad_y1, img_ori.shape[0])
+        crop_x1 = min(pad_x1, img_ori.shape[1])
+        crop_img = img_ori[crop_y0:crop_y1, crop_x0:crop_x1]
+        # Pad if needed
+        pad_top = crop_y0 - pad_y0
+        pad_left = crop_x0 - pad_x0
+        pad_bottom = pad_y1 - crop_y1
+        pad_right = pad_x1 - crop_x1
+        if any([pad_top, pad_left, pad_bottom, pad_right]):
+            channel = crop_img.shape[2] if crop_img.ndim == 3 else 1
+            # 2-D masks pad only (rows, cols); 3-D images also get a zero pad on the channel axis
+            crop_img = np.pad(
+                crop_img,
+                (((pad_top, pad_bottom), (pad_left, pad_right)) + ((0, 0),)) if channel > 1 else
+                ((pad_top, pad_bottom), (pad_left, pad_right)),
+                mode='constant', constant_values=0
+            )
+    else:
+        if img_ori.ndim == 2:
+            mask = img_ori > 0
+        else:
+            mask = np.any(img_ori != 0, axis=2)
+        coords = np.argwhere(mask)
+        if coords.size == 0:
+            # All black, fall back to the original
+            crop_img = img_ori
+        else:
+            y0, x0 = coords.min(axis=0)
+            y1, x1 = coords.max(axis=0) + 1
+            pad_y0 = y0 - extra_padding
+            pad_x0 = x0 - extra_padding
+            pad_y1 = y1 + extra_padding
+            pad_x1 = x1 + extra_padding
+            crop_y0 = max(pad_y0, 0)
+            crop_x0 = max(pad_x0, 0)
+            crop_y1 = min(pad_y1, img_ori.shape[0])
+            crop_x1 = min(pad_x1, img_ori.shape[1])
+            crop_img = img_ori[crop_y0:crop_y1, crop_x0:crop_x1]
+            pad_top = crop_y0 - pad_y0
+            pad_left = crop_x0 - pad_x0
+            pad_bottom = pad_y1 - crop_y1
+            pad_right = pad_x1 - crop_x1
+            if any([pad_top, pad_left, pad_bottom, pad_right]):
+                channel = crop_img.shape[2] if crop_img.ndim == 3 else 1
+                crop_img = np.pad(
+                    crop_img,
+                    (((pad_top, pad_bottom), (pad_left, pad_right)) + ((0, 0),)) if channel > 1 else
+                    ((pad_top, pad_bottom), (pad_left, pad_right)),
+                    mode='constant', constant_values=0
+                )
+
+    ori_height = crop_img.shape[0]
+    ori_width = crop_img.shape[1]
+    channel = crop_img.shape[2] if crop_img.ndim == 3 else 1
+
+    img_pad = np.zeros((height, width, channel), dtype=crop_img.dtype)
+    if channel == 1:
+        img_pad[:, :, 0] = padding_color[0]
+    else:
+        for c in range(channel):
+            img_pad[:, :, c] = padding_color[c % len(padding_color)]
+
+    # Resize cropped image to fit target size, preserving aspect ratio
+    crop_aspect = ori_width / ori_height
+    target_aspect = width / height
+    if crop_aspect > target_aspect:
+        new_width = width
+        new_height = int(width / crop_aspect)
+    else:
+        new_height = height
+        new_width = int(height * crop_aspect)
+    img = cv2.resize(crop_img, (new_width, new_height), interpolation=interpolation)
+    if img.ndim == 2:
+        img = img[:, :, np.newaxis]
+    y_pad = (height - new_height) // 2
+    x_pad = (width - new_width) // 2
+    img_pad[y_pad:y_pad + new_height, x_pad:x_pad + new_width, :] = img
+
+    return img_pad
+
+
+def get_frame_indices(frame_num, video_fps, clip_length, train_fps):
+
+    start_frame = 0
+    times = np.arange(0, clip_length) / train_fps
+    frame_indices = start_frame + np.round(times * video_fps).astype(int)
+    frame_indices = np.clip(frame_indices, 0, frame_num - 1)
+
+    return frame_indices.tolist()
+
+
+# NOTE: this definition shadows the get_face_bboxes(..., ratio_aug) variant above;
+# this version expects keypoints normalized to [0, 1] rather than pixel coordinates.
+def get_face_bboxes(kp2ds, scale, image_shape):
+    h, w = image_shape
+    kp2ds_face = kp2ds.copy()[1:] * (w, h)
+
+    min_x, min_y = np.min(kp2ds_face, axis=0)
+    max_x, max_y = np.max(kp2ds_face, axis=0)
+
+    initial_width = max_x - min_x
+    initial_height = max_y - min_y
+
+    initial_area = initial_width * initial_height
+
+    expanded_area = initial_area * scale
+
+    new_width = np.sqrt(expanded_area * (initial_width / initial_height))
+    new_height = np.sqrt(expanded_area * (initial_height / initial_width))
+
+    delta_width = (new_width - initial_width) / 2
+    delta_height = (new_height - initial_height) / 4
+
+    expanded_min_x = max(min_x - delta_width, 0)
+    expanded_max_x = min(max_x + delta_width, w)
+    expanded_min_y = max(min_y - 3 * delta_height, 0)
+    expanded_max_y = min(max_y + delta_height, h)
+
+    return [int(expanded_min_x), int(expanded_max_x), int(expanded_min_y), int(expanded_max_y)]
\ No newline at end of file
diff --git a/zavodik/nodes/ComfyUI_tinyterraNodes-main/.github/workflows/publish_action.yml b/zavodik/nodes/ComfyUI_tinyterraNodes-main/.github/workflows/publish_action.yml
new file mode 100644
index
0000000000000000000000000000000000000000..c594d63b0f6b8af65e60fec09d52723a8970bdaa --- /dev/null +++ b/zavodik/nodes/ComfyUI_tinyterraNodes-main/.github/workflows/publish_action.yml @@ -0,0 +1,24 @@ +name: Publish to Comfy registry +on: + workflow_dispatch: + push: + branches: + - main + paths: + - "pyproject.toml" + +permissions: + issues: write + +jobs: + publish-node: + name: Publish Custom Node to registry + runs-on: ubuntu-latest + if: ${{ github.repository_owner == 'TinyTerra' }} + steps: + - name: Check out code + uses: actions/checkout@v4 + - name: Publish Custom Node + uses: Comfy-Org/publish-node-action@v1 + with: + personal_access_token: ${{ secrets.REGISTRY_ACCESS_TOKEN }} diff --git a/zavodik/nodes/ComfyUI_tinyterraNodes-main/.gitignore b/zavodik/nodes/ComfyUI_tinyterraNodes-main/.gitignore new file mode 100644 index 0000000000000000000000000000000000000000..a57d6b3f0a04d1a311136cebe0675ad142b84465 --- /dev/null +++ b/zavodik/nodes/ComfyUI_tinyterraNodes-main/.gitignore @@ -0,0 +1,3 @@ +config.ini +nsp_pantry.json +__pycache__ \ No newline at end of file diff --git a/zavodik/nodes/ComfyUI_tinyterraNodes-main/LICENSE b/zavodik/nodes/ComfyUI_tinyterraNodes-main/LICENSE new file mode 100644 index 0000000000000000000000000000000000000000..f288702d2fa16d3cdf0035b15a9fcbc552cd88e7 --- /dev/null +++ b/zavodik/nodes/ComfyUI_tinyterraNodes-main/LICENSE @@ -0,0 +1,674 @@ + GNU GENERAL PUBLIC LICENSE + Version 3, 29 June 2007 + + Copyright (C) 2007 Free Software Foundation, Inc. + Everyone is permitted to copy and distribute verbatim copies + of this license document, but changing it is not allowed. + + Preamble + + The GNU General Public License is a free, copyleft license for +software and other kinds of works. + + The licenses for most software and other practical works are designed +to take away your freedom to share and change the works. By contrast, +the GNU General Public License is intended to guarantee your freedom to +share and change all versions of a program--to make sure it remains free +software for all its users. We, the Free Software Foundation, use the +GNU General Public License for most of our software; it applies also to +any other work released this way by its authors. You can apply it to +your programs, too. + + When we speak of free software, we are referring to freedom, not +price. Our General Public Licenses are designed to make sure that you +have the freedom to distribute copies of free software (and charge for +them if you wish), that you receive source code or can get it if you +want it, that you can change the software or use pieces of it in new +free programs, and that you know you can do these things. + + To protect your rights, we need to prevent others from denying you +these rights or asking you to surrender the rights. Therefore, you have +certain responsibilities if you distribute copies of the software, or if +you modify it: responsibilities to respect the freedom of others. + + For example, if you distribute copies of such a program, whether +gratis or for a fee, you must pass on to the recipients the same +freedoms that you received. You must make sure that they, too, receive +or can get the source code. And you must show them these terms so they +know their rights. + + Developers that use the GNU GPL protect your rights with two steps: +(1) assert copyright on the software, and (2) offer you this License +giving you legal permission to copy, distribute and/or modify it. 
+ + For the developers' and authors' protection, the GPL clearly explains +that there is no warranty for this free software. For both users' and +authors' sake, the GPL requires that modified versions be marked as +changed, so that their problems will not be attributed erroneously to +authors of previous versions. + + Some devices are designed to deny users access to install or run +modified versions of the software inside them, although the manufacturer +can do so. This is fundamentally incompatible with the aim of +protecting users' freedom to change the software. The systematic +pattern of such abuse occurs in the area of products for individuals to +use, which is precisely where it is most unacceptable. Therefore, we +have designed this version of the GPL to prohibit the practice for those +products. If such problems arise substantially in other domains, we +stand ready to extend this provision to those domains in future versions +of the GPL, as needed to protect the freedom of users. + + Finally, every program is threatened constantly by software patents. +States should not allow patents to restrict development and use of +software on general-purpose computers, but in those that do, we wish to +avoid the special danger that patents applied to a free program could +make it effectively proprietary. To prevent this, the GPL assures that +patents cannot be used to render the program non-free. + + The precise terms and conditions for copying, distribution and +modification follow. + + TERMS AND CONDITIONS + + 0. Definitions. + + "This License" refers to version 3 of the GNU General Public License. + + "Copyright" also means copyright-like laws that apply to other kinds of +works, such as semiconductor masks. + + "The Program" refers to any copyrightable work licensed under this +License. Each licensee is addressed as "you". "Licensees" and +"recipients" may be individuals or organizations. + + To "modify" a work means to copy from or adapt all or part of the work +in a fashion requiring copyright permission, other than the making of an +exact copy. The resulting work is called a "modified version" of the +earlier work or a work "based on" the earlier work. + + A "covered work" means either the unmodified Program or a work based +on the Program. + + To "propagate" a work means to do anything with it that, without +permission, would make you directly or secondarily liable for +infringement under applicable copyright law, except executing it on a +computer or modifying a private copy. Propagation includes copying, +distribution (with or without modification), making available to the +public, and in some countries other activities as well. + + To "convey" a work means any kind of propagation that enables other +parties to make or receive copies. Mere interaction with a user through +a computer network, with no transfer of a copy, is not conveying. + + An interactive user interface displays "Appropriate Legal Notices" +to the extent that it includes a convenient and prominently visible +feature that (1) displays an appropriate copyright notice, and (2) +tells the user that there is no warranty for the work (except to the +extent that warranties are provided), that licensees may convey the +work under this License, and how to view a copy of this License. If +the interface presents a list of user commands or options, such as a +menu, a prominent item in the list meets this criterion. + + 1. Source Code. 
+ + The "source code" for a work means the preferred form of the work +for making modifications to it. "Object code" means any non-source +form of a work. + + A "Standard Interface" means an interface that either is an official +standard defined by a recognized standards body, or, in the case of +interfaces specified for a particular programming language, one that +is widely used among developers working in that language. + + The "System Libraries" of an executable work include anything, other +than the work as a whole, that (a) is included in the normal form of +packaging a Major Component, but which is not part of that Major +Component, and (b) serves only to enable use of the work with that +Major Component, or to implement a Standard Interface for which an +implementation is available to the public in source code form. A +"Major Component", in this context, means a major essential component +(kernel, window system, and so on) of the specific operating system +(if any) on which the executable work runs, or a compiler used to +produce the work, or an object code interpreter used to run it. + + The "Corresponding Source" for a work in object code form means all +the source code needed to generate, install, and (for an executable +work) run the object code and to modify the work, including scripts to +control those activities. However, it does not include the work's +System Libraries, or general-purpose tools or generally available free +programs which are used unmodified in performing those activities but +which are not part of the work. For example, Corresponding Source +includes interface definition files associated with source files for +the work, and the source code for shared libraries and dynamically +linked subprograms that the work is specifically designed to require, +such as by intimate data communication or control flow between those +subprograms and other parts of the work. + + The Corresponding Source need not include anything that users +can regenerate automatically from other parts of the Corresponding +Source. + + The Corresponding Source for a work in source code form is that +same work. + + 2. Basic Permissions. + + All rights granted under this License are granted for the term of +copyright on the Program, and are irrevocable provided the stated +conditions are met. This License explicitly affirms your unlimited +permission to run the unmodified Program. The output from running a +covered work is covered by this License only if the output, given its +content, constitutes a covered work. This License acknowledges your +rights of fair use or other equivalent, as provided by copyright law. + + You may make, run and propagate covered works that you do not +convey, without conditions so long as your license otherwise remains +in force. You may convey covered works to others for the sole purpose +of having them make modifications exclusively for you, or provide you +with facilities for running those works, provided that you comply with +the terms of this License in conveying all material for which you do +not control copyright. Those thus making or running the covered works +for you must do so exclusively on your behalf, under your direction +and control, on terms that prohibit them from making any copies of +your copyrighted material outside their relationship with you. + + Conveying under any other circumstances is permitted solely under +the conditions stated below. Sublicensing is not allowed; section 10 +makes it unnecessary. + + 3. 
Protecting Users' Legal Rights From Anti-Circumvention Law. + + No covered work shall be deemed part of an effective technological +measure under any applicable law fulfilling obligations under article +11 of the WIPO copyright treaty adopted on 20 December 1996, or +similar laws prohibiting or restricting circumvention of such +measures. + + When you convey a covered work, you waive any legal power to forbid +circumvention of technological measures to the extent such circumvention +is effected by exercising rights under this License with respect to +the covered work, and you disclaim any intention to limit operation or +modification of the work as a means of enforcing, against the work's +users, your or third parties' legal rights to forbid circumvention of +technological measures. + + 4. Conveying Verbatim Copies. + + You may convey verbatim copies of the Program's source code as you +receive it, in any medium, provided that you conspicuously and +appropriately publish on each copy an appropriate copyright notice; +keep intact all notices stating that this License and any +non-permissive terms added in accord with section 7 apply to the code; +keep intact all notices of the absence of any warranty; and give all +recipients a copy of this License along with the Program. + + You may charge any price or no price for each copy that you convey, +and you may offer support or warranty protection for a fee. + + 5. Conveying Modified Source Versions. + + You may convey a work based on the Program, or the modifications to +produce it from the Program, in the form of source code under the +terms of section 4, provided that you also meet all of these conditions: + + a) The work must carry prominent notices stating that you modified + it, and giving a relevant date. + + b) The work must carry prominent notices stating that it is + released under this License and any conditions added under section + 7. This requirement modifies the requirement in section 4 to + "keep intact all notices". + + c) You must license the entire work, as a whole, under this + License to anyone who comes into possession of a copy. This + License will therefore apply, along with any applicable section 7 + additional terms, to the whole of the work, and all its parts, + regardless of how they are packaged. This License gives no + permission to license the work in any other way, but it does not + invalidate such permission if you have separately received it. + + d) If the work has interactive user interfaces, each must display + Appropriate Legal Notices; however, if the Program has interactive + interfaces that do not display Appropriate Legal Notices, your + work need not make them do so. + + A compilation of a covered work with other separate and independent +works, which are not by their nature extensions of the covered work, +and which are not combined with it such as to form a larger program, +in or on a volume of a storage or distribution medium, is called an +"aggregate" if the compilation and its resulting copyright are not +used to limit the access or legal rights of the compilation's users +beyond what the individual works permit. Inclusion of a covered work +in an aggregate does not cause this License to apply to the other +parts of the aggregate. + + 6. Conveying Non-Source Forms. 
+ + You may convey a covered work in object code form under the terms +of sections 4 and 5, provided that you also convey the +machine-readable Corresponding Source under the terms of this License, +in one of these ways: + + a) Convey the object code in, or embodied in, a physical product + (including a physical distribution medium), accompanied by the + Corresponding Source fixed on a durable physical medium + customarily used for software interchange. + + b) Convey the object code in, or embodied in, a physical product + (including a physical distribution medium), accompanied by a + written offer, valid for at least three years and valid for as + long as you offer spare parts or customer support for that product + model, to give anyone who possesses the object code either (1) a + copy of the Corresponding Source for all the software in the + product that is covered by this License, on a durable physical + medium customarily used for software interchange, for a price no + more than your reasonable cost of physically performing this + conveying of source, or (2) access to copy the + Corresponding Source from a network server at no charge. + + c) Convey individual copies of the object code with a copy of the + written offer to provide the Corresponding Source. This + alternative is allowed only occasionally and noncommercially, and + only if you received the object code with such an offer, in accord + with subsection 6b. + + d) Convey the object code by offering access from a designated + place (gratis or for a charge), and offer equivalent access to the + Corresponding Source in the same way through the same place at no + further charge. You need not require recipients to copy the + Corresponding Source along with the object code. If the place to + copy the object code is a network server, the Corresponding Source + may be on a different server (operated by you or a third party) + that supports equivalent copying facilities, provided you maintain + clear directions next to the object code saying where to find the + Corresponding Source. Regardless of what server hosts the + Corresponding Source, you remain obligated to ensure that it is + available for as long as needed to satisfy these requirements. + + e) Convey the object code using peer-to-peer transmission, provided + you inform other peers where the object code and Corresponding + Source of the work are being offered to the general public at no + charge under subsection 6d. + + A separable portion of the object code, whose source code is excluded +from the Corresponding Source as a System Library, need not be +included in conveying the object code work. + + A "User Product" is either (1) a "consumer product", which means any +tangible personal property which is normally used for personal, family, +or household purposes, or (2) anything designed or sold for incorporation +into a dwelling. In determining whether a product is a consumer product, +doubtful cases shall be resolved in favor of coverage. For a particular +product received by a particular user, "normally used" refers to a +typical or common use of that class of product, regardless of the status +of the particular user or of the way in which the particular user +actually uses, or expects or is expected to use, the product. A product +is a consumer product regardless of whether the product has substantial +commercial, industrial or non-consumer uses, unless such uses represent +the only significant mode of use of the product. 
+ + "Installation Information" for a User Product means any methods, +procedures, authorization keys, or other information required to install +and execute modified versions of a covered work in that User Product from +a modified version of its Corresponding Source. The information must +suffice to ensure that the continued functioning of the modified object +code is in no case prevented or interfered with solely because +modification has been made. + + If you convey an object code work under this section in, or with, or +specifically for use in, a User Product, and the conveying occurs as +part of a transaction in which the right of possession and use of the +User Product is transferred to the recipient in perpetuity or for a +fixed term (regardless of how the transaction is characterized), the +Corresponding Source conveyed under this section must be accompanied +by the Installation Information. But this requirement does not apply +if neither you nor any third party retains the ability to install +modified object code on the User Product (for example, the work has +been installed in ROM). + + The requirement to provide Installation Information does not include a +requirement to continue to provide support service, warranty, or updates +for a work that has been modified or installed by the recipient, or for +the User Product in which it has been modified or installed. Access to a +network may be denied when the modification itself materially and +adversely affects the operation of the network or violates the rules and +protocols for communication across the network. + + Corresponding Source conveyed, and Installation Information provided, +in accord with this section must be in a format that is publicly +documented (and with an implementation available to the public in +source code form), and must require no special password or key for +unpacking, reading or copying. + + 7. Additional Terms. + + "Additional permissions" are terms that supplement the terms of this +License by making exceptions from one or more of its conditions. +Additional permissions that are applicable to the entire Program shall +be treated as though they were included in this License, to the extent +that they are valid under applicable law. If additional permissions +apply only to part of the Program, that part may be used separately +under those permissions, but the entire Program remains governed by +this License without regard to the additional permissions. + + When you convey a copy of a covered work, you may at your option +remove any additional permissions from that copy, or from any part of +it. (Additional permissions may be written to require their own +removal in certain cases when you modify the work.) You may place +additional permissions on material, added by you to a covered work, +for which you have or can give appropriate copyright permission. 
+ + Notwithstanding any other provision of this License, for material you +add to a covered work, you may (if authorized by the copyright holders of +that material) supplement the terms of this License with terms: + + a) Disclaiming warranty or limiting liability differently from the + terms of sections 15 and 16 of this License; or + + b) Requiring preservation of specified reasonable legal notices or + author attributions in that material or in the Appropriate Legal + Notices displayed by works containing it; or + + c) Prohibiting misrepresentation of the origin of that material, or + requiring that modified versions of such material be marked in + reasonable ways as different from the original version; or + + d) Limiting the use for publicity purposes of names of licensors or + authors of the material; or + + e) Declining to grant rights under trademark law for use of some + trade names, trademarks, or service marks; or + + f) Requiring indemnification of licensors and authors of that + material by anyone who conveys the material (or modified versions of + it) with contractual assumptions of liability to the recipient, for + any liability that these contractual assumptions directly impose on + those licensors and authors. + + All other non-permissive additional terms are considered "further +restrictions" within the meaning of section 10. If the Program as you +received it, or any part of it, contains a notice stating that it is +governed by this License along with a term that is a further +restriction, you may remove that term. If a license document contains +a further restriction but permits relicensing or conveying under this +License, you may add to a covered work material governed by the terms +of that license document, provided that the further restriction does +not survive such relicensing or conveying. + + If you add terms to a covered work in accord with this section, you +must place, in the relevant source files, a statement of the +additional terms that apply to those files, or a notice indicating +where to find the applicable terms. + + Additional terms, permissive or non-permissive, may be stated in the +form of a separately written license, or stated as exceptions; +the above requirements apply either way. + + 8. Termination. + + You may not propagate or modify a covered work except as expressly +provided under this License. Any attempt otherwise to propagate or +modify it is void, and will automatically terminate your rights under +this License (including any patent licenses granted under the third +paragraph of section 11). + + However, if you cease all violation of this License, then your +license from a particular copyright holder is reinstated (a) +provisionally, unless and until the copyright holder explicitly and +finally terminates your license, and (b) permanently, if the copyright +holder fails to notify you of the violation by some reasonable means +prior to 60 days after the cessation. + + Moreover, your license from a particular copyright holder is +reinstated permanently if the copyright holder notifies you of the +violation by some reasonable means, this is the first time you have +received notice of violation of this License (for any work) from that +copyright holder, and you cure the violation prior to 30 days after +your receipt of the notice. + + Termination of your rights under this section does not terminate the +licenses of parties who have received copies or rights from you under +this License. 
If your rights have been terminated and not permanently +reinstated, you do not qualify to receive new licenses for the same +material under section 10. + + 9. Acceptance Not Required for Having Copies. + + You are not required to accept this License in order to receive or +run a copy of the Program. Ancillary propagation of a covered work +occurring solely as a consequence of using peer-to-peer transmission +to receive a copy likewise does not require acceptance. However, +nothing other than this License grants you permission to propagate or +modify any covered work. These actions infringe copyright if you do +not accept this License. Therefore, by modifying or propagating a +covered work, you indicate your acceptance of this License to do so. + + 10. Automatic Licensing of Downstream Recipients. + + Each time you convey a covered work, the recipient automatically +receives a license from the original licensors, to run, modify and +propagate that work, subject to this License. You are not responsible +for enforcing compliance by third parties with this License. + + An "entity transaction" is a transaction transferring control of an +organization, or substantially all assets of one, or subdividing an +organization, or merging organizations. If propagation of a covered +work results from an entity transaction, each party to that +transaction who receives a copy of the work also receives whatever +licenses to the work the party's predecessor in interest had or could +give under the previous paragraph, plus a right to possession of the +Corresponding Source of the work from the predecessor in interest, if +the predecessor has it or can get it with reasonable efforts. + + You may not impose any further restrictions on the exercise of the +rights granted or affirmed under this License. For example, you may +not impose a license fee, royalty, or other charge for exercise of +rights granted under this License, and you may not initiate litigation +(including a cross-claim or counterclaim in a lawsuit) alleging that +any patent claim is infringed by making, using, selling, offering for +sale, or importing the Program or any portion of it. + + 11. Patents. + + A "contributor" is a copyright holder who authorizes use under this +License of the Program or a work on which the Program is based. The +work thus licensed is called the contributor's "contributor version". + + A contributor's "essential patent claims" are all patent claims +owned or controlled by the contributor, whether already acquired or +hereafter acquired, that would be infringed by some manner, permitted +by this License, of making, using, or selling its contributor version, +but do not include claims that would be infringed only as a +consequence of further modification of the contributor version. For +purposes of this definition, "control" includes the right to grant +patent sublicenses in a manner consistent with the requirements of +this License. + + Each contributor grants you a non-exclusive, worldwide, royalty-free +patent license under the contributor's essential patent claims, to +make, use, sell, offer for sale, import and otherwise run, modify and +propagate the contents of its contributor version. + + In the following three paragraphs, a "patent license" is any express +agreement or commitment, however denominated, not to enforce a patent +(such as an express permission to practice a patent or covenant not to +sue for patent infringement). 
To "grant" such a patent license to a +party means to make such an agreement or commitment not to enforce a +patent against the party. + + If you convey a covered work, knowingly relying on a patent license, +and the Corresponding Source of the work is not available for anyone +to copy, free of charge and under the terms of this License, through a +publicly available network server or other readily accessible means, +then you must either (1) cause the Corresponding Source to be so +available, or (2) arrange to deprive yourself of the benefit of the +patent license for this particular work, or (3) arrange, in a manner +consistent with the requirements of this License, to extend the patent +license to downstream recipients. "Knowingly relying" means you have +actual knowledge that, but for the patent license, your conveying the +covered work in a country, or your recipient's use of the covered work +in a country, would infringe one or more identifiable patents in that +country that you have reason to believe are valid. + + If, pursuant to or in connection with a single transaction or +arrangement, you convey, or propagate by procuring conveyance of, a +covered work, and grant a patent license to some of the parties +receiving the covered work authorizing them to use, propagate, modify +or convey a specific copy of the covered work, then the patent license +you grant is automatically extended to all recipients of the covered +work and works based on it. + + A patent license is "discriminatory" if it does not include within +the scope of its coverage, prohibits the exercise of, or is +conditioned on the non-exercise of one or more of the rights that are +specifically granted under this License. You may not convey a covered +work if you are a party to an arrangement with a third party that is +in the business of distributing software, under which you make payment +to the third party based on the extent of your activity of conveying +the work, and under which the third party grants, to any of the +parties who would receive the covered work from you, a discriminatory +patent license (a) in connection with copies of the covered work +conveyed by you (or copies made from those copies), or (b) primarily +for and in connection with specific products or compilations that +contain the covered work, unless you entered into that arrangement, +or that patent license was granted, prior to 28 March 2007. + + Nothing in this License shall be construed as excluding or limiting +any implied license or other defenses to infringement that may +otherwise be available to you under applicable patent law. + + 12. No Surrender of Others' Freedom. + + If conditions are imposed on you (whether by court order, agreement or +otherwise) that contradict the conditions of this License, they do not +excuse you from the conditions of this License. If you cannot convey a +covered work so as to satisfy simultaneously your obligations under this +License and any other pertinent obligations, then as a consequence you may +not convey it at all. For example, if you agree to terms that obligate you +to collect a royalty for further conveying from those to whom you convey +the Program, the only way you could satisfy both those terms and this +License would be to refrain entirely from conveying the Program. + + 13. Use with the GNU Affero General Public License. 
+ + Notwithstanding any other provision of this License, you have +permission to link or combine any covered work with a work licensed +under version 3 of the GNU Affero General Public License into a single +combined work, and to convey the resulting work. The terms of this +License will continue to apply to the part which is the covered work, +but the special requirements of the GNU Affero General Public License, +section 13, concerning interaction through a network will apply to the +combination as such. + + 14. Revised Versions of this License. + + The Free Software Foundation may publish revised and/or new versions of +the GNU General Public License from time to time. Such new versions will +be similar in spirit to the present version, but may differ in detail to +address new problems or concerns. + + Each version is given a distinguishing version number. If the +Program specifies that a certain numbered version of the GNU General +Public License "or any later version" applies to it, you have the +option of following the terms and conditions either of that numbered +version or of any later version published by the Free Software +Foundation. If the Program does not specify a version number of the +GNU General Public License, you may choose any version ever published +by the Free Software Foundation. + + If the Program specifies that a proxy can decide which future +versions of the GNU General Public License can be used, that proxy's +public statement of acceptance of a version permanently authorizes you +to choose that version for the Program. + + Later license versions may give you additional or different +permissions. However, no additional obligations are imposed on any +author or copyright holder as a result of your choosing to follow a +later version. + + 15. Disclaimer of Warranty. + + THERE IS NO WARRANTY FOR THE PROGRAM, TO THE EXTENT PERMITTED BY +APPLICABLE LAW. EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT +HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY +OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, +THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR +PURPOSE. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE PROGRAM +IS WITH YOU. SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF +ALL NECESSARY SERVICING, REPAIR OR CORRECTION. + + 16. Limitation of Liability. + + IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING +WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MODIFIES AND/OR CONVEYS +THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY +GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE +USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED TO LOSS OF +DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD +PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER PROGRAMS), +EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF +SUCH DAMAGES. + + 17. Interpretation of Sections 15 and 16. + + If the disclaimer of warranty and limitation of liability provided +above cannot be given local legal effect according to their terms, +reviewing courts shall apply local law that most closely approximates +an absolute waiver of all civil liability in connection with the +Program, unless a warranty or assumption of liability accompanies a +copy of the Program in return for a fee. 
+
+                     END OF TERMS AND CONDITIONS
+
+            How to Apply These Terms to Your New Programs
+
+  If you develop a new program, and you want it to be of the greatest
+possible use to the public, the best way to achieve this is to make it
+free software which everyone can redistribute and change under these terms.
+
+  To do so, attach the following notices to the program.  It is safest
+to attach them to the start of each source file to most effectively
+state the exclusion of warranty; and each file should have at least
+the "copyright" line and a pointer to where the full notice is found.
+
+    <one line to give the program's name and a brief idea of what it does.>
+    Copyright (C) <year>  <name of author>
+
+    This program is free software: you can redistribute it and/or modify
+    it under the terms of the GNU General Public License as published by
+    the Free Software Foundation, either version 3 of the License, or
+    (at your option) any later version.
+
+    This program is distributed in the hope that it will be useful,
+    but WITHOUT ANY WARRANTY; without even the implied warranty of
+    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+    GNU General Public License for more details.
+
+    You should have received a copy of the GNU General Public License
+    along with this program.  If not, see <https://www.gnu.org/licenses/>.
+
+Also add information on how to contact you by electronic and paper mail.
+
+  If the program does terminal interaction, make it output a short
+notice like this when it starts in an interactive mode:
+
+    <program>  Copyright (C) <year>  <name of author>
+    This program comes with ABSOLUTELY NO WARRANTY; for details type `show w'.
+    This is free software, and you are welcome to redistribute it
+    under certain conditions; type `show c' for details.
+
+The hypothetical commands `show w' and `show c' should show the appropriate
+parts of the General Public License.  Of course, your program's commands
+might be different; for a GUI interface, you would use an "about box".
+
+  You should also get your employer (if you work as a programmer) or school,
+if any, to sign a "copyright disclaimer" for the program, if necessary.
+For more information on this, and how to apply and follow the GNU GPL, see
+<https://www.gnu.org/licenses/>.
+
+  The GNU General Public License does not permit incorporating your program
+into proprietary programs.  If your program is a subroutine library, you
+may consider it more useful to permit linking proprietary applications with
+the library.  If this is what you want to do, use the GNU Lesser General
+Public License instead of this License.  But first, please read
+<https://www.gnu.org/licenses/why-not-lgpl.html>.
diff --git a/zavodik/nodes/ComfyUI_tinyterraNodes-main/README.md b/zavodik/nodes/ComfyUI_tinyterraNodes-main/README.md
new file mode 100644
index 0000000000000000000000000000000000000000..65609a2eee27c121d7666efa20a6f556b9c33d06
--- /dev/null
+++ b/zavodik/nodes/ComfyUI_tinyterraNodes-main/README.md
@@ -0,0 +1,484 @@
+# tinyterraNodes
+
+*A selection of custom nodes for [ComfyUI](https://github.com/comfyanonymous/ComfyUI).*
+
+**Enjoying my nodes and would like to [help keep me awake](https://buymeacoffee.com/tinyterra)?**
+
+## Installation
+Navigate to the **_ComfyUI/custom_nodes_** directory with cmd, and run:
+
+`git clone https://github.com/TinyTerra/ComfyUI_tinyterraNodes.git`
+
+### Special Features
+**ttN Image Viewer**
+
+*Enabled by default*
+
++ Adds '🌏 Fullscreen Image Viewer' to the node right-click context menu. Opens a fullscreen image viewer containing all images generated by the selected node during the current comfy session.
++ Adds '🌏 Popout Image Viewer' to the node right-click context menu. Opens a popout image viewer containing all images generated by the selected node during the current comfy session.
++ Adds '🌏 Set Default Fullscreen Node' to the node right-click context menu. Sets the currently selected node as the default fullscreen node.
++ Adds '🌏 Clear Default Fullscreen Node' to the node right-click context menu. Clears the assigned default fullscreen node.
+
++ Slideshow Mode
+  + Toggled On - Automatically jumps to new images as they are generated, if the last image is selected (black background) - the UI will auto-hide after a set time.
+  + Toggled Off - Holds the current user-selected image (light background)
++ UI Overlay
+  + Toggles display of a navigable preview of all of the selected node's images
+  + Toggles display of the Image Viewer Settings button
+
++ *Shortcuts*
+  + 'shift + F11' => _Open ttN-Fullscreen Image Viewer using selected node OR default fullscreen node_
+  + 'shift + F10' => _Open ttN-Popout Image Viewer using selected node OR default fullscreen node_
+
++ *Shortcuts in Image Viewer*
+  + 'up arrow' => _Toggle UI Overlay_
+  + 'down arrow' => _Toggle Slideshow Mode_
+  + 'left arrow' => _Select Image to the left_
+  + 'shift + left arrow' => _Select Image 5 to the left_
+  + 'ctrl + left arrow' => _Select the first Image_
+  + 'right arrow' => _Select Image to the right_
+  + 'shift + right arrow' => _Select Image 5 to the right_
+  + 'ctrl + right arrow' => _Select last Image_
+  + 'mouse scroll' => _Zoom the current image in and out_
+  + 'ctrl + mouse scroll' => _Select image to Left/Right_
+  + 'left click + drag' => _Update the current image's position_
+  + 'double click' => _Reset position of current image_
+  + 'esc' => _Close Image Viewer_
+  + 'F' => _Fit image to Viewer window_
+  + Show UI with mouse hover in Slideshow mode
+
+**Advanced XY(Z)Plot**
++ pipeKSampler/SDXL input to generate xyz plots using any previous input nodes.
+  + _(Any values not set by xyPlot will be taken from the corresponding nodes)_
+
++ Advanced xyPlot can set multiple variables for each axis, somewhat programmatically.
+
++ Any image input - use the 'advPlot images' node to create an xyplot from any image input.
+
+Syntax:
+```
+<axis_number:axis_label>
+[node_ID:widget_Name='value']
+
+<axis_number:axis_label>
+[node_ID:widget_Name='value2']
+[node_ID:widget2_Name='value']
+[node_ID2:widget_Name='value']
+```
+For example:
+```
+<1:v_label>
+[2:ckpt_name='model.safetensors']
+
+<2:custom label>
+[2:ckpt_name='checkpoint.xyz']
+[2:vae_name='someVae.xyz']
+[4:text='Summer sunset']
+```
++ labels:
+  + Any custom string for a custom axis label
+  + v_label - for a concatenation of the values being set. In the example above, if both were set to v_label:
+    + model.safetensors
+    + checkpoint.xyz, someVae.xyz, Summer sunset
+  + tv_label - for the option title and value concatenated. In the example above, if both were set to tv_label:
+    + ckpt_name: model.safetensors
+    + ckpt_name: checkpoint.xyz, vae_name: someVae.xyz, text: Summer sunset
+  + itv_label - for the node ID, option title and value concatenated. In the example above, if both were set to itv_label:
+    + [2] ckpt_name: model.safetensors
+    + [2] ckpt_name: checkpoint.xyz, [2] vae_name: someVae.xyz, [4] text: Summer sunset
++ Node IDs:
+  + Suggested to use 'Badge: ID + nickname' in [ComfyUI Manager](https://github.com/ltdrdata/ComfyUI-Manager) settings to be able to view node IDs.
++ Autocomplete:
+  + ttN Autocomplete will activate when the advanced xyPlot node is connected to a sampler, and will show all the nodes and options available, as well as an 'add axis' option to auto-add the code for a new axis number and label.
++ Search and Replace:
+  + If you include %search;replace% as the value, it will take the current node's value and do a search and replace using these values.
+  + You can include more than one to replace different strings.
++ Append to original value
+  + If you add .append to the widget name, it will append the xyPlot value to the original value instead of overwriting it.
+  + For example: [1:loras.append='\']
++ Z-Axis support for multi plotting
+  + Creates extra xyPlots with the z-axis value changes as a base
++ Node based plotting, to avoid having to write the syntax manually
+  + advPlot range for easily creating int/float ranges
+  + advPlot string for delimited string 'ranges'
+
+**Auto Complete**
+
+*Enabled by default*
+
++ Displays a popup to autocomplete embedding filenames in text widgets - to use, start typing **embedding** and select an option from the list.
++ Displays a popup to autocomplete noodlesoup categories - to use, start typing **__** and select an option from the list.
++ Displays a popup in the ttN 'loras' input to autocomplete loras from a list.
++ Option to disable ([ttNodes] enable_embed_autocomplete = True | False)
+
+**Dynamic Widgets**
+
+*Enabled by default*
+
++ Automatically hides and shows widgets depending on their relevancy
++ Option to disable ([ttNodes] enable_dynamic_widgets = True | False)
+
+**ttNinterface**
+
+*Enabled by default*
+
++ Adds 'Node Dimensions 🌏' to the node right-click context menu. Allows setting specific node Width and Height values, as long as they are above the minimum size for the given node.
++ Adds 'Default BG Color 🌏' to the node right-click context menu. Allows setting a specific default background color for every node added.
++ Adds support for 'ctrl + arrow key' Node movement. This aligns the node(s) to the set ComfyUI grid spacing size and moves the node in the direction of the arrow key by the grid spacing value. Holding shift in addition will move the node by the grid spacing size * 10.
++ Adds 'Reload Node 🌏' to the node right-click context menu. Creates a new instance of the node with the same position, size, color and title. It attempts to retain set widget values, which is useful for replacing nodes when a node/widget update occurs.
++ Adds 'Slot Type Color 🌏' to the Link right-click context menu. Opens a color picker dialog menu to update the color of the selected link type.
++ Adds 'Link Border 🌏' to the Link right-click context menu. Toggles link line border.
++ Adds 'Link Shadow 🌏' to the Link right-click context menu. Toggles link line shadow.
++ Adds 'Link Style 🌏' to the Link right-click context menu. Sets the default link line type.
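+
+All of the *Enabled/Disabled by default* toggles above are plain keys in the pack's config.ini, which __init__.py (shown later in this diff) creates and validates on startup. A sketch of that section with the shipped defaults:
+
+```
+[ttNodes]
+auto_update = False
+enable_interface = True
+enable_fullscreen = True
+enable_embed_autocomplete = True
+enable_dynamic_widgets = True
+enable_dev_nodes = False
+```
+
+Values are read case-insensitively, and anything other than true/false is reverted to the default on the next launch.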
+ + +**Save image prefix parsing** + ++ Add date/time info to filenames or output folder by using: %date:yyyy-MM-dd-hh-mm-ss% ++ Parse any upstream setting into filenames or output folder by using %[widget_name]% (for the current node) +or %input_name>input_name>widget_name% (for inputting nodes) + Example: + + +  + + +**Node Versioning** + ++ All tinyterraNodes now have a version property so that if any future changes are made to widgets that would break workflows the nodes will be highlighted on load ++ Will only work with workflows created/saved after the v1.0.0 release + +**AutoUpdate** + +*Disabled by default* + ++ Option to auto-update the node pack ([ttNodes] auto_update = False | True) + + + + $\Large\color{white}{Nodes}$ + +## ttN/base + + tinyLoader + + + + tinyConditioning + + + + tinyKSampler + + +## ttN/pipe + + + pipeLoader v2 + +(Includes [ADV_CLIP_emb](https://github.com/BlenderNeko/ComfyUI_ADV_CLIP_emb)) + + + + + + + pipeKSampler v2 + + +Embedded with Advanced CLIP Text Encode with an additional pipe output + + + + +Old node layout: + + + +With pipeLoader and pipeKSampler: + + + + + + pipeKSamplerAdvanced v2 + +Embedded with Advanced CLIP Text Encode with an additional pipe output + + + + + + pipeLoaderSDXL v2 + +SDXL Loader and Advanced CLIP Text Encode with an additional pipe output + + + + + + + pipeKSamplerSDXL v2 + +SDXL Sampler (base and refiner in one) and Advanced CLIP Text Encode with an additional pipe output + + + +Old node layout: + + + +With pipeLoaderSDXL and pipeKSamplerSDXL: + + + + + + + + pipeEDIT + +Update/Overwrite any of the 8 original inputs in a Pipe line with new information. ++ _**Inputs -** pipe, model, conditioning, conditioning, samples, vae, clip, image, seed_ ++ _**Outputs -** pipe_ + + + + pipe > basic_pipe + +Convert ttN pipe line to basic pipe (to be compatible with [ImpactPack](https://github.com/ltdrdata/ComfyUI-Impact-Pack)), WITH original pipe throughput ++ _**Inputs -** pipe[model, conditioning, conditioning, samples, vae, clip, image, seed]_ ++ _**Outputs -** basic_pipe[model, clip, vae, conditioning, conditioning], pipe_ + + + + pipe > Detailer Pipe + +Convert ttN pipe line to detailer pipe (to be compatible with [ImpactPack](https://github.com/ltdrdata/ComfyUI-Impact-Pack)), WITH original pipe throughput ++ _**Inputs -** pipe[model, conditioning, conditioning, samples, vae, clip, image, seed], bbox_detector, sam_model_opt_ ++ _**Outputs -** detailer_pipe[model, vae, conditioning, conditioning, bbox_detector, sam_model_opt], pipe_ + + +## ttN/xyPlot + + adv xyPlot + +pipeKSampler input to generate xy plots using sampler and loader values. (Any values not set by xyPlot will be taken from the corresponding nodes) + + + + + + advPlot images + +Node to generate xyz plots from any image inputs. + + + + advPlot range + +adv_xyPlot input to generate plot syntax across a range of values. + + + + advPlot string + +adv_xyPlot input to generate plot syntax for strings via a delimiter. + + + + advPlot combo + +adv_xyPlot input to generate plot syntax for combos with various modes. + + +## ttN/image + + + imageOutput + +Preview or Save an image with one node, with image throughput. 
++ _**Inputs -** image, image output[Hide, Preview, Save, Hide/Save], output path, save prefix, number padding[None, 2-9], file type[PNG, JPG, JPEG, BMP, TIFF, TIF] overwrite existing[True, False], embed workflow[True, False]_ ++ _**Outputs -** image_ + + + + + imageRemBG + +(Using [RemBG](https://github.com/danielgatis/rembg)) + +Background Removal node with optional image preview & save. ++ _**Inputs -** image, image output[Disabled, Preview, Save], save prefix_ ++ _**Outputs -** image, mask_ + +Example of a photobashing workflow using pipeNodes, imageRemBG, imageOutput and nodes from [ADV_CLIP_emb](https://github.com/BlenderNeko/ComfyUI_ADV_CLIP_emb) and [ImpactPack](https://github.com/ltdrdata/ComfyUI-Impact-Pack/tree/Main): + + + + + + hiresFix + +Upscale image by model, optional rescale of result image. ++ _**Inputs -** image, vae, upscale_model, rescale_after_model[true, false], rescale[by_percentage, to Width/Height], rescale method[nearest-exact, bilinear, area], factor, width, height, crop, image_output[Hide, Preview, Save], save prefix, output_latent[true, false]_ ++ _**Outputs -** image, latent_ + + +## ttN/text + + text + +Basic TextBox Loader. ++ _**Outputs -** text (STRING)_ + + + + textDebug + +Text input, to display text inside the node, with optional print to console. ++ _**inputs -** text, print_to_console_ ++ _**Outputs -** text (STRING)_ + + + + textConcat + +3 TextBOX inputs with a single concatenated output. ++ _**inputs -** text1, text2, text3 (STRING's), delimiter_ ++ _**Outputs -** text (STRING)_ + + + + 7x TXT Loader Concat + +7 TextBOX inputs concatenated with spaces into a single output, AND separate text outputs. ++ _**inputs -** text1, text2, text3, text4, text5, text6, text7 (STRING's), delimiter_ ++ _**Outputs -** text1, text2, text3, text4, text5, text6, text7, concat (STRING's)_ + + + + 3x TXT Loader MultiConcat + +3 TextBOX inputs with separate text outputs AND multiple concatenation variations (concatenated with spaces). ++ _**inputs -** text1, text2, text3 (STRING's), delimiter_ ++ _**Outputs -** text1, text2, text3, 1 & 2, 1 & 3, 2 & 3, concat (STRING's)_ + + +## ttN/util + + seed + +Basic Seed Loader. 
++ _**Outputs -** seed (INT)_ + + + + float + +float loader and converter ++ _**inputs -** float (FLOAT)_ ++ _**Outputs -** float, int, text (FLOAT, INT, STRING)_ + + + + int + +int loader and converter ++ _**inputs -** int (INT)_ ++ _**Outputs -** int, float, text (INT, FLOAT, STRING)_ + + + + +## ttN/legacy + + + pipeLoader v1 + +(Modified from [Efficiency Nodes](https://github.com/LucianoCirino/efficiency-nodes-comfyui) and [ADV_CLIP_emb](https://github.com/BlenderNeko/ComfyUI_ADV_CLIP_emb)) + +Combination of Efficiency Loader and Advanced CLIP Text Encode with an additional pipe output ++ _**Inputs -** model, vae, clip skip, (lora1, modelstrength clipstrength), (Lora2, modelstrength clipstrength), (Lora3, modelstrength clipstrength), (positive prompt, token normalization, weight interpretation), (negative prompt, token normalization, weight interpretation), (latent width, height), batch size, seed_ ++ _**Outputs -** pipe, model, conditioning, conditioning, samples, vae, clip, seed_ + + + + pipeKSampler v1 + +(Modified from [Efficiency Nodes](https://github.com/LucianoCirino/efficiency-nodes-comfyui) and [QOLS_Omar92](https://github.com/omar92/ComfyUI-QualityOfLifeSuit_Omar92)) + +Combination of Efficiency Loader and Advanced CLIP Text Encode with an additional pipe output ++ _**Inputs -** pipe, (optional pipe overrides), xyplot, (Lora, model strength, clip strength), (upscale method, factor, crop), sampler state, steps, cfg, sampler name, scheduler, denoise, (image output [None, Preview, Save]), Save_Prefix, seed_ ++ _**Outputs -** pipe, model, conditioning, conditioning, samples, vae, clip, image, seed_ + +Old node layout: + + + +With pipeLoader and pipeKSampler: + + + + + + pipeKSamplerAdvanced v1 + +Combination of Efficiency Loader and Advanced CLIP Text Encode with an additional pipe output ++ _**Inputs -** pipe, (optional pipe overrides), xyplot, (Lora, model strength, clip strength), (upscale method, factor, crop), sampler state, steps, cfg, sampler name, scheduler, starts_at_step, return_with_leftover_noise, (image output [None, Preview, Save]), Save_Prefix_ ++ _**Outputs -** pipe, model, conditioning, conditioning, samples, vae, clip, image, seed_ + + + + + pipeLoaderSDXL v1 + +SDXL Loader and Advanced CLIP Text Encode with an additional pipe output ++ _**Inputs -** model, vae, clip skip, (lora1, modelstrength clipstrength), (Lora2, modelstrength clipstrength), model, vae, clip skip, (lora1, modelstrength clipstrength), (Lora2, modelstrength clipstrength), (positive prompt, token normalization, weight interpretation), (negative prompt, token normalization, weight interpretation), (latent width, height), batch size, seed_ ++ _**Outputs -** sdxlpipe, model, conditioning, conditioning, vae, model, conditioning, conditioning, vae, samples, clip, seed_ + + + + pipeKSamplerSDXL v1 + +SDXL Sampler (base and refiner in one) and Advanced CLIP Text Encode with an additional pipe output ++ _**Inputs -** sdxlpipe, (optional pipe overrides), (upscale method, factor, crop), sampler state, base_steps, refiner_steps cfg, sampler name, scheduler, (image output [None, Preview, Save]), Save_Prefix, seed_ ++ _**Outputs -** pipe, model, conditioning, conditioning, vae, model, conditioning, conditioning, vae, samples, clip, image, seed_ + +Old node layout: + + + +With pipeLoaderSDXL and pipeKSamplerSDXL: + + + + + + pipeIN + +Encode up to 8 frequently used inputs into a single Pipe line. 
++ _**Inputs -** model, conditioning, conditioning, samples, vae, clip, image, seed_ ++ _**Outputs -** pipe_ + + + + pipeOUT + +Decode single Pipe line into the 8 original outputs, AND a Pipe throughput. ++ _**Inputs -** pipe_ ++ _**Outputs -** model, conditioning, conditioning, samples, vae, clip, image, seed, pipe_ + + + + pipe > xyPlot + +pipeKSampler input to generate xy plots using sampler and loader values. (Any values not set by xyPlot will be taken from the corresponding pipeKSampler or pipeLoader) ++ _**Inputs -** grid_spacing, latent_id, flip_xy, x_axis, x_values, y_axis, y_values_ ++ _**Outputs -** xyPlot_ + \ No newline at end of file diff --git a/zavodik/nodes/ComfyUI_tinyterraNodes-main/__init__.py b/zavodik/nodes/ComfyUI_tinyterraNodes-main/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..610e39719f403c9e21eb833830a140f70179cb6c --- /dev/null +++ b/zavodik/nodes/ComfyUI_tinyterraNodes-main/__init__.py @@ -0,0 +1,162 @@ +from .ttNpy.tinyterraNodes import TTN_VERSIONS +from .ttNpy import ttNserver # Do Not Remove +import configparser +import folder_paths +import subprocess +import shutil +import os + +# ------- CONFIG -------- # +cwd_path = os.path.dirname(os.path.realpath(__file__)) +js_path = os.path.join(cwd_path, "js") +comfy_path = folder_paths.base_path + +config_path = os.path.join(cwd_path, "config.ini") + +optionValues = { + "auto_update": ('true', 'false'), + "enable_embed_autocomplete": ('true', 'false'), + "enable_interface": ('true', 'false'), + "enable_fullscreen": ('true', 'false'), + "enable_dynamic_widgets": ('true', 'false'), + "enable_dev_nodes": ('true', 'false'), + } + +def get_config(): + """Return a configparser.ConfigParser object.""" + config = configparser.ConfigParser() + config.read(config_path) + return config + +def update_config(): + #section > option > value + for node, version in TTN_VERSIONS.items(): + config_write("Versions", node, version) + + for option, value in optionValues.items(): + config_write("Option Values", option, value) + + section_data = { + "ttNodes": { + "auto_update": False, + "enable_interface": True, + "enable_fullscreen": True, + "enable_embed_autocomplete": True, + "enable_dynamic_widgets": True, + "enable_dev_nodes": False, + } + } + + for section, data in section_data.items(): + for option, value in data.items(): + if config_read(section, option) is None: + config_write(section, option, value) + + # Load the configuration data into a dictionary. + config_data = config_load() + + # Iterate through the configuration data. + for section, options in config_data.items(): + if section == "Versions": + continue + for option in options: + # If the option is not in `optionValues` or in `section_data`, remove it. 
+ if (option not in optionValues and + (section not in section_data or option not in section_data[section])): + config_remove(section, option) + +def config_load(): + """Load the entire configuration into a dictionary.""" + config = get_config() + return {section: dict(config.items(section)) for section in config.sections()} + +def config_read(section, option): + """Read a configuration option.""" + config = get_config() + return config.get(section, option, fallback=None) + +def config_write(section, option, value): + """Write a configuration option.""" + config = get_config() + if not config.has_section(section): + config.add_section(section) + config.set(section, str(option), str(value)) + + with open(config_path, 'w') as f: + config.write(f) + +def config_remove(section, option): + """Remove an option from a section.""" + config = get_config() + if config.has_section(section): + config.remove_option(section, option) + with open(config_path, 'w') as f: + config.write(f) + +def config_value_validator(section, option, default): + value = str(config_read(section, option)).lower() + if value not in optionValues[option]: + print(f'\033[92m[{section} Config]\033[91m {option} - \'{value}\' not in {optionValues[option]}, reverting to default.\033[0m') + config_write(section, option, default) + return default + else: + return value + +# Create a config file if not exists +if not os.path.isfile(config_path): + with open(config_path, 'w') as f: + pass + +update_config() + +# Autoupdate if True +if config_value_validator("ttNodes", "auto_update", 'false') == 'true': + try: + with subprocess.Popen(["git", "pull"], cwd=cwd_path, stdout=subprocess.PIPE) as p: + p.wait() + result = p.communicate()[0].decode() + if result != "Already up to date.\n": + print("\033[92m[t ttNodes Updated t]\033[0m") + except: + pass + +# --------- WEB ---------- # +# Remove old web JS folder +web_extension_path = os.path.join(comfy_path, "web", "extensions", "tinyterraNodes") + +if os.path.exists(web_extension_path): + try: + shutil.rmtree(web_extension_path) + except: + print("\033[92m[ttNodes] \033[0;31mFailed to remove old web extension.\033[0m") + +js_files = { + "interface": "enable_interface", + "imgViewer": "enable_fullscreen", + "embedAC": "enable_embed_autocomplete", + "dynamicWidgets": "enable_dynamic_widgets", +} +for js_file, config_key in js_files.items(): + file_path = os.path.join(js_path, f"ttN{js_file}.js") + if config_value_validator("ttNodes", config_key, 'true') == 'false' and os.path.isfile(file_path): + os.rename(file_path, f"{file_path}.disable") + elif config_value_validator("ttNodes", config_key, 'true') == 'true' and os.path.isfile(f"{file_path}.disable"): + os.rename(f"{file_path}.disable", file_path) + +# Enable Dev Nodes if True +if config_value_validator("ttNodes", "enable_dev_nodes", 'true') == 'true': + from .ttNdev import NODE_CLASS_MAPPINGS as ttNdev_CLASS_MAPPINGS, NODE_DISPLAY_NAME_MAPPINGS as ttNdev_DISPLAY_NAME_MAPPINGS +else: + ttNdev_CLASS_MAPPINGS = {} + ttNdev_DISPLAY_NAME_MAPPINGS = {} + +# ------- MAPPING ------- # +from .ttNpy.tinyterraNodes import NODE_CLASS_MAPPINGS as TTN_CLASS_MAPPINGS, NODE_DISPLAY_NAME_MAPPINGS as TTN_DISPLAY_NAME_MAPPINGS +from .ttNpy.ttNlegacyNodes import NODE_CLASS_MAPPINGS as LEGACY_CLASS_MAPPINGS, NODE_DISPLAY_NAME_MAPPINGS as LEGACY_DISPLAY_NAME_MAPPINGS + +NODE_CLASS_MAPPINGS = {**TTN_CLASS_MAPPINGS, **LEGACY_CLASS_MAPPINGS, **ttNdev_CLASS_MAPPINGS} +NODE_DISPLAY_NAME_MAPPINGS = {**TTN_DISPLAY_NAME_MAPPINGS, **LEGACY_DISPLAY_NAME_MAPPINGS, 
**ttNdev_DISPLAY_NAME_MAPPINGS} + +WEB_DIRECTORY = "./js" + +__all__ = ['NODE_CLASS_MAPPINGS', 'NODE_DISPLAY_NAME_MAPPINGS', 'WEB_DIRECTORY'] diff --git a/zavodik/nodes/ComfyUI_tinyterraNodes-main/arial.ttf b/zavodik/nodes/ComfyUI_tinyterraNodes-main/arial.ttf new file mode 100644 index 0000000000000000000000000000000000000000..b251fe231398e2419aeec5bf8b4423152489e813 --- /dev/null +++ b/zavodik/nodes/ComfyUI_tinyterraNodes-main/arial.ttf @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:413c78f91bd39e134f3c0bb204b1d5a90f29df9efddc8fd26950a178058d5d74 +size 367112 diff --git a/zavodik/nodes/ComfyUI_tinyterraNodes-main/images/icon.jpg b/zavodik/nodes/ComfyUI_tinyterraNodes-main/images/icon.jpg new file mode 100644 index 0000000000000000000000000000000000000000..8b08a1b092a8875071fa34a80b2b1b78c2dfe5f3 --- /dev/null +++ b/zavodik/nodes/ComfyUI_tinyterraNodes-main/images/icon.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:53eec0f8217d979a0b1be6ca7d1c027a968329f7bf8b4ce37526d51a842a0e41 +size 43380 diff --git a/zavodik/nodes/ComfyUI_tinyterraNodes-main/images/tinyterra_pipeSDXL.png b/zavodik/nodes/ComfyUI_tinyterraNodes-main/images/tinyterra_pipeSDXL.png new file mode 100644 index 0000000000000000000000000000000000000000..8351641b63713f82b0fb93aa9d9502a16bd7c070 --- /dev/null +++ b/zavodik/nodes/ComfyUI_tinyterraNodes-main/images/tinyterra_pipeSDXL.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:8a2952324499183287e42b5b538e6dc2d47d7fc6d197b076c80a1fb020bbadf6 +size 846316 diff --git a/zavodik/nodes/ComfyUI_tinyterraNodes-main/images/tinyterra_prefixParsing.png b/zavodik/nodes/ComfyUI_tinyterraNodes-main/images/tinyterra_prefixParsing.png new file mode 100644 index 0000000000000000000000000000000000000000..9d2e09da4bae37a8c1b9c0ee0b5b85a51d3ca437 --- /dev/null +++ b/zavodik/nodes/ComfyUI_tinyterraNodes-main/images/tinyterra_prefixParsing.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:d8badcc39ea328b71b301aa9b4a1b014a9c797e25e57724947e5e84f9e4e9080 +size 953564 diff --git a/zavodik/nodes/ComfyUI_tinyterraNodes-main/images/tinyterra_trueHRFix.png b/zavodik/nodes/ComfyUI_tinyterraNodes-main/images/tinyterra_trueHRFix.png new file mode 100644 index 0000000000000000000000000000000000000000..249202c370937209cc30f8dd2741ea53b2917ffa --- /dev/null +++ b/zavodik/nodes/ComfyUI_tinyterraNodes-main/images/tinyterra_trueHRFix.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:b9bffd61a21fc143c3eb6bb40d9acd6c056ab2b959a8931dd68f3b081a36aa8d +size 1102664 diff --git a/zavodik/nodes/ComfyUI_tinyterraNodes-main/images/tinyterra_xyPlot.png b/zavodik/nodes/ComfyUI_tinyterraNodes-main/images/tinyterra_xyPlot.png new file mode 100644 index 0000000000000000000000000000000000000000..5e6165adf943bad6572a67dcc1f8e65c9f2f8506 --- /dev/null +++ b/zavodik/nodes/ComfyUI_tinyterraNodes-main/images/tinyterra_xyPlot.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:90f000697e72101a6ec95bf00d810c073460abbc0208e9c5c5e3569cdf94dfe5 +size 849838 diff --git a/zavodik/nodes/ComfyUI_tinyterraNodes-main/js/ttN.css b/zavodik/nodes/ComfyUI_tinyterraNodes-main/js/ttN.css new file mode 100644 index 0000000000000000000000000000000000000000..41398a97345a43d503fd1ec61ebff421ce680e80 --- /dev/null +++ b/zavodik/nodes/ComfyUI_tinyterraNodes-main/js/ttN.css @@ -0,0 +1,130 @@ +.litegraph.litecontextmenu .litemenu-title .tinyterra-contextmenu-title, +.litegraph.litecontextmenu 
.litemenu-entry.tinyterra-contextmenu-item { + background-color: #212121 !important; + margin: 0; + display: flex; + flex-direction: row; + align-items: center; + justify-content: start; +} + +.litegraph.litecontextmenu .litemenu-title .tinyterra-contextmenu-title, +.litegraph.litecontextmenu .litemenu-entry.tinyterra-contextmenu-label { + background-color: #000 !important; + margin: 0; + cursor: default; + opacity: 1; + padding: 4px; + font-weight: bold; +} + + + + +/* Dropdown */ +.ttN-dropdown, .ttN-nested-dropdown { + position: relative; + box-sizing: border-box; + background-color: #171717; + box-shadow: 0 4px 4px rgba(255, 255, 255, .25); + padding: 0; + margin: 0; + list-style: none; + z-index: 1000; + overflow: visible; + max-height: fit-content; + max-width: fit-content; +} + +.ttN-dropdown { + position: absolute; + border-radius: 0; +} + +.ttN-dropdown.ttN-dropdown-scrollable { + max-height: min(48vh, 360px); + min-width: 220px; + overflow-y: auto; + overflow-x: hidden; + overscroll-behavior: contain; + scrollbar-gutter: stable; +} + +.ttN-nested-dropdown.ttN-dropdown-scrollable { + max-height: min(48vh, 360px); + overflow-y: auto; + overflow-x: hidden; + overscroll-behavior: contain; + scrollbar-gutter: stable; +} + +.ttN-dropdown.ttN-dropdown-scrollable::-webkit-scrollbar, +.ttN-nested-dropdown.ttN-dropdown-scrollable::-webkit-scrollbar { + width: 10px; +} + +.ttN-dropdown.ttN-dropdown-scrollable::-webkit-scrollbar-track, +.ttN-nested-dropdown.ttN-dropdown-scrollable::-webkit-scrollbar-track { + background: #121212; +} + +.ttN-dropdown.ttN-dropdown-scrollable::-webkit-scrollbar-thumb, +.ttN-nested-dropdown.ttN-dropdown-scrollable::-webkit-scrollbar-thumb { + background: #4b4b4b; + border-radius: 8px; +} + +.ttN-dropdown.ttN-dropdown-scrollable::-webkit-scrollbar-thumb:hover, +.ttN-nested-dropdown.ttN-dropdown-scrollable::-webkit-scrollbar-thumb:hover { + background: #646464; +} + +/* Style for final items */ +.ttN-dropdown li.item, .ttN-nested-dropdown li.item { + font-weight: normal; + min-width: max-content; +} + +/* Style for folders (parent items) */ +.ttN-dropdown li.folder, .ttN-nested-dropdown li.folder { + cursor: default; + position: relative; + border-right: 3px solid #005757; +} + +.ttN-dropdown li.folder::after, .ttN-nested-dropdown li.folder::after { + content: ">"; + position: absolute; + right: 2px; + font-weight: normal; +} + +.ttN-dropdown li, .ttN-nested-dropdown li { + padding: 4px 10px; + cursor: pointer; + font-family: system-ui; + font-size: 0.7rem; + position: relative; +} + +/* Style for nested dropdowns */ +.ttN-nested-dropdown { + position: absolute; + top: 0; + left: 100%; + margin: 0; + border: none; + display: none; +} + +.ttN-dropdown li.selected > .ttN-nested-dropdown, +.ttN-nested-dropdown li.selected > .ttN-nested-dropdown { + display: block; + border: none; +} + +.ttN-dropdown li.selected, +.ttN-nested-dropdown li.selected { + background-color: #222222; + border: none; +} diff --git a/zavodik/nodes/ComfyUI_tinyterraNodes-main/js/ttN.js b/zavodik/nodes/ComfyUI_tinyterraNodes-main/js/ttN.js new file mode 100644 index 0000000000000000000000000000000000000000..ee061a53482bf51602747abfe371284df17e556a --- /dev/null +++ b/zavodik/nodes/ComfyUI_tinyterraNodes-main/js/ttN.js @@ -0,0 +1,707 @@ +import { app } from "../../scripts/app.js"; +import { tinyterraReloadNode, wait, rebootAPI, getConfig, convertToInput, hideWidget } from "./utils.js"; +import { openFullscreenApp, openPopoutViewer, _setDefaultFullscreenNode } from "./ttNimgViewer.js"; + +class 
TinyTerra extends EventTarget { + constructor() { + super(); + this.ctrlKey = false + this.altKey = false + this.shiftKey = false + this.downKeys = {} + this.processingMouseDown = false + this.processingMouseUp = false + this.processingMouseMove = false + window.addEventListener("keydown", (e) => { + this.handleKeydown(e) + }) + window.addEventListener("keyup", (e) => { + this.handleKeyup(e) + }) + this.initialiseContextMenu() + this.initialiseNodeMenu() + this.injectTtnCss() + } + async initialiseContextMenu() { + const that = this; + setTimeout(async () => { + const getCanvasMenuOptions = LGraphCanvas.prototype.getCanvasMenuOptions; + LGraphCanvas.prototype.getCanvasMenuOptions = function (...args) { + const options = getCanvasMenuOptions.apply(this, [...args]); + options.push(null); + options.push({ + content: `🌏 tinyterraNodes`, + className: "ttN-contextmenu-item ttN-contextmenu-main-item", + submenu: { + options: that.getTinyTerraContextMenuItems(), + }, + }); + + // Remove consecutive null entries + let i = 0; + while (i < options.length) { + if (options[i] === null && (i === 0 || options[i - 1] === null)) { + options.splice(i, 1); + } else { + i++; + } + } + return options; + }; + }, 1000); + } + getTinyTerraContextMenuItems() { + const that = this + return [ + { + content: "🌏 Nodes", + disabled: true, + className: "tinyterra-contextmenu-item tinyterra-contextmenu-label", + }, + { + content: "base", + className: "tinyterra-contextmenu-item", + has_submenu: true, + callback: (...args) => { + that.addTTNodeMenu('base/', args[3], args[2]) + } + }, + { + content: "pipe", + className: "tinyterra-contextmenu-item", + has_submenu: true, + callback: (...args) => { + that.addTTNodeMenu('pipe/', args[3], args[2]) + } + }, + { + content: "xyPlot", + className: "tinyterra-contextmenu-item", + has_submenu: true, + callback: (...args) => { + that.addTTNodeMenu('xyPlot/', args[3], args[2]) + } + }, + { + content: "text", + className: "tinyterra-contextmenu-item", + has_submenu: true, + callback: (...args) => { + that.addTTNodeMenu('text/', args[3], args[2]) + } + }, + { + content: "image", + className: "tinyterra-contextmenu-item", + has_submenu: true, + callback: (...args) => { + that.addTTNodeMenu('image/', args[3], args[2]) + } + }, + { + content: "util", + className: "tinyterra-contextmenu-item", + has_submenu: true, + callback: (...args) => { + that.addTTNodeMenu('util/', args[3], args[2]) + } + }, + { + content: "🌏 Add Group", + disabled: true, + className: "tinyterra-contextmenu-item tinyterra-contextmenu-label", + }, + { + content: "Basic Sampling", + className: "tinyterra-contextmenu-item", + has_submenu: true, + callback : function(value, event, mouseEvent, contextMenu){ + that.addGroupMenu('basic', contextMenu, mouseEvent) + } + }, + { + content: "Upscaling", + className: "tinyterra-contextmenu-item", + has_submenu: true, + callback : function(value, event, mouseEvent, contextMenu){ + that.addGroupMenu('upscale', contextMenu, mouseEvent) + } + }, + { + content: "xyPlotting", + className: "tinyterra-contextmenu-item", + has_submenu: true, + callback : function(value, event, mouseEvent, contextMenu){ + that.addGroupMenu('xyPlot', contextMenu, mouseEvent) + } + }, + { + content: "🌏 Extras", + disabled: true, + className: "tinyterra-contextmenu-item tinyterra-contextmenu-label", + }, + // { + // content: "⚙️ Settings (tinyterra)", + // disabled: true, //!!this.settingsDialog, + // className: "tinyterra-contextmenu-item", + // callback: (...args) => { + // this.settingsDialog = new 
tinyterraConfigDialog().show(); + // this.settingsDialog.addEventListener("close", (e) => { + // this.settingsDialog = null; + // }); + // }, + // }, + { + content: "🛑 Reboot Comfy", + className: "tinyterra-contextmenu-item", + callback: (...args) => { + rebootAPI(); + wait(1000).then(() => { + window.location.reload(); + }); + } + }, + { + content: "⭐ Star on Github", + className: "tinyterra-contextmenu-item", + callback: (...args) => { + window.open("https://github.com/TinyTerra/ComfyUI_tinyterraNodes", "_blank"); + }, + }, + { + content: "☕ Support TinyTerra", + className: "tinyterra-contextmenu-item", + callback: (...args) => { + window.open("https://buymeacoffee.com/tinyterra", "_blank"); + }, + }, + + ]; + } + addNode = async (node, pos) => { + var canvas = LGraphCanvas.active_canvas; + canvas.graph.beforeChange(); + var node = LiteGraph.createNode(node); + if (node) { + node.pos = pos; + canvas.graph.add(node); + } + canvas.graph.afterChange(); + return node + } + addGroup = async (contextMenu, nodes) => { + var first_event = contextMenu.getFirstEvent(); + var canvas = LGraphCanvas.active_canvas; + var canvasOffset = canvas.convertEventToCanvasOffset(first_event); + + // Create Nodes + for (const nodeData of Object.values(nodes)) { + var node = await this.addNode(nodeData.nodeType, canvasOffset); + nodeData.graphNode = node; + canvasOffset = [canvasOffset[0] + nodeData.width + 10, canvasOffset[1]]; + } + + // Handle Widget Changes + for (const nodeData of Object.values(nodes)) { + var node = nodeData.graphNode; + if (nodeData.widgets) { + for (const [widget, value] of Object.entries(nodeData.widgets)) { + if (value == 'toInput') { + const config = getConfig(widget, node) + convertToInput(node, node.widgets.find((w) => w.name === widget), config); + } else { + if (node) { + node.widgets.find((w) => w.name === widget).value = value + } + } + } + } + } + + // Handle Connections + for (const nodeData of Object.values(nodes)) { + var node = nodeData.graphNode; + if (nodeData.connections) { + for (const c of nodeData.connections) { + node.connect(parseInt(c[0]), nodes[c[1]].graphNode.id, c[2]); + } + } + } + } + addTTNodeMenu(category, prev_menu, e, callback=null) { + var canvas = LGraphCanvas.active_canvas; + var ref_window = canvas.getCanvasWindow(); + var graph = canvas.graph; + const base_category = '🌏 tinyterra/' + category + + var entries = []; + + var nodes = LiteGraph.getNodeTypesInCategory(base_category.slice(0, -1), canvas.filter || graph.filter ); + nodes.map(function(node){ + if (node.skip_list) + return; + + var entry = { + value: node.type, + content: node.title, + className: "tinyterra-contextmenu-item", + has_submenu: false, + callback : function(value, event, mouseEvent, contextMenu){ + var first_event = contextMenu.getFirstEvent(); + canvas.graph.beforeChange(); + var node = LiteGraph.createNode(value.value); + if (node) { + node.pos = canvas.convertEventToCanvasOffset(first_event); + canvas.graph.add(node); + } + if(callback) + callback(node); + canvas.graph.afterChange(); + } + } + + entries.push(entry); + }); + + new LiteGraph.ContextMenu( entries, { event: e, parentMenu: prev_menu }, ref_window ); + } + addGroupMenu(group, prev_menu, e) { + const that = this; + var canvas = LGraphCanvas.active_canvas; + var ref_window = canvas.getCanvasWindow(); + let entries; + switch (group) { + case "basic": + entries = [ + { content: "Base ttN", + className: "tinyterra-contextmenu-item", + callback : async function(value, event, mouseEvent, contextMenu){ + const nodes = { + 
'Loader': { + nodeType: 'ttN tinyLoader', + graphNode: null, + width: 315, + connections: [ + [0, 'Conditioning', 'model'], + [1, 'KSampler', 'latent'], + [2, 'KSampler', 'vae'], + [3, 'Conditioning', 'clip'], + ], + }, + 'Conditioning': { + nodeType: 'ttN conditioning', + graphNode: null, + width: 400, + connections: [ + [0, 'KSampler', 'model'], + [1, 'KSampler', 'positive'], + [2, 'KSampler', 'negative'], + [3, 'KSampler', 'clip'], + ], + }, + 'KSampler': { + nodeType: 'ttN KSampler_v2', + graphNode: null, + width: 262, + widgets: { + image_output: 'Preview' + } + } + } + that.addGroup(contextMenu, nodes) + } + }, + { content: "Pipe Basic", + className: "tinyterra-contextmenu-item", + callback : async function(value, event, mouseEvent, contextMenu){ + const nodes = { + 'Loader': { + nodeType: 'ttN pipeLoader_v2', + graphNode: null, + width: 315, + connections: [ + [0, 'KSampler', 'pipe'] + ], + }, + 'KSampler': { + nodeType: 'ttN pipeKSampler_v2', + graphNode: null, + width: 262, + widgets: { + image_output: 'Preview' + } + } + } + that.addGroup(contextMenu, nodes) + } + }, + { content: "Pipe SDXL", + className: "tinyterra-contextmenu-item", + callback : async function(value, event, mouseEvent, contextMenu){ + const nodes = { + 'Loader': { + nodeType: 'ttN pipeLoaderSDXL_v2', + graphNode: null, + width: 365, + connections: [ + [0, 'KSampler', 'sdxl_pipe'] + ], + }, + 'KSampler': { + nodeType: 'ttN pipeKSamplerSDXL_v2', + graphNode: null, + width: 365, + widgets: { + image_output: 'Preview' + } + } + } + that.addGroup(contextMenu, nodes) + } + }, + ]; + break; + + case "upscale": + entries = [ + { content: "Base upscale", + className: "tinyterra-contextmenu-item", + callback : async function(value, event, mouseEvent, contextMenu){ + const nodes = { + 'Loader': { + nodeType: 'ttN tinyLoader', + graphNode: null, + width: 315, + connections: [ + [0, 'Conditioning', 'model'], + [1, 'KSampler', 'latent'], + [2, 'KSampler', 'vae'], + [3, 'Conditioning', 'clip'], + ], + }, + 'Conditioning': { + nodeType: 'ttN conditioning', + graphNode: null, + width: 400, + connections: [ + [0, 'KSampler', 'model'], + [1, 'KSampler', 'positive'], + [2, 'KSampler', 'negative'], + [3, 'KSampler', 'clip'], + ], + }, + 'KSampler': { + nodeType: 'ttN KSampler_v2', + graphNode: null, + width: 262, + connections: [ + [0, 'KSampler2', 'model'], + [1, 'KSampler2', 'positive'], + [2, 'KSampler2', 'negative'], + [3, 'KSampler2', 'latent'], + [4, 'KSampler2', 'vae'], + [5, 'KSampler2', 'clip'], + [6, 'KSampler2', 'input_image_override'] + ], + widgets: { + image_output: 'Preview', + } + }, + 'KSampler2': { + nodeType: 'ttN KSampler_v2', + graphNode: null, + width: 262, + widgets: { + upscale_method: '[hiresFix] nearest-exact', + image_output: 'Preview', + denoise: 0.5, + steps: 15 + } + }, + } + that.addGroup(contextMenu, nodes) + } + }, + { content: "Pipe Upscale", + className: "tinyterra-contextmenu-item", + callback : async function(value, event, mouseEvent, contextMenu){ + const nodes = { + 'loader1': { + nodeType: 'ttN pipeLoader_v2', + graphNode: null, + width: 315, + connections: [ + [0, 'ksampler', 'pipe'] + ], + }, + 'ksampler': { + nodeType: 'ttN pipeKSampler_v2', + graphNode: null, + width: 262, + connections: [ + [0, 'ksampler2', 'pipe'] + ], + widgets: { + image_output: 'Preview' + }, + }, + 'ksampler2': { + nodeType: 'ttN pipeKSampler_v2', + graphNode: null, + width: 262, + widgets: { + upscale_method: '[hiresFix] nearest-exact', + denoise: 0.5, + seed: 'toInput', + image_output: 'Preview' + } + } + } + 
that.addGroup(contextMenu, nodes) + } + }, + ]; + break; + + case "xyPlot": + entries = [ + { content: "Base xyPlot", + className: "tinyterra-contextmenu-item", + callback : async function(value, event, mouseEvent, contextMenu){ + const nodes = { + 'Loader': { + nodeType: 'ttN tinyLoader', + graphNode: null, + width: 315, + connections: [ + [0, 'Conditioning', 'model'], + [1, 'KSampler', 'latent'], + [2, 'KSampler', 'vae'], + [3, 'Conditioning', 'clip'], + ], + }, + 'Conditioning': { + nodeType: 'ttN conditioning', + graphNode: null, + width: 400, + connections: [ + [0, 'KSampler', 'model'], + [1, 'KSampler', 'positive'], + [2, 'KSampler', 'negative'], + [3, 'KSampler', 'clip'], + ], + }, + 'xyPlot': { + nodeType: 'ttN advanced xyPlot', + graphNode: null, + width: 400, + connections: [ + [0, 'KSampler', 'adv_xyPlot'], + ], + }, + 'KSampler': { + nodeType: 'ttN KSampler_v2', + graphNode: null, + width: 262, + widgets: { + image_output: 'Preview' + } + } + } + that.addGroup(contextMenu, nodes) + } + }, + { content: "Pipe xyPlot", + className: "tinyterra-contextmenu-item", + callback : async function(value, event, mouseEvent, contextMenu){ + const nodes = { + 'Loader': { + nodeType: 'ttN pipeLoader_v2', + graphNode: null, + width: 315, + connections: [ + [0, 'KSampler', 'pipe'], + ], + }, + 'xyPlot': { + nodeType: 'ttN advanced xyPlot', + graphNode: null, + width: 400, + connections: [ + [0, 'KSampler', 'adv_xyPlot'], + ], + }, + 'KSampler': { + nodeType: 'ttN pipeKSampler_v2', + graphNode: null, + width: 262, + widgets: { + image_output: 'Preview' + } + } + } + that.addGroup(contextMenu, nodes) + } + }, + ] + } + new LiteGraph.ContextMenu( entries, { event: e, parentMenu: prev_menu }, ref_window ); + } + async initialiseNodeMenu() { + const that = this; + setTimeout(async () => { + const getNodeMenuOptions = LGraphCanvas.prototype.getNodeMenuOptions; + LGraphCanvas.prototype.getNodeMenuOptions = function (node) { + const options = getNodeMenuOptions.apply(this, arguments); + node.setDirtyCanvas(true, true); + const ttNoptions = that.getTinyTerraNodeMenuItems(node) + options.splice(options.length - 1, 0, ...ttNoptions, null); + + return options; + }; + },500) + } + getTinyTerraNodeMenuItems(node) { + return [ + { + content: "🌏 Fullscreen Image Viewer", + callback: () => { openFullscreenApp(node) } + }, + { + content: "🌏 Pop-Out Image Viewer", + callback: () => { openPopoutViewer(node) } + }, + { + content: "🌏 Set Default Viewer Node", + callback: _setDefaultFullscreenNode + }, + { + content: "🌏 Clear Default Viewer Node", + callback: function () { + sessionStorage.removeItem('Comfy.Settings.ttN.default_fullscreen_node'); + } + }, + null, + { + content: "🌏 Default Node BG Color", + has_submenu: true, + callback: LGraphCanvas.ttNsetDefaultBGColor + }, + { + content: "🌏 Node Dimensions", + callback: () => { LGraphCanvas.prototype.ttNsetNodeDimension(node); } + }, + { + content: "🌏 Reload Node", + callback: () => { + const active_canvas = LGraphCanvas.active_canvas; + if (!active_canvas.selected_nodes || Object.keys(active_canvas.selected_nodes).length <= 1) { + tinyterraReloadNode(node); + } else { + for (var i in active_canvas.selected_nodes) { + tinyterraReloadNode(active_canvas.selected_nodes[i]); + } + } + } + }, + ] + } + handleKeydown(e) { + this.ctrlKey = !!e.ctrlKey + this.altKey = !!e.altKey + this.shiftKey = !!e.shiftKey + this.downKeys[e.key.toLocaleUpperCase()] = true + this.downKeys["^" + e.key.toLocaleUpperCase()] = true + } + handleKeyup(e) { + this.ctrlKey = !!e.ctrlKey + 
this.altKey = !!e.altKey + this.shiftKey = !!e.shiftKey + this.downKeys[e.key.toLocaleUpperCase()] = false + this.downKeys["^" + e.key.toLocaleUpperCase()] = false + } + injectTtnCss() { + const link = document.createElement("link"); + link.rel = "stylesheet"; + link.type = "text/css"; + link.href = "extensions/ComfyUI_tinyterraNodes/ttN.css"; + + link.onerror = function () { + if (this.href.includes("comfyui_tinyterranodes")) { + console.error("tinyterraNodes: Failed to load CSS file. Please check nodepack folder name."); + return; + } + this.href = "extensions/comfyui_tinyterranodes/ttN.css" + } + document.head.appendChild(link); + } +} + +export const tinyterra = new TinyTerra(); +window.tinyterra = tinyterra; + +app.registerExtension({ + name: "comfy.ttN", + setup() { + if (!localStorage.getItem("ttN.pysssss")) { + const ttNckpts = ['ttN pipeLoader_v2', "ttN pipeLoaderSDXL_v2", "ttN tinyLoader"] + let pysCheckpoints = app.ui.settings.getSettingValue('pysssss.ModelInfo.CheckpointNodes') + if (pysCheckpoints) { + for (let ckpt of ttNckpts) { + if (!pysCheckpoints.includes(ckpt)) { + pysCheckpoints = `${pysCheckpoints},${ckpt}` + } + } + app.ui.settings.setSettingValue('pysssss.ModelInfo.CheckpointNodes', pysCheckpoints) + } + + const ttNloras = ['ttN KSampler_v2', 'ttN pipeKSampler_v2', 'ttN pipeKSamplerAdvanced_v2', 'ttN pipeKSamplerSDXL_v2', ] + let pysLoras = app.ui.settings.getSettingValue('pysssss.ModelInfo.LoraNodes') + if (pysLoras) { + for (let lora of ttNloras) { + if (!pysLoras.includes(lora)) { + pysLoras = `${pysLoras},${lora}` + } + } + app.ui.settings.setSettingValue('pysssss.ModelInfo.LoraNodes', pysLoras) + } + if (pysCheckpoints && pysLoras) { + localStorage.setItem("ttN.pysssss", true) + } + } + }, + async beforeRegisterNodeDef(nodeType, nodeData, app) { + if (nodeData.name.startsWith("ttN")) { + const origOnConfigure = nodeType.prototype.onConfigure; + nodeType.prototype.onConfigure = function () { + const r = origOnConfigure ? origOnConfigure.apply(this, arguments) : undefined; + let nodeVersion = nodeData.input.hidden?.ttNnodeVersion ? nodeData.input.hidden.ttNnodeVersion : null; + nodeType.ttNnodeVersion = nodeVersion; + this.properties['ttNnodeVersion'] = this.properties['ttNnodeVersion'] ? this.properties['ttNnodeVersion'] : nodeVersion; + if ((this.properties['ttNnodeVersion']?.split(".")[0] !== nodeVersion?.split(".")[0]) || (this.properties['ttNnodeVersion']?.split(".")[1] !== nodeVersion?.split(".")[1])) { + if (!this.properties['origVals']) { + this.properties['origVals'] = { bgcolor: this.bgcolor, color: this.color, title: this.title } + } + this.bgcolor = "#e76066"; + this.color = "#ff0b1e"; + this.title = this.title.includes("Node Version Mismatch") ? 
this.title : this.title + " - Node Version Mismatch" + } else if (this.properties['origVals']) { + this.bgcolor = this.properties.origVals.bgcolor; + this.color = this.properties.origVals.color; + this.title = this.properties.origVals.title; + delete this.properties['origVals'] + } + return r; + }; + } + }, + nodeCreated(node) { + if (["pipeLoader", "pipeLoaderSDXL"].includes(node.constructor.title)) { + for (let widget of node.widgets) { + if (widget.name === "control_after_generate") { + widget.value = "fixed" + } + } + } + } +}); diff --git a/zavodik/nodes/ComfyUI_tinyterraNodes-main/js/ttNdropdown.js b/zavodik/nodes/ComfyUI_tinyterraNodes-main/js/ttNdropdown.js new file mode 100644 index 0000000000000000000000000000000000000000..ac64f12c7a3a253ebc4a923a80b4abb02ab7a732 --- /dev/null +++ b/zavodik/nodes/ComfyUI_tinyterraNodes-main/js/ttNdropdown.js @@ -0,0 +1,265 @@ +// ttN Dropdown +let activeDropdown = null; + +class Dropdown { + constructor(inputEl, options, onSelect, isDict, manualOffset, hostElement) { + this.dropdown = document.createElement('ul'); + this.dropdown.setAttribute('role', 'listbox'); + this.dropdown.classList.add('ttN-dropdown'); + this.selectedIndex = -1; + this.inputEl = inputEl; + this.options = options; + this.onSelect = onSelect; + this.isDict = isDict; + this.manualOffsetX = manualOffset[0]; + this.manualOffsetY = manualOffset[1]; + this.hostElement = hostElement; + + this.focusedDropdown = this.dropdown; + + this.buildDropdown(); + + this.onKeyDownBound = this.onKeyDown.bind(this); + this.onWheelBound = this.onWheel.bind(this); + this.onClickBound = this.onClick.bind(this); + + this.addEventListeners(); + } + + buildDropdown() { + if (this.isDict) { + this.buildNestedDropdown(this.options, this.dropdown); + } else { + this.dropdown.classList.add('ttN-dropdown-scrollable'); + this.options.forEach((suggestion, index) => { + this.addListItem(suggestion, index, this.dropdown); + }); + } + + const inputRect = this.inputEl.getBoundingClientRect(); + if (isNaN(this.manualOffsetX) && this.manualOffsetX.includes('%')) { + this.manualOffsetX = (inputRect.height * (parseInt(this.manualOffsetX) / 100)) + } + if (isNaN(this.manualOffsetY) && this.manualOffsetY.includes('%')) { + this.manualOffsetY = (inputRect.width * (parseInt(this.manualOffsetY) / 100)) + } + this.dropdown.style.top = (inputRect.top + inputRect.height - this.manualOffsetX) + 'px'; + this.dropdown.style.left = (inputRect.left + inputRect.width - this.manualOffsetY) + 'px'; + + this.hostElement.appendChild(this.dropdown); + + activeDropdown = this; + } + + buildNestedDropdown(dictionary, parentElement, currentPath = '') { + let index = 0; + Object.keys(dictionary).forEach((key) => { + let extra_data; + const item = dictionary[key]; + if (typeof item === 'string') { extra_data = item; } + + let fullPath = currentPath ? 
`${currentPath}/${key}` : key; + if (extra_data) { fullPath = `${fullPath}###${extra_data}`; } + + if (typeof item === "object" && item !== null) { + const nestedDropdown = document.createElement('ul'); + nestedDropdown.setAttribute('role', 'listbox'); + nestedDropdown.classList.add('ttN-nested-dropdown'); + + const hasChildFolders = Object.values(item).some((child) => typeof child === 'object' && child !== null); + if (!hasChildFolders) { + nestedDropdown.classList.add('ttN-dropdown-scrollable'); + } + + const parentListItem = document.createElement('li'); + parentListItem.classList.add('folder'); + parentListItem.textContent = key; + parentListItem.appendChild(nestedDropdown); + parentListItem.addEventListener('mouseover', this.onMouseOver.bind(this, index, parentElement)); + parentElement.appendChild(parentListItem); + this.buildNestedDropdown(item, nestedDropdown, fullPath); + index = index + 1; + } else { + const listItem = document.createElement('li'); + listItem.classList.add('item'); + listItem.setAttribute('role', 'option'); + listItem.textContent = key; + listItem.addEventListener('mouseover', this.onMouseOver.bind(this, index, parentElement)); + listItem.addEventListener('mousedown', (e) => this.onMouseDown(key, e, fullPath)); + parentElement.appendChild(listItem); + index = index + 1; + } + }); + } + + addListItem(item, index, parentElement) { + const listItem = document.createElement('li'); + listItem.classList.add('item'); + listItem.setAttribute('role', 'option'); + listItem.textContent = item; + listItem.addEventListener('mouseover', () => this.onMouseOver(index)); + listItem.addEventListener('mousedown', (e) => this.onMouseDown(item, e)); + parentElement.appendChild(listItem); + } + + addEventListeners() { + document.addEventListener('keydown', this.onKeyDownBound); + this.dropdown.addEventListener('wheel', this.onWheelBound); + document.addEventListener('click', this.onClickBound); + } + + removeEventListeners() { + document.removeEventListener('keydown', this.onKeyDownBound); + this.dropdown.removeEventListener('wheel', this.onWheelBound); + document.removeEventListener('click', this.onClickBound); + } + + closeDropdown() { + if (activeDropdown === this) { + activeDropdown = null; + } + this.removeEventListeners(); + this.dropdown.remove(); + } + + onMouseOver(index, parentElement=null) { + if (parentElement) { + this.focusedDropdown = parentElement; + } + this.selectedIndex = index; + this.updateSelection(); + } + + onMouseDown(suggestion, event, fullPath='') { + event.preventDefault(); + this.onSelect(suggestion, fullPath); + this.closeDropdown(); + } + + onKeyDown(event) { + const enterKeyCode = 13; + const escKeyCode = 27; + const arrowUpKeyCode = 38; + const arrowDownKeyCode = 40; + const arrowRightKeyCode = 39; + const arrowLeftKeyCode = 37; + const tabKeyCode = 9; + + const items = Array.from(this.focusedDropdown.children); + const selectedItem = items[this.selectedIndex]; + + if (activeDropdown) { + if (event.keyCode === arrowUpKeyCode) { + event.preventDefault(); + this.selectedIndex = Math.max(0, this.selectedIndex - 1); + this.updateSelection(); + } + + else if (event.keyCode === arrowDownKeyCode) { + event.preventDefault(); + this.selectedIndex = Math.min(items.length - 1, this.selectedIndex + 1); + this.updateSelection(); + } + + else if (event.keyCode === arrowRightKeyCode && selectedItem) { + event.preventDefault(); + if (selectedItem.classList.contains('folder')) { + const nestedDropdown = selectedItem.querySelector('.ttN-nested-dropdown'); + if 
(nestedDropdown) { + this.focusedDropdown = nestedDropdown; + this.selectedIndex = 0; + this.updateSelection(); + } + } + } + + else if (event.keyCode === arrowLeftKeyCode && this.focusedDropdown !== this.dropdown) { + const parentDropdown = this.focusedDropdown.closest('.ttN-dropdown, .ttN-nested-dropdown').parentNode.closest('.ttN-dropdown, .ttN-nested-dropdown'); + if (parentDropdown) { + this.focusedDropdown = parentDropdown; + this.selectedIndex = Array.from(parentDropdown.children).indexOf(this.focusedDropdown.parentNode); + this.updateSelection(); + } + } + + else if ((event.keyCode === enterKeyCode || event.keyCode === tabKeyCode) && this.selectedIndex >= 0) { + event.preventDefault(); + if (selectedItem.classList.contains('item')) { + this.onSelect(items[this.selectedIndex].textContent); + this.closeDropdown(); + } + + const nestedDropdown = selectedItem.querySelector('.ttN-nested-dropdown'); + if (nestedDropdown) { + this.focusedDropdown = nestedDropdown; + this.selectedIndex = 0; + this.updateSelection(); + } + } + + else if (event.keyCode === escKeyCode) { + this.closeDropdown(); + } + } + } + + onWheel(event) { + event.preventDefault(); + event.stopPropagation(); + + const invertScroll = !!localStorage.getItem("Comfy.Settings.Comfy.InvertMenuScrolling"); + const delta = invertScroll ? -event.deltaY : event.deltaY; + const hoveredDropdown = event.target.closest('.ttN-dropdown, .ttN-nested-dropdown'); + const scrollTarget = hoveredDropdown || this.focusedDropdown || this.dropdown; + + if (scrollTarget.scrollHeight > scrollTarget.clientHeight) { + scrollTarget.scrollTop += delta; + return; + } + + const offsetStep = invertScroll + ? (event.deltaY < 0 ? 10 : -10) + : (event.deltaY < 0 ? -10 : 10); + + if (scrollTarget !== this.dropdown && scrollTarget.classList.contains('ttN-nested-dropdown')) { + const nestedTop = parseInt(scrollTarget.style.top, 10) || 0; + scrollTarget.style.top = `${nestedTop + offsetStep}px`; + return; + } + + const top = parseInt(this.dropdown.style.top, 10) || 0; + this.dropdown.style.top = `${top + offsetStep}px`; + } + + onClick(event) { + if (!this.dropdown.contains(event.target) && event.target !== this.inputEl) { + this.closeDropdown(); + } + } + + updateSelection() { + if (!this.focusedDropdown.children) { + this.dropdown.classList.add('selected'); + } else { + Array.from(this.focusedDropdown.children).forEach((li, index) => { + if (index === this.selectedIndex) { + li.classList.add('selected'); + li.scrollIntoView({ block: 'nearest' }); + } else { + li.classList.remove('selected'); + } + }); + } + } +} + +export function ttN_RemoveDropdown() { + if (activeDropdown) { + activeDropdown.closeDropdown(); + } +} + +export function ttN_CreateDropdown(inputEl, options, onSelect, isDict = false, manualOffset = [10,'100%'], hostElement = document.body) { + ttN_RemoveDropdown(); + new Dropdown(inputEl, options, onSelect, isDict, manualOffset, hostElement); +} diff --git a/zavodik/nodes/ComfyUI_tinyterraNodes-main/js/ttNdynamicWidgets.js b/zavodik/nodes/ComfyUI_tinyterraNodes-main/js/ttNdynamicWidgets.js new file mode 100644 index 0000000000000000000000000000000000000000..b8a7fbfba7a1742f6b25483ce687e13b25c871fd --- /dev/null +++ b/zavodik/nodes/ComfyUI_tinyterraNodes-main/js/ttNdynamicWidgets.js @@ -0,0 +1,560 @@ +import { app } from "../../scripts/app.js"; + +let origProps = {}; + +const findWidgetByName = (node, name) => node.widgets.find((w) => w.name === name); + +const doesInputLinkExist = (node, name) => node.inputs ? 
node.inputs.some((input) => input.name === name && input.link != null) : false;
+
+function updateNodeHeight(node) {
+    node.setSize([node.size[0], node.computeSize()[1]]);
+    app.canvas.dirty_canvas = true;
+}
+
+function toggleWidget(node, widget, show = false, suffix = "") {
+    // Skip missing widgets and widgets whose matching input is already linked;
+    // cache the original type/size so the widget can be restored when re-shown.
+    if (!widget || doesInputLinkExist(node, widget.name)) return;
+    if (!origProps[widget.name]) {
+        origProps[widget.name] = { origType: widget.type, origComputeSize: widget.computeSize, origComputedHeight: widget.computedHeight };
+    }
+    const origSize = node.size;
+
+    widget.type = show ? origProps[widget.name].origType : "ttNhidden" + suffix;
+    widget.computeSize = show ? origProps[widget.name].origComputeSize : () => [0, -4];
+    widget.computedHeight = show ? origProps[widget.name].origComputedHeight : 0;
+
+    widget.linkedWidgets?.forEach(w => toggleWidget(node, w, show, ":" + widget.name));
+
+    const height = show ? Math.max(node.computeSize()[1], origSize[1]) : node.size[1];
+    node.setSize([node.size[0], height]);
+    app.canvas.dirty_canvas = true
+}
+
+function widgetLogic(node, widget) {
+    switch (widget.name) {
+        case 'lora_name':
+            if (widget.value === "None") {
+                toggleWidget(node, findWidgetByName(node, 'lora_model_strength'))
+                toggleWidget(node, findWidgetByName(node, 'lora_clip_strength'))
+                toggleWidget(node, findWidgetByName(node, 'lora_strength'))
+            } else {
+                toggleWidget(node, findWidgetByName(node, 'lora_model_strength'), true)
+                toggleWidget(node, findWidgetByName(node, 'lora_clip_strength'), true)
+                toggleWidget(node, findWidgetByName(node, 'lora_strength'), true)
+            }
+            break;
+
+        case 'lora1_name':
+            if (widget.value === "None") {
+                toggleWidget(node, findWidgetByName(node, 'lora1_model_strength'))
+                toggleWidget(node, findWidgetByName(node, 'lora1_clip_strength'))
+            } else {
+                toggleWidget(node, findWidgetByName(node, 'lora1_model_strength'), true)
+                toggleWidget(node, findWidgetByName(node, 'lora1_clip_strength'), true)
+            }
+            break;
+
+        case 'lora2_name':
+            if (widget.value === "None") {
+                toggleWidget(node, findWidgetByName(node, 'lora2_model_strength'))
+                toggleWidget(node, findWidgetByName(node, 'lora2_clip_strength'))
+            } else {
+                toggleWidget(node, findWidgetByName(node, 'lora2_model_strength'), true)
+                toggleWidget(node, findWidgetByName(node, 'lora2_clip_strength'), true)
+            }
+            break;
+
+        case 'lora3_name':
+            if (widget.value === "None") {
+                toggleWidget(node, findWidgetByName(node, 'lora3_model_strength'))
+                toggleWidget(node, findWidgetByName(node, 'lora3_clip_strength'))
+            } else {
+                toggleWidget(node, findWidgetByName(node, 'lora3_model_strength'), true)
+                toggleWidget(node, findWidgetByName(node, 'lora3_clip_strength'), true)
+            }
+            break;
+
+        case 'refiner_ckpt_name':
+            let refiner_lora1 = findWidgetByName(node, 'refiner_lora1_name')?.value
+            let refiner_lora2 = findWidgetByName(node, 'refiner_lora2_name')?.value
+            if (widget.value === "None") {
+                toggleWidget(node, findWidgetByName(node, 'refiner_vae_name'))
+                toggleWidget(node, findWidgetByName(node, 'refiner_config_name'))
+                toggleWidget(node, findWidgetByName(node, 'refiner_clip_skip'))
+                toggleWidget(node, findWidgetByName(node, 'refiner_loras'))
+                toggleWidget(node, findWidgetByName(node, 'positive_ascore'))
+                toggleWidget(node, findWidgetByName(node, 'negative_ascore'))
+
+                toggleWidget(node, findWidgetByName(node, 'refiner_lora1_name'))
+                toggleWidget(node, findWidgetByName(node, 'refiner_lora1_model_strength'))
+                toggleWidget(node, findWidgetByName(node, 'refiner_lora1_clip_strength'))
+                toggleWidget(node, findWidgetByName(node,
'refiner_lora2_name')) + toggleWidget(node, findWidgetByName(node, 'refiner_lora2_model_strength')) + toggleWidget(node, findWidgetByName(node, 'refiner_lora2_clip_strength')) + } else { + toggleWidget(node, findWidgetByName(node, 'refiner_vae_name'), true) + toggleWidget(node, findWidgetByName(node, 'refiner_config_name'), true) + toggleWidget(node, findWidgetByName(node, 'refiner_clip_skip'), true) + toggleWidget(node, findWidgetByName(node, 'refiner_loras'), true) + toggleWidget(node, findWidgetByName(node, 'positive_ascore'), true) + toggleWidget(node, findWidgetByName(node, 'negative_ascore'), true) + toggleWidget(node, findWidgetByName(node, 'refiner_lora1_name'), true) + if (refiner_lora1 !== "None") { + toggleWidget(node, findWidgetByName(node, 'refiner_lora1_model_strength'), true) + toggleWidget(node, findWidgetByName(node, 'refiner_lora1_clip_strength'), true) + } + toggleWidget(node, findWidgetByName(node, 'refiner_lora2_name'), true) + if (refiner_lora2 !== "None") { + toggleWidget(node, findWidgetByName(node, 'refiner_lora2_model_strength'), true) + toggleWidget(node, findWidgetByName(node, 'refiner_lora2_clip_strength'), true) + } + } + break; + + case 'rescale_after_model': + if (widget.value === false) { + toggleWidget(node, findWidgetByName(node, 'rescale_method')) + toggleWidget(node, findWidgetByName(node, 'rescale')) + toggleWidget(node, findWidgetByName(node, 'percent')) + toggleWidget(node, findWidgetByName(node, 'width')) + toggleWidget(node, findWidgetByName(node, 'height')) + toggleWidget(node, findWidgetByName(node, 'longer_side')) + toggleWidget(node, findWidgetByName(node, 'crop')) + } else { + toggleWidget(node, findWidgetByName(node, 'rescale_method'), true) + toggleWidget(node, findWidgetByName(node, 'rescale'), true) + + let rescale_value = findWidgetByName(node, 'rescale').value + + if (rescale_value === 'by percentage') { + toggleWidget(node, findWidgetByName(node, 'percent'), true) + } else if (rescale_value === 'to Width/Height') { + toggleWidget(node, findWidgetByName(node, 'width'), true) + toggleWidget(node, findWidgetByName(node, 'height'), true) + } else { + toggleWidget(node, findWidgetByName(node, 'longer_side'), true) + } + toggleWidget(node, findWidgetByName(node, 'crop'), true) + } + break; + + case 'rescale': + let rescale_after_model = findWidgetByName(node, 'rescale_after_model')?.value + let hiresfix = findWidgetByName(node, 'upscale_method') || findWidgetByName(node, 'rescale_method') + if (typeof(hiresfix.value) == 'string' && hiresfix.value.includes('hiresFix')) { + hiresfix = true + } else { + hiresfix = false + } + if (widget.value === 'by percentage' && (rescale_after_model || hiresfix)) { + toggleWidget(node, findWidgetByName(node, 'width')) + toggleWidget(node, findWidgetByName(node, 'height')) + toggleWidget(node, findWidgetByName(node, 'longer_side')) + toggleWidget(node, findWidgetByName(node, 'percent'), true) + } else if (widget.value === 'to Width/Height' && (rescale_after_model || hiresfix)) { + toggleWidget(node, findWidgetByName(node, 'width'), true) + toggleWidget(node, findWidgetByName(node, 'height'), true) + toggleWidget(node, findWidgetByName(node, 'percent')) + toggleWidget(node, findWidgetByName(node, 'longer_side')) + } else if (widget.value === 'to longer side - maintain aspect' && (rescale_after_model || hiresfix)) { + toggleWidget(node, findWidgetByName(node, 'width')) + toggleWidget(node, findWidgetByName(node, 'longer_side'), true) + toggleWidget(node, findWidgetByName(node, 'height')) + toggleWidget(node, 
findWidgetByName(node, 'percent')) + } else if (widget.value === 'None' && (rescale_after_model || hiresfix)) { + toggleWidget(node, findWidgetByName(node, 'width')) + toggleWidget(node, findWidgetByName(node, 'longer_side')) + toggleWidget(node, findWidgetByName(node, 'height')) + toggleWidget(node, findWidgetByName(node, 'percent')) + } else { + toggleWidget(node, findWidgetByName(node, 'width')) + toggleWidget(node, findWidgetByName(node, 'height')) + toggleWidget(node, findWidgetByName(node, 'longer_side')) + toggleWidget(node, findWidgetByName(node, 'percent')) + } + break; + + case 'upscale_method': + if (widget.value === "None") { + toggleWidget(node, findWidgetByName(node, 'factor')) + toggleWidget(node, findWidgetByName(node, 'crop')) + toggleWidget(node, findWidgetByName(node, 'upscale_model_name')) + toggleWidget(node, findWidgetByName(node, 'rescale')) + toggleWidget(node, findWidgetByName(node, 'percent')) + toggleWidget(node, findWidgetByName(node, 'width')) + toggleWidget(node, findWidgetByName(node, 'height')) + toggleWidget(node, findWidgetByName(node, 'longer_side')) + } else { + if (typeof(widget.value) === 'string' && widget.value.includes('[hiresFix]')) { + let rescale = findWidgetByName(node, 'rescale') + toggleWidget(node, rescale, true) + if (rescale?.value === 'by percentage') { + toggleWidget(node, findWidgetByName(node, 'percent'), true) + toggleWidget(node, findWidgetByName(node, 'longer_side')) + toggleWidget(node, findWidgetByName(node, 'width')) + toggleWidget(node, findWidgetByName(node, 'height')) + toggleWidget(node, findWidgetByName(node, 'factor')) + toggleWidget(node, findWidgetByName(node, 'crop')) + } else if (rescale?.value === 'to Width/Height') { + toggleWidget(node, findWidgetByName(node, 'percent')) + toggleWidget(node, findWidgetByName(node, 'longer_side')) + toggleWidget(node, findWidgetByName(node, 'width'), true) + toggleWidget(node, findWidgetByName(node, 'height'), true) + toggleWidget(node, findWidgetByName(node, 'factor')) + toggleWidget(node, findWidgetByName(node, 'crop')) + } else if (rescale?.value === 'to longer side - maintain aspect') { + toggleWidget(node, findWidgetByName(node, 'percent')) + toggleWidget(node, findWidgetByName(node, 'longer_side'), true) + toggleWidget(node, findWidgetByName(node, 'width')) + toggleWidget(node, findWidgetByName(node, 'height')) + toggleWidget(node, findWidgetByName(node, 'factor')) + toggleWidget(node, findWidgetByName(node, 'crop')) + } else { + toggleWidget(node, findWidgetByName(node, 'percent')) + toggleWidget(node, findWidgetByName(node, 'longer_side')) + toggleWidget(node, findWidgetByName(node, 'width')) + toggleWidget(node, findWidgetByName(node, 'height')) + toggleWidget(node, findWidgetByName(node, 'factor')) + toggleWidget(node, findWidgetByName(node, 'crop')) + } + toggleWidget(node, findWidgetByName(node, 'upscale_model_name'), true) + } else { + toggleWidget(node, findWidgetByName(node, 'upscale_model_name')) + toggleWidget(node, findWidgetByName(node, 'rescale')) + toggleWidget(node, findWidgetByName(node, 'percent')) + toggleWidget(node, findWidgetByName(node, 'width')) + toggleWidget(node, findWidgetByName(node, 'height')) + toggleWidget(node, findWidgetByName(node, 'longer_side')) + toggleWidget(node, findWidgetByName(node, 'factor'), true) + toggleWidget(node, findWidgetByName(node, 'crop'), true) + } + } + break; + + case 'image_output': + if (['Hide', 'Preview'].includes(widget.value)) { + toggleWidget(node, findWidgetByName(node, 'save_prefix')) + toggleWidget(node, findWidgetByName(node, 
'output_path')) + toggleWidget(node, findWidgetByName(node, 'embed_workflow')) + toggleWidget(node, findWidgetByName(node, 'number_padding')) + toggleWidget(node, findWidgetByName(node, 'overwrite_existing')) + toggleWidget(node, findWidgetByName(node, 'file_type')) + } else if (['Save', 'Hide/Save', 'Disabled'].includes(widget.value)) { + toggleWidget(node, findWidgetByName(node, 'save_prefix'), true) + toggleWidget(node, findWidgetByName(node, 'output_path'), true) + toggleWidget(node, findWidgetByName(node, 'number_padding'), true) + toggleWidget(node, findWidgetByName(node, 'overwrite_existing'), true) + toggleWidget(node, findWidgetByName(node, 'file_type'), true) + const fileTypeValue = findWidgetByName(node, 'file_type')?.value + if (['png', 'webp'].includes(fileTypeValue)) { + toggleWidget(node, findWidgetByName(node, 'embed_workflow'), true) + } else { + toggleWidget(node, findWidgetByName(node, 'embed_workflow')) + } + } + break; + + case 'text_output': + if (widget.value === "Preview") { + toggleWidget(node, findWidgetByName(node, 'save_prefix')) + toggleWidget(node, findWidgetByName(node, 'output_path')) + toggleWidget(node, findWidgetByName(node, 'number_padding')) + toggleWidget(node, findWidgetByName(node, 'overwrite_existing')) + toggleWidget(node, findWidgetByName(node, 'file_type')) + } else if (widget.value === "Save") { + toggleWidget(node, findWidgetByName(node, 'save_prefix'), true) + toggleWidget(node, findWidgetByName(node, 'output_path'), true) + toggleWidget(node, findWidgetByName(node, 'number_padding'), true) + toggleWidget(node, findWidgetByName(node, 'overwrite_existing'), true) + toggleWidget(node, findWidgetByName(node, 'file_type'), true) + } + break; + + case 'add_noise': + if (widget.value === "disable") { + toggleWidget(node, findWidgetByName(node, 'noise_seed')) + toggleWidget(node, findWidgetByName(node, 'control_after_generate')) + } else { + toggleWidget(node, findWidgetByName(node, 'noise_seed'), true) + toggleWidget(node, findWidgetByName(node, 'control_after_generate'), true) + } + break; + + case 'ckpt_B_name': + if (widget.value === "None") { + toggleWidget(node, findWidgetByName(node, 'config_B_name')) + } else { + toggleWidget(node, findWidgetByName(node, 'config_B_name'), true) + } + break; + + case 'ckpt_C_name': + if (widget.value === "None") { + toggleWidget(node, findWidgetByName(node, 'config_C_name')) + } else { + toggleWidget(node, findWidgetByName(node, 'config_C_name'), true) + } + break; + + case 'save_model': + if (widget.value === "True") { + toggleWidget(node, findWidgetByName(node, 'save_prefix'), true) + } else { + toggleWidget(node, findWidgetByName(node, 'save_prefix')) + } + break; + + case 'num_loras': + let number_to_show = widget.value + 1 + for (let i = 0; i < number_to_show; i++) { + toggleWidget(node, findWidgetByName(node, 'lora_'+i+'_name'), true) + if (findWidgetByName(node, 'mode').value === "simple") { + toggleWidget(node, findWidgetByName(node, 'lora_'+i+'_strength'), true) + toggleWidget(node, findWidgetByName(node, 'lora_'+i+'_model_strength')) + toggleWidget(node, findWidgetByName(node, 'lora_'+i+'_clip_strength')) + } else { + toggleWidget(node, findWidgetByName(node, 'lora_'+i+'_strength')) + toggleWidget(node, findWidgetByName(node, 'lora_'+i+'_model_strength'), true) + toggleWidget(node, findWidgetByName(node, 'lora_'+i+'_clip_strength'), true) + } + } + for (let i = number_to_show; i < 21; i++) { + toggleWidget(node, findWidgetByName(node, 'lora_'+i+'_name')) + toggleWidget(node, findWidgetByName(node, 
'lora_'+i+'_strength')) + toggleWidget(node, findWidgetByName(node, 'lora_'+i+'_model_strength')) + toggleWidget(node, findWidgetByName(node, 'lora_'+i+'_clip_strength')) + } + updateNodeHeight(node); + break; + + case 'mode': + if (node.constructor.title === "pipeLoraStack") { + let number_to_show2 = findWidgetByName(node, 'num_loras')?.value + 1 + for (let i = 0; i < number_to_show2; i++) { + if (widget.value === "simple") { + toggleWidget(node, findWidgetByName(node, 'lora_'+i+'_strength'), true) + toggleWidget(node, findWidgetByName(node, 'lora_'+i+'_model_strength')) + toggleWidget(node, findWidgetByName(node, 'lora_'+i+'_clip_strength')) + } else { + toggleWidget(node, findWidgetByName(node, 'lora_'+i+'_strength')) + toggleWidget(node, findWidgetByName(node, 'lora_'+i+'_model_strength'), true) + toggleWidget(node, findWidgetByName(node, 'lora_'+i+'_clip_strength'), true) + } + } + updateNodeHeight(node) + break; + } else if (node.constructor.title === "advPlot combo") { + if (widget.value === 'all') { + toggleWidget(node, findWidgetByName(node, 'start_from')) + toggleWidget(node, findWidgetByName(node, 'end_with')) + toggleWidget(node, findWidgetByName(node, 'select')) + toggleWidget(node, findWidgetByName(node, 'selection')) + } else if (widget.value === 'range') { + toggleWidget(node, findWidgetByName(node, 'start_from'), true) + toggleWidget(node, findWidgetByName(node, 'end_with'), true) + toggleWidget(node, findWidgetByName(node, 'select')) + toggleWidget(node, findWidgetByName(node, 'selection')) + } else { + toggleWidget(node, findWidgetByName(node, 'start_from')) + toggleWidget(node, findWidgetByName(node, 'end_with')) + toggleWidget(node, findWidgetByName(node, 'select'), true) + toggleWidget(node, findWidgetByName(node, 'selection'), true) + } + } + break; + + case 'empty_latent_aspect': + if (widget.value !== 'width x height [custom]') { + toggleWidget(node, findWidgetByName(node, 'empty_latent_width')) + toggleWidget(node, findWidgetByName(node, 'empty_latent_height')) + } else { + toggleWidget(node, findWidgetByName(node, 'empty_latent_width'), true) + toggleWidget(node, findWidgetByName(node, 'empty_latent_height'), true) + } + break; + + case 'conditioning_aspect': + if (widget.value !== 'width x height [custom]') { + toggleWidget(node, findWidgetByName(node, 'conditioning_width')) + toggleWidget(node, findWidgetByName(node, 'conditioning_height')) + } else { + toggleWidget(node, findWidgetByName(node, 'conditioning_width'), true) + toggleWidget(node, findWidgetByName(node, 'conditioning_height'), true) + } + break; + + case 'target_aspect': + if (widget.value !== 'width x height [custom]') { + toggleWidget(node, findWidgetByName(node, 'target_width')) + toggleWidget(node, findWidgetByName(node, 'target_height')) + } else { + toggleWidget(node, findWidgetByName(node, 'target_width'), true) + toggleWidget(node, findWidgetByName(node, 'target_height'), true) + } + break; + + case 'toggle': + widget.type = 'toggle' + widget.options = {on: 'Enabled', off: 'Disabled'} + break; + + case 'refiner_steps': + if (widget.value == 0) { + toggleWidget(node, findWidgetByName(node, 'refiner_cfg')) + toggleWidget(node, findWidgetByName(node, 'refiner_denoise')) + } else { + toggleWidget(node, findWidgetByName(node, 'refiner_cfg'), true) + toggleWidget(node, findWidgetByName(node, 'refiner_denoise'), true) + } + break; + + case 'sampler_state': + if (widget.value == 'Hold') { + findWidgetByName(node, 'control_after_generate').value = 'fixed' + } + break; + + case 'print_to_console': + if 
(widget.value == false) { + toggleWidget(node, findWidgetByName(node, 'console_title')) + toggleWidget(node, findWidgetByName(node, 'console_color')) + } else { + toggleWidget(node, findWidgetByName(node, 'console_title'), true) + toggleWidget(node, findWidgetByName(node, 'console_color'), true) + } + break; + + case 'sampling': + if (widget.value == 'Default') { + toggleWidget(node, findWidgetByName(node, 'zsnr')) + } else { + toggleWidget(node, findWidgetByName(node, 'zsnr'), true) + } + break; + + case 'range_mode': + function setWidgetOptions(widget, options) { + widget.options.step = options.step; + widget.options.round = options.round; + widget.options.precision = options.precision; + } + + if (widget.value.startsWith('step')) { + toggleWidget(node, findWidgetByName(node, 'stop')) + toggleWidget(node, findWidgetByName(node, 'step'), true) + toggleWidget(node, findWidgetByName(node, 'include_stop')) + } else { + toggleWidget(node, findWidgetByName(node, 'stop'), true) + toggleWidget(node, findWidgetByName(node, 'step')) + toggleWidget(node, findWidgetByName(node, 'include_stop'), true) + } + if (widget.value.endsWith('int')) { + const intOptions = { + step: 10, + round: 1, + precision: 0 + }; + const start_widget = findWidgetByName(node, 'start') + const stop_widget = findWidgetByName(node, 'stop') + const step_widget = findWidgetByName(node, 'step') + setWidgetOptions(start_widget, intOptions); + setWidgetOptions(stop_widget, intOptions); + setWidgetOptions(step_widget, intOptions); + } else { + const floatOptions = { + step: 0.1, + round: 0.01, + precision: 2 + }; + const start_widget = findWidgetByName(node, 'start') + const stop_widget = findWidgetByName(node, 'stop') + const step_widget = findWidgetByName(node, 'step') + setWidgetOptions(start_widget, floatOptions); + setWidgetOptions(stop_widget, floatOptions); + setWidgetOptions(step_widget, floatOptions); + } + break; + + case 'file_type': + const imageOutputValue = findWidgetByName(node, 'image_output')?.value + if (['png', 'webp'].includes(widget.value) && ['Save', 'Hide/Save', 'Disabled'].includes(imageOutputValue)) { + toggleWidget(node, findWidgetByName(node, 'embed_workflow'), true) + } else { + toggleWidget(node, findWidgetByName(node, 'embed_workflow')) + } + break; + + case 'replace_mode': + if (widget.value == true) { + toggleWidget(node, findWidgetByName(node, 'search_string'), true) + } else { + toggleWidget(node, findWidgetByName(node, 'search_string')) + } + } +} + +const getSetWidgets = ['rescale_after_model', 'rescale', 'image_output', + 'lora_name', 'lora1_name', 'lora2_name', 'lora3_name', + 'refiner_lora1_name', 'refiner_lora2_name', 'refiner_steps', 'upscale_method', + 'text_output', 'add_noise', + 'ckpt_B_name', 'ckpt_C_name', 'save_model', 'refiner_ckpt_name', + 'num_loras', 'mode', 'toggle', 'empty_latent_aspect', 'conditioning_aspect', 'target_aspect', 'sampler_state', + 'print_to_console', 'sampling', 'range_mode', 'file_type', 'replace_mode'] +const getSetTitles = [ + "hiresfixScale", + "pipeLoader", + "pipeLoader v1 (Legacy)", + "pipeLoaderSDXL", + "pipeLoaderSDXL v1 (Legacy)", + "pipeKSampler", + "pipeKSampler v1 (Legacy)", + "pipeKSamplerAdvanced", + "pipeKSamplerAdvanced v1 (Legacy)", + "pipeKSamplerSDXL", + "pipeKSamplerSDXL v1 (Legacy)", + "imageRemBG", + "imageOutput", + "multiModelMerge", + "pipeLoraStack", + "pipeEncodeConcat", + "tinyKSampler", + "debugInput", + "tinyLoader", + "advPlot range", + "advPlot combo", + "advPlot images", + "advPlot string", + "textOutput", +]; + 
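+// To wire hide/show behaviour for a new widget, register its name in
+// getSetWidgets, its node title in getSetTitles, and add a matching case to
+// widgetLogic above. A hypothetical sketch ('my_toggle' and 'my_extra' are
+// illustrative names only, not widgets shipped by this pack):
+//
+//   case 'my_toggle':
+//       // hide 'my_extra' unless the toggle is on
+//       toggleWidget(node, findWidgetByName(node, 'my_extra'), widget.value === true)
+//       break;
+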
+function getSetters(node) { + if (node.widgets) + for (const w of node.widgets) { + if (getSetWidgets.includes(w.name)) { + widgetLogic(node, w); + let widgetValue = w.value; + + // Define getters and setters for widget values + Object.defineProperty(w, 'value', { + get() { + return widgetValue; + }, + set(newVal) { + if (newVal !== widgetValue) { + widgetValue = newVal; + widgetLogic(node, w); + } + } + }); + } + } +} + +app.registerExtension({ + name: "comfy.ttN.dynamicWidgets", + + nodeCreated(node) { + const nodeTitle = node.constructor.title; + if (getSetTitles.includes(nodeTitle)) { + getSetters(node); + } + } +}); \ No newline at end of file diff --git a/zavodik/nodes/ComfyUI_tinyterraNodes-main/js/ttNembedAC.js b/zavodik/nodes/ComfyUI_tinyterraNodes-main/js/ttNembedAC.js new file mode 100644 index 0000000000000000000000000000000000000000..e1d37d6a642519b6f9d8d89abb39c6116f614b80 --- /dev/null +++ b/zavodik/nodes/ComfyUI_tinyterraNodes-main/js/ttNembedAC.js @@ -0,0 +1,290 @@ +import { app } from "../../scripts/app.js"; +import { api } from "../../scripts/api.js"; +import { ttN_CreateDropdown, ttN_RemoveDropdown } from "./ttNdropdown.js"; + +// Initialize some global lists and objects. +let autoCompleteDict = {}; // {prefix: [suggestions]} +let autoCompleteHierarchy = {}; +let nsp_keys = ['3d-terms', 'adj-architecture', 'adj-beauty', 'adj-general', 'adj-horror', 'album-cover', 'animals', 'artist', 'artist-botanical', 'artist-surreal', 'aspect-ratio', 'band', 'bird', 'body-fit', 'body-heavy', 'body-light', 'body-poor', 'body-shape', 'body-short', 'body-tall', 'bodyshape', 'camera', 'camera-manu', 'celeb', 'color', 'color-palette', 'comic', 'cosmic-galaxy', 'cosmic-nebula', 'cosmic-star', 'cosmic-terms', 'details', 'dinosaur', 'eyecolor', 'f-stop', 'fantasy-creature', 'fantasy-setting', 'fish', 'flower', 'focal-length', 'foods', 'forest-type', 'fruit', 'games', 'gen-modifier', 'gender', 'gender-ext', 'hair', 'hd', 'identity', 'identity-adult', 'identity-young', 'iso-stop', 'landscape-type', 'movement', 'movie', 'movie-director', 'nationality', 'natl-park', 'neg-weight', 'noun-beauty', 'noun-emote', 'noun-fantasy', 'noun-general', 'noun-horror', 'occupation', 'penciller', 'photo-term', 'pop-culture', 'pop-location', 'portrait-type', 'punk', 'quantity', 'rpg-Item', 'scenario-desc', 'site', 'skin-color', 'style', 'tree', 'trippy', 'water', 'wh-site'] + +function getFileName(path) { + return path.split(/[\/:\\]/).pop(); +} + +function getCurrentWord(widget) { + const formattedInput = widget.inputEl.value.replace(/>\s*/g, '> ').replace(/\s+/g, ' '); + const words = formattedInput.split(' '); + + const adjustedInput = widget.inputEl.value.substring(0, widget.inputEl.selectionStart) + .replace(/>\s*/g, '> ').replace(/\s+/g, ' '); + + const currentWordPosition = adjustedInput.split(' ').length - 1; + + return words[currentWordPosition].toLowerCase(); +} + +function isTriggerWord(word) { + for (let prefix in autoCompleteDict) { + if ((prefix.startsWith(word) && word.length > 1) || word.startsWith(prefix)) return true; + } + return false; +} + +const _generatePrefixes = (str) => { + const prefixes = []; + while (str.length > 1) { + prefixes.push(str); + str = str.substring(0, str.length - 1); + } + return prefixes; +}; + +function _cleanInputWord(word) { + let prefixesToRemove = []; + for (let prefix in autoCompleteDict) { + prefixesToRemove = [...prefixesToRemove, ..._generatePrefixes(prefix)]; + } + let cleanedWord = prefixesToRemove.reduce((acc, prefix) => acc.replace(prefix, ''), 
word.toLowerCase()); + if (cleanedWord.includes(':')) { + const parts = cleanedWord.split(':'); + cleanedWord = parts[0]; + } + return cleanedWord.replace(/\//g, "\\"); +} + +function getSuggestionsForWord(word) { + let suggestions = []; + for (let prefix in autoCompleteDict) { + if ((prefix.startsWith(word) && word.length > 1) || word.startsWith(prefix)) { + suggestions = autoCompleteDict['fpath_' + prefix]; // Get suggestions from the dictionary + break; + } + } + const cleanedWord = _cleanInputWord(word); + // Filter suggestions based on the cleaned word + return suggestions.filter(suggestion => + suggestion.toLowerCase().includes(cleanedWord) || getFileName(suggestion).toLowerCase().includes(cleanedWord) + ); +} + + +function _convertListToHierarchy(list) { + const hierarchy = {}; + list.forEach(item => { + const parts = item.split(/:\\|\\/); + let node = hierarchy; + parts.forEach((part, idx) => { + node = node[part] = (idx === parts.length - 1) ? null : (node[part] || {}); + }); + }); + return hierarchy; +} + +function _insertSuggestion(widget, suggestion) { + const formattedInput = widget.inputEl.value.replace(/>\s*/g, '> ').replace(/\s+/g, ' '); + const inputSegments = formattedInput.split(' '); + + const adjustedInput = widget.inputEl.value.substring(0, widget.inputEl.selectionStart) + .replace(/>\s*/g, '> ').replace(/\s+/g, ' '); + const currentSegmentIndex = adjustedInput.split(' ').length - 1; + + let matchedPrefix = ''; + let currentSegment = inputSegments[currentSegmentIndex].toLowerCase(); + if (["loras", "refiner_loras"].includes(widget.name) && ['', ' ','<','')) { + oldSuffix = oldSuffix.split('>')[0] + '>'; + } + suffix = oldSuffix ? ':' + oldSuffix : ':1>'; + } + if (matchedPrefix === '__') { + suffix = '__'; + } + + inputSegments[currentSegmentIndex] = matchedPrefix + suggestion + suffix; + return inputSegments.join(' '); +} + +function showSuggestionsDropdown(widget, suggestions) { + const hierarchy = _convertListToHierarchy(suggestions); + ttN_CreateDropdown(widget.inputEl, hierarchy, selected => { + widget.inputEl.value = _insertSuggestion(widget, selected); + }, true); +} + + +function _initializeAutocompleteData(initialList, prefix) { + autoCompleteDict['fpath_' + prefix] = initialList + autoCompleteDict[prefix] = initialList.map(getFileName).map(item => prefix + item); +} + +function _initializeAutocompleteList(initialList, prefix) { + autoCompleteDict['fpath_' + prefix] = initialList + autoCompleteDict[prefix] = initialList.map(item => prefix + item); +} + +function _isRelevantWidget(widget) { + return (["customtext", "ttNhidden"].includes(widget.type) && (widget.dynamicPrompts !== false) || widget.dynamicPrompts) && !_isLorasWidget(widget); +} + +function _isLorasWidget(widget) { + return (["customtext", "ttNhidden"].includes(widget.type) && ["loras", "refiner_loras"].includes(widget.name)); +} + +function findPysssss(lora=false) { + const found = JSON.parse(app.ui.settings.getSettingValue('pysssss.AutoCompleter')) || false; + if (found && lora) { + return JSON.parse(localStorage.getItem("pysssss.AutoCompleter.ShowLoras")) || false; + } + return found; +} + +function _attachInputHandler(widget) { + if (!widget.ttNhandleInput) { + widget.ttNhandleInput = () => { + if (findPysssss()) { + return + } + + let currentWord = getCurrentWord(widget); + if (isTriggerWord(currentWord)) { + const suggestions = getSuggestionsForWord(currentWord); + if (suggestions.length > 0) { + showSuggestionsDropdown(widget, suggestions); + } else { + ttN_RemoveDropdown(); + } + } else { 
+ ttN_RemoveDropdown(); + } + }; + } + ['input', 'mousedown'].forEach(event => { + widget?.inputEl?.removeEventListener(event, widget.ttNhandleInput); + if (findPysssss()) { + return + } + widget?.inputEl?.addEventListener(event, widget.ttNhandleInput); + }); +} + +function _attachLorasHandler(widget) { + if (!widget.ttNhandleLorasInput) { + widget.ttNhandleLorasInput = () => { + if (findPysssss(true)) { + return + } + let currentWord = getCurrentWord(widget); + if (['',' ','<',' 0) { + showSuggestionsDropdown(widget, suggestions); + } else { + ttN_RemoveDropdown(); + } + } else { + ttN_RemoveDropdown(); + } + }; + } + + ['input', 'mouseup'].forEach(event => { + widget?.inputEl?.removeEventListener(event, widget.ttNhandleLorasInput); + if (findPysssss(true)) { + return + } + widget?.inputEl?.addEventListener(event, widget.ttNhandleLorasInput); + }); + + if (!widget.ttNhandleScrollInput) { + widget.ttNhandleScrollInput = (event) => { + event.preventDefault(); + + const step = event.ctrlKey ? 0.1 : 0.01; + + // Determine the scroll direction + const direction = Math.sign(event.deltaY); // Will be -1 for scroll up, 1 for scroll down + + // Get the current selection + const inputEl = widget.inputEl; + let selectionStart = inputEl.selectionStart; + let selectionEnd = inputEl.selectionEnd; + const selected = inputEl.value.substring(selectionStart, selectionEnd); + + if (selected === 'lora' || selected === 'skip') { + const swapWith = selected === 'lora' ? 'skip' : 'lora'; + inputEl.value = inputEl.value.substring(0, selectionStart) + swapWith + inputEl.value.substring(selectionEnd); + inputEl.setSelectionRange(selectionStart, selectionStart + swapWith.length); + return + } + + // Expand the selection to make sure the whole number is selected + while (selectionStart > 0 && /\d|\.|-/.test(inputEl.value.charAt(selectionStart - 1))) { + selectionStart--; + } + while (selectionEnd < inputEl.value.length && /\d|\.|-/.test(inputEl.value.charAt(selectionEnd))) { + selectionEnd++; + } + + const selectedText = inputEl.value.substring(selectionStart, selectionEnd); + + // Check if the selected text is a number + if (!isNaN(selectedText) && selectedText.trim() !== '') { + let trail = selectedText.split('.')[1]?.length; + if (!trail || trail < 2) { + trail = 2; + } + + const currentValue = parseFloat(selectedText); + let modifiedValue = currentValue - direction * step; + + // Format the number to avoid floating point precision issues and then convert back to a float + modifiedValue = parseFloat(modifiedValue.toFixed(trail)); + + // Replace the selected text with the new value, keeping the selection + inputEl.value = inputEl.value.substring(0, selectionStart) + modifiedValue + inputEl.value.substring(selectionEnd); + const newSelectionEnd = selectionStart + modifiedValue.toString().length; + inputEl.setSelectionRange(selectionStart, newSelectionEnd); + } + }; + } + + widget.inputEl.removeEventListener('wheel', widget.ttNhandleScrollInput); + widget.inputEl.addEventListener('wheel', widget.ttNhandleScrollInput); +} + +app.registerExtension({ + name: "comfy.ttN.AutoComplete", + async init() { + const embs = await api.fetchApi("/embeddings") + const loras = await api.fetchApi("/ttN/loras") + + _initializeAutocompleteData(await embs.json(), 'embedding:'); + _initializeAutocompleteData(await loras.json(), ' i.src.includes("filename")); + return img ? 
img.src : null; +} + +function _findLatentPreviewImageSRC(node) { + if (!node.imgs) return null; + + if (node.imageIndex != null && + node.imageIndex < node.imgs.length) { + return node.imgs[node.imageIndex].src; + } + + if (node.overIndex != null && + node.overIndex < node.imgs.length) { + return node.imgs[node.overIndex].src; + } + + return null; +} + +function updateImageTLDE() { + for (let node of app.graph._nodes) { + if (!node.imgs) continue; + + const finalSrc = _findFullImageSRC(node); + const latentSrc = _findLatentPreviewImageSRC(node); + + ttN_srcDict[node.id] = ttN_srcDict[node.id] || []; + + let previousLength = ttN_srcDict[node.id].length; + + if ( + finalSrc && + finalSrc.includes("filename") && + !ttN_srcDict[node.id].includes(finalSrc) + ) { + ttN_srcDict[node.id].push(finalSrc); + + // CAP HISTORY + if (ttN_srcDict[node.id].length > MAX_HISTORY_PER_NODE) { + ttN_srcDict[node.id].shift(); + } + } + + const viewers = + [...TTNViewer.instances] + .filter(v => v.node.id === node.id); + + for (const viewer of viewers) { + + const wasLast = viewer.imageIndex === previousLength - 1; + + if (finalSrc && wasLast && viewer.slideshow) { + viewer.setImage(-1); + continue; + } + + if ( + viewer.slideshow && + wasLast && + latentSrc && + !latentSrc.includes("filename") && + !finalSrc + ) { + viewer.image.src = latentSrc; + } + } + } + + const validNodeIds = new Set(app.graph._nodes.map(n => n.id)); + if (validNodeIds.size > 0) { + Object.keys(ttN_srcDict).forEach(id => { + if (!validNodeIds.has(Number(id))) { + delete ttN_srcDict[id]; + } + }); + } + + saveSrcDict(); + + TTNViewer.instances.forEach(v => v.refreshImages()); +} + +let _updateScheduled = null; + +function scheduleImageUpdate(delay = 300) { + if (_updateScheduled) return; + + _updateScheduled = setTimeout(() => { + updateImageTLDE(); + _updateScheduled = null; + }, delay); +} + +function _handleExecutedEvent(e) { + scheduleImageUpdate(500); +} + +function clearSrcDict() { + ttN_srcDict = {}; + saveSrcDict(); +} + +function _handleReconnectingEvent(e) { + clearSrcDict(); + localStorage.removeItem(STORAGE_KEYS.DEFAULTNODE); +} + + +api.addEventListener("status", _handleExecutedEvent); +api.addEventListener("progress", _handleExecutedEvent); +api.addEventListener("execution_cached", _handleExecutedEvent); +api.addEventListener("reconnecting", _handleReconnectingEvent); + +/* ========================================================= + VIEWER ENGINE +========================================================= */ + +class TTNViewer { + static instances = new Set(); + static fullscreenInstance = null; + + constructor(node, doc, mode = "fullscreen") { + this.node = node; + this.doc = doc; + this.mode = mode; + + this.imageIndex = -1; + + // Compare state + this.compareBase = null; + this.compareTarget = null; + this.comparing = false; + + // Transform + this.scale = 1; + this.offsetX = 0; + this.offsetY = 0; + this.dragging = false; + this.dragStartX = 0; + this.dragStartY = 0; + + this.autohide = JSON.parse(localStorage.getItem(STORAGE_KEYS.AUTOHIDE)) ?? true; + this.invertctrl = JSON.parse(localStorage.getItem(STORAGE_KEYS.INVERT)) ?? false; + this.fitscreentoggle = JSON.parse(localStorage.getItem(STORAGE_KEYS.FITSCREEN)) ?? 
true; + + this.slideshow = true; + this.hideTimeout = null; + + this._lastWheelTime = 0; + this._resizeObserver = null; + this._wheelOptions = { passive: false }; + this._lastSignature = null; + + TTNViewer.instances.add(this); + if (this.mode === "popout") { + window.addEventListener("storage", () => { + this.refreshImages(); + }); + } + this.init(); + } + + /* ================= INIT ================= */ + + init() { + this.injectCSS(); + this.createLayout(); + this.attachEvents(); + this.refreshImages(); + + if (this.mode === "fullscreen") this.wrapper.requestFullscreen(); + } + + injectCSS() { + if (this.doc.getElementById("ttn-viewer-style")) return; + + const style = this.doc.createElement("style"); + style.id = "ttn-viewer-style"; + style.innerHTML = ` + html, body { + margin:0; + padding:0; + width:100%; + height:100%; + background:black; + } + + .hidden { + transition: opacity 0.5s, visibility 0.5s, transform 0.2s ease!important; + opacity: 0!important; + visibility: hidden!important; + } + + .ttn-wrapper { + position: fixed; + inset: 0; + width:100%; + height:100%; + display:flex; + justify-content:center; + align-items:center; + transition: background 0.3s; + background-color: #1f1f1f; + } + + .ttn-wrapper.slideshow { + background:black; + } + + .ttn-main-img { + position:absolute; + transform-origin: 0 0; + max-width:none; + max-height:none; + user-select:none; + transform: translateZ(0); + } + + .ttn-previews { + position: absolute; + bottom: 0; + left: 0; + display: flex; + width: max-content; + height: 110px; + transition: transform 0.2s ease; + align-items: flex-end; + background: black; + } + + .ttn-img { + height: 90px; + border: 10px solid black; + cursor: pointer; + display: block; + transition: height 0.4s ease, transform 0.4s ease; + background: black; + box-sizing: content-box; + } + + + .ttn-img.active { + height: 140px; + z-index: 10; + transition: 0.1s; + } + + .ttn-img.before { + transform: scale(1.01); + } + + .ttn-img.before:hover { + height: 110px!important; + z-index: 10; + } + + .ttn-img.after { + transform: scale(1.01); + } + + .ttn-img.after:hover { + height: 110px!important; + z-index: 10; + } + .ttn-img.compare-base { + border:10px solid cyan; + } + + .ttn-img.compare-target { + border:10px solid red; + } + + .ttn-context { + position:absolute; + background:#222; + color:white; + padding:5px; + border:1px solid #555; + z-index:9999; + font-size:14px; + } + + .ttn-context div { + padding:4px 10px; + cursor:pointer; + } + + .ttn-context div:hover { + background:#444; + } + + .settingsBtn { + position: absolute; + top: 10px; + right: 10px; + z-index: 20; + background: gray; + color: white; + border-width: medium; + border-color: silver; + box-sizing: content-box; + } + + .settingsMenu { + position: absolute; + top: 35px; + right: 10px; + background: #222; + padding: 10px; + border: 1px solid #555; + z-Index: 20; + width: 140px; + box-sizing: content-box; + } + + .ttn-btn { + width:stretch; + background: #202020; + border-color: black; + color: gray; + margin: 5px; + padding: 5px; + } + + .ttN-dropdown, .ttN-nested-dropdown { + position: relative; + box-sizing: border-box; + background-color: #171717; + box-shadow: 0 4px 4px rgba(255, 255, 255, .25); + padding: 0; + margin: 0; + list-style: none; + z-index: 1000; + overflow: visible; + max-height: fit-content; + max-width: fit-content; + color: white; + } + + .ttN-dropdown { + position: absolute; + border-radius: 0; + } + + .ttN-dropdown.ttN-dropdown-scrollable { + max-height: min(48vh, 360px); + min-width: 
220px; + overflow-y: auto; + overflow-x: hidden; + overscroll-behavior: contain; + scrollbar-gutter: stable; + } + + .ttN-nested-dropdown.ttN-dropdown-scrollable { + max-height: min(48vh, 360px); + overflow-y: auto; + overflow-x: hidden; + overscroll-behavior: contain; + scrollbar-gutter: stable; + } + + .ttN-dropdown.ttN-dropdown-scrollable::-webkit-scrollbar, + .ttN-nested-dropdown.ttN-dropdown-scrollable::-webkit-scrollbar { + width: 10px; + } + + .ttN-dropdown.ttN-dropdown-scrollable::-webkit-scrollbar-track, + .ttN-nested-dropdown.ttN-dropdown-scrollable::-webkit-scrollbar-track { + background: #121212; + } + + .ttN-dropdown.ttN-dropdown-scrollable::-webkit-scrollbar-thumb, + .ttN-nested-dropdown.ttN-dropdown-scrollable::-webkit-scrollbar-thumb { + background: #4b4b4b; + border-radius: 8px; + } + + .ttN-dropdown.ttN-dropdown-scrollable::-webkit-scrollbar-thumb:hover, + .ttN-nested-dropdown.ttN-dropdown-scrollable::-webkit-scrollbar-thumb:hover { + background: #646464; + } + + /* Style for final items */ + .ttN-dropdown li.item, .ttN-nested-dropdown li.item { + font-weight: normal; + min-width: max-content; + } + + /* Style for folders (parent items) */ + .ttN-dropdown li.folder, .ttN-nested-dropdown li.folder { + cursor: default; + position: relative; + border-right: 3px solid #005757; + } + + .ttN-dropdown li.folder::after, .ttN-nested-dropdown li.folder::after { + content: ">"; + position: absolute; + right: 2px; + font-weight: normal; + } + + .ttN-dropdown li, .ttN-nested-dropdown li { + padding: 4px 10px; + cursor: pointer; + font-family: system-ui; + font-size: 0.7rem; + position: relative; + } + + /* Style for nested dropdowns */ + .ttN-nested-dropdown { + position: absolute; + top: 0; + left: 100%; + margin: 0; + border: none; + display: none; + } + + .ttN-dropdown li.selected > .ttN-nested-dropdown, + .ttN-nested-dropdown li.selected > .ttN-nested-dropdown { + display: block; + border: none; + } + + .ttN-dropdown li.selected, + .ttN-nested-dropdown li.selected { + background-color: #222222; + border: none; + } + `; + this.doc.head.appendChild(style); + } + + createLayout() { + this.wrapper = this.doc.createElement("div"); + this.wrapper.className = "ttn-wrapper slideshow"; + this.doc.body.appendChild(this.wrapper); + + this.image = this.doc.createElement("img"); + this.image.className = "ttn-main-img"; + this.wrapper.appendChild(this.image); + + this.previewBar = this.doc.createElement("div"); + this.previewBar.className = "ttn-previews hidden"; + this.wrapper.appendChild(this.previewBar); + + this.settingsBtn = this.doc.createElement("button"); + this.settingsBtn.innerText = "⚙"; + this.settingsBtn.className = "settingsBtn hidden" + this.wrapper.appendChild(this.settingsBtn); + + this.settingsBtn.onclick = () => + this.toggleSettingsMenu(); + } + + /* ================= IMAGE ================= */ + + refreshImages() { + const list = ttN_srcDict[this.node.id] || []; + + const newSignature = list.join("|"); + if (this._lastSignature === newSignature) return; + this._lastSignature = newSignature; + + this.previewBar.innerHTML = ""; + + list.forEach((src, i) => { + const img = this.doc.createElement("img"); + img.src = src; + img.className = "ttn-img"; + + img.onclick = () => this.setImage(i); + + img.oncontextmenu = (e) => { + e.preventDefault(); + this.ttNcontextMenu(img, i); + }; + + this.previewBar.appendChild(img); + }); + + if (list.length && this.imageIndex === -1) { + this.setImage(list.length - 1); + } + + this.updatePreviewHighlight(); + } + + setImage(i) { + const list 
= ttN_srcDict[this.node.id] || []; + if (!list.length) return; + + if (i === -1) { + i = list.length - 1; + } else { + i = ((i % list.length) + list.length) % list.length; + } + + this.imageIndex = i; + this.image.src = list[i]; + + this.updatePreviewHighlight(); + + const activeThumb = this.previewBar.children[i]; + + if (activeThumb && !activeThumb.complete) { + activeThumb.onload = () => { + requestAnimationFrame(() => + this.applyPreviewTranslation() + ); + }; + } else { + requestAnimationFrame(() => + this.applyPreviewTranslation() + ); + } + } + + next(ctrl=false, shift=false, reverse=false) { + const num = shift === true ? 5 : 1 + if (this.compareBase !== null && this.compareTarget !== null) { + this.imageIndex = + this.imageIndex === this.compareBase + ? this.compareTarget + : this.compareBase; + this.setImage(this.imageIndex); + return; + } + if (reverse) { + if (ctrl) { + this.setImage(0) + } else { + this.setImage(this.imageIndex - num); + } + } else { + if (ctrl) { + this.setImage(-1) + } else { + this.setImage(this.imageIndex + num); + } + } + } + + prev(ctrl=false, shift=false) { this.next(ctrl, shift, true); } + + /* ================= COMPARE ================= */ + ttNcontextMenu(imgElement, index) { + const SOC = 'Select for Compare' + const CWS = 'Compare with Selected' + const CC = 'Clear Compare' + + let suggestions = {} + + if (this.compareBase !== index && this.compareTarget !== index) { + suggestions[SOC] = null + } + + if (this.compareBase !== null && this.compareBase !== index && this.compareTarget !== index) { + suggestions[CWS] = null + } + + if (this.comparing || this.compareBase !== null) { + suggestions[CC] = null + } + + const manualOffset = ['80%', '70%']; + ttN_CreateDropdown(imgElement, suggestions, async (s) => { + if (s === SOC) { + this.compareBase = index; + this.setImage(index); + this.updatePreviewHighlight(); + } + if (s === CWS) { + if (this.compareBase !== null && this.compareBase !== index) { + this.compareTarget = index; + this.imageIndex = this.compareBase; + this.comparing = true; + this.setImage(index); + } + this.updatePreviewHighlight(); + } + if (s === CC) { + this.compareBase = null; + this.compareTarget = null; + this.comparing = false; + this.updatePreviewHighlight(); + } + }, true, manualOffset, this.wrapper) + } + + updatePreviewHighlight() { + [...this.previewBar.children].forEach((el, i) => { + el.classList.toggle("active", i === this.imageIndex); + el.classList.toggle("compare-base", i === this.compareBase); + el.classList.toggle("compare-target", i === this.compareTarget); + + el.classList.toggle("before", i < this.imageIndex) + el.classList.toggle("after", i > this.imageIndex) + }); + } + + /* ================= TRANSFORM ================= */ + + resetTransform() { + this.scale = 1; + this.offsetX = 0; + this.offsetY = 0; + this.applyTransform(); + } + + applyTransform() { + const x = Math.round(this.offsetX * 1000) / 1000; + const y = Math.round(this.offsetY * 1000) / 1000; + const s = Math.round(this.scale * 1000) / 1000; + + this.image.style.transform = + `translate(${x}px, ${y}px) scale(${s})`; + } + + applyPreviewTranslation() { + if (!this.previewBar.children.length) return; + + const active = this.previewBar.children[this.imageIndex]; + if (!active) return; + + requestAnimationFrame(() => { + // Distance from preview bar left edge to active center + const activeCenter = + active.offsetLeft + + active.offsetWidth / 2 + + parseFloat(getComputedStyle(this.previewBar).paddingLeft); + + // Visible center of screen + const 
screenCenter = this.wrapper.clientWidth / 2; + + // Compute translation so activeCenter aligns with screenCenter + const translateX = screenCenter - activeCenter; + + this.previewBar.style.transform = + `translateX(${translateX}px)`; + }); + } + + zoomImage(e) { + const rect = this.image.getBoundingClientRect(); + + // Mouse position relative to image + const mouseX = e.clientX - rect.left; + const mouseY = e.clientY - rect.top; + + const prevScale = this.scale; + const zoomFactor = 1.2; + + let newScale = e.deltaY > 0 + ? prevScale / zoomFactor + : prevScale * zoomFactor; + newScale = Math.min(Math.max(newScale, 0.1), 8); + + const scaleRatio = newScale / prevScale; + + // Adjust offsets so the point under cursor stays fixed + this.offsetX -= mouseX * (scaleRatio - 1); + this.offsetY -= mouseY * (scaleRatio - 1); + + if (Math.abs(this.offsetX) < 0.0001) this.offsetX = 0; + if (Math.abs(this.offsetY) < 0.0001) this.offsetY = 0; + + this.scale = newScale; + this.applyTransform(); + } + + fitToScreen() { + if (!this.image.naturalWidth || !this.image.naturalHeight) return; + + const wrapperWidth = this.wrapper.clientWidth; + const wrapperHeight = this.wrapper.clientHeight; + + const imgWidth = this.image.naturalWidth; + const imgHeight = this.image.naturalHeight; + + const scaleX = wrapperWidth / imgWidth; + const scaleY = wrapperHeight / imgHeight; + + this.scale = Math.min(scaleX, scaleY); + + const scaledWidth = imgWidth * this.scale; + const scaledHeight = imgHeight * this.scale; + + this.offsetX = -(scaledWidth - imgWidth) / 2; + this.offsetY = -(scaledHeight - imgHeight) / 2; + this.applyTransform(); + } + /* ================ HELPERS ================= */ + + _isMouseOverElement(element, mouseX, mouseY) { + if (!element) return false; + const rect = element.getBoundingClientRect(); + return ( + mouseX >= rect.left && + mouseX <= rect.right && + mouseY >= rect.top && + mouseY <= rect.bottom + ); + } + + _isOverUI(mouseX, mouseY) { + if (this.previewBar && this._isMouseOverElement(this.previewBar, mouseX, mouseY)) { + return true + } + if (this.settingsBtn && this._isMouseOverElement(this.settingsBtn, mouseX, mouseY)) { + return true + } + if (this.settingsMenu && this._isMouseOverElement(this.settingsMenu, mouseX, mouseY)) { + return true + } + + return false + } + + _reset_hideUI_Timeout(timeout=3700) { + clearTimeout(this.hideTimeout); + + this.hideTimeout = setTimeout(() => { + if (this.slideshow && this.autohide) { + this.toggleUI(false, false); + } + }, timeout); + } + + toggleUI(show=null, reset=true) { + if (show==null) { + this.previewBar.classList.toggle('hidden') + this.settingsBtn.classList.toggle('hidden') + this.settingsMenu?.classList.toggle('hidden') + } else { + this.previewBar.classList.toggle('hidden', !show) + this.settingsBtn.classList.toggle('hidden', !show) + this.settingsMenu?.classList.toggle('hidden', !show) + } + if (reset) this._reset_hideUI_Timeout(); + } + + toggleSettingsMenu() { + if (this.settingsMenu) { + this.settingsMenu.remove(); + this.settingsMenu = null; + return; + } + + const menu = this.doc.createElement("div"); + menu.className = "settingsMenu" + + const autoBtn = this.doc.createElement("button"); + autoBtn.className = 'ttn-btn' + autoBtn.id = 'autoBtn' + autoBtn.innerText = `Autohide: ${this.autohide ? "ON" : "OFF"}`; + autoBtn.onclick = () => { + this.autohide = !this.autohide; + localStorage.setItem(STORAGE_KEYS.AUTOHIDE, + JSON.stringify(this.autohide) + ); + autoBtn.innerText = + `Autohide: ${this.autohide ? 
"ON" : "OFF"}`; + }; + + const invertBtn = this.doc.createElement("button"); + invertBtn.className = 'ttn-btn' + invertBtn.id = 'invertBtn' + invertBtn.innerText = `Wheel: ${this.invertctrl ? "ZOOM" : "SCROLL"}`; + invertBtn.onclick = () => { + this.invertctrl = !this.invertctrl; + localStorage.setItem(STORAGE_KEYS.INVERT, + JSON.stringify(this.invertctrl) + ); + invertBtn.innerText = + `Wheel: ${this.invertctrl ? "ZOOM" : "SCROLL"}`; + }; + + const slideBtn = this.doc.createElement("button"); + slideBtn.className = 'ttn-btn' + slideBtn.id = 'slideBtn' + slideBtn.innerText = `Slideshow: ${this.slideshow ? "ON" : "OFF"}`; + slideBtn.onclick = () => { + this.setSlideshow(!this.slideshow) + }; + + const fitScrnBtn = this.doc.createElement("button"); + fitScrnBtn.className = 'ttn-btn' + fitScrnBtn.id = 'fitScrnBtn' + fitScrnBtn.innerText = `Fit to Screen: ${this.fitscreentoggle ? "ON" : "OFF"}`; + fitScrnBtn.onclick = () => { + this.fitscreentoggle = !this.fitscreentoggle; + localStorage.setItem(STORAGE_KEYS.FITSCREEN, + JSON.stringify(this.fitscreentoggle)) + fitScrnBtn.innerText = `Fit to Screen: ${this.fitscreentoggle ? "ON" : "OFF"}`; + if (this.fitscreentoggle) this.fitToScreen() + } + + const infoEl = this.doc.createElement("p") + infoEl.textContent = "Up Arrow - Hide/Show UI\nDown Arrow - Toggle Slideshow\nLeft Arrow - Previous Image\nRight Arrow - Next Image\nF - Fit image to window" + + menu.appendChild(autoBtn); + menu.appendChild(this.doc.createElement("br")); + menu.appendChild(invertBtn); + menu.appendChild(this.doc.createElement("br")); + menu.appendChild(slideBtn); + if (this.mode != 'fullscreen') { + menu.appendChild(this.doc.createElement("br")); + menu.appendChild(fitScrnBtn); + } + + + this.wrapper.appendChild(menu); + this.settingsMenu = menu; + } + + setSlideshow(enabled) { + this.slideshow = enabled; + this.wrapper.classList.toggle("slideshow", enabled); + + if (this.settingsMenu) { + const slideBtn = this.settingsMenu.querySelector('#slideBtn'); + if (slideBtn) { + slideBtn.innerText = `Slideshow: ${this.slideshow ? 
"ON" : "OFF"}`; + } + } + + if (enabled) { + if (!this.comparing) this.setImage(-1); + if (this.autohide) this.toggleUI(false); + } else { + this.toggleUI(true); + } + } + + /* ================= EVENTS ================= */ + _onKeyDown = (e) => { + if (e.code === "ArrowLeft") { + e.preventDefault(); + this.prev(e.ctrlKey, e.shiftKey); + } + + if (e.code === "ArrowRight") { + e.preventDefault(); + this.next(e.ctrlKey, e.shiftKey); + } + + if (e.code === "ArrowDown") { + this.setSlideshow(!this.slideshow) + } + + if (e.code === "ArrowUp") { + this.toggleUI(); + } + + if (e.code === "Escape") { + if (this.mode === "fullscreen") { + if (this.doc.fullscreenElement) { + this.doc.exitFullscreen().catch(() => {}); + } + } else { + this.doc.defaultView.close(); + } + } + + if (e.code === "KeyF") { + this.fitToScreen() + } + } + + _onWheel = (e) => { + e.preventDefault(); + + const isZoom = (this.invertctrl && !e.ctrlKey) || + (!this.invertctrl && e.ctrlKey); + + if (isZoom) { + this.zoomImage(e); + return + } + + const now = performance.now(); + if (now - this._lastWheelTime < 40) return; + this._lastWheelTime = now; + + if (e.deltaY > 0) this.next(); + else this.prev(); + } + + _onMouseDown = (e) => { + if (!this._isOverUI(e.clientX, e.clientY)) { + e.preventDefault(); + this.dragging = true; + this.dragStartX = e.clientX; + this.dragStartY = e.clientY; + } + } + + _onMouseMove = (e) => { + if (this.dragging) { + const dx = e.clientX - this.dragStartX; + const dy = e.clientY - this.dragStartY; + + this.offsetX += dx; + this.offsetY += dy; + + this.dragStartX = e.clientX; + this.dragStartY = e.clientY; + + this.applyTransform(); + } + if (this.slideshow){ + if (this._isOverUI(e.clientX, e.clientY)) { + if (this.previewBar.classList.contains("hidden")) { + this.toggleUI(true); + } else { + this._reset_hideUI_Timeout(); + } + } + } + } + + _onMouseUp = (e) => { this.dragging = false; } + + _onClick = (e) => { + if (!this._isOverUI(e.clientX, e.clientY) && this.slideshow && this.autohide) { + this.toggleUI(false) + } + } + + _onDblClick = (e) => { + if (!this._isOverUI(e.clientX, e.clientY)) { + this.resetTransform(); + } + } + + _onFullscreenChange = () => { + if (this.doc.fullscreenElement) { + requestAnimationFrame(() => { + this.applyPreviewTranslation(); + }); + return; + } + + TTNViewer.fullscreenInstance = null; + this.destroy(); + }; + + attachEvents() { + this.doc.addEventListener("keydown", this._onKeyDown); + this.doc.addEventListener("wheel", this._onWheel, this._wheelOptions); + this.doc.addEventListener("mousedown", this._onMouseDown); + this.wrapper.addEventListener("mousemove", this._onMouseMove); + this.doc.addEventListener("mouseup", this._onMouseUp) + this.doc.addEventListener("click", this._onClick); + this.doc.addEventListener("dblclick", this._onDblClick); + this.doc.addEventListener("fullscreenchange", this._onFullscreenChange); + + this._lastWrapperSize = { w: 0, h: 0 }; + this._resizeObserver = new ResizeObserver(() => { + if (this._resizing) return; + const w = this.wrapper.clientWidth; + const h = this.wrapper.clientHeight; + + if (w === this._lastWrapperSize.w && + h === this._lastWrapperSize.h) { + return; + } + + this._lastWrapperSize = { w, h }; + + this._resizing = true; + + requestAnimationFrame(() => { + try { + if (this.fitscreentoggle) { + this.fitToScreen(); + } + this.applyPreviewTranslation(); + } finally { + this._resizing = false; + } + }); + }); + + this._resizeObserver.observe(this.wrapper); + } + + destroy() { + TTNViewer.instances.delete(this); + + 
this.doc.removeEventListener("keydown", this._onKeyDown); + this.doc.removeEventListener("wheel", this._onWheel, this._wheelOptions); + this.doc.removeEventListener("mousedown", this._onMouseDown); + this.wrapper.removeEventListener("mousemove", this._onMouseMove); + this.doc.removeEventListener("mouseup", this._onMouseUp) + this.doc.removeEventListener("click", this._onClick); + this.doc.removeEventListener("dblclick", this._onDblClick); + this.doc.removeEventListener("fullscreenchange", this._onFullscreenChange); + + if (this._resizeObserver) { + this._resizeObserver.disconnect(); + this._resizeObserver = null; + } + + this.wrapper?.remove(); + + } +} + +/* ========================================================= + LAUNCHERS +========================================================= */ + +function _getSelectedNode() { + const graphcanvas = LGraphCanvas.active_canvas; + if (graphcanvas.selected_nodes && + Object.keys(graphcanvas.selected_nodes).length === 1) { + return Object.values(graphcanvas.selected_nodes)[0]; + } + return null; +} + +function _getViewerNode() { + const node = _getSelectedNode() + if (node) return node + + let defaultNodeID = JSON.parse(localStorage.getItem(STORAGE_KEYS.DEFAULTNODE)) + if (defaultNodeID) { + let defaultNode = app.graph._nodes_by_id[defaultNodeID] + if (defaultNode) return defaultNode + } + + return null; +} + +export function _setDefaultFullscreenNode() { + let selectedNode = _getSelectedNode(); + if (selectedNode) { + localStorage.setItem(STORAGE_KEYS.DEFAULTNODE, JSON.stringify(selectedNode.id)); + } else { + localStorage.removeItem(STORAGE_KEYS.DEFAULTNODE); + } +} + +export function openFullscreenApp(node) { + if (TTNViewer.fullscreenInstance) return; + TTNViewer.fullscreenInstance = + new TTNViewer(node, document, "fullscreen"); +} + +export function openPopoutViewer(node) { + const win = window.open("", "_blank","width=512,height=512,resizable=yes"); + if (!win) return; + + TTN_POPOUTS.add(win); + win.addEventListener("beforeunload", () => { + TTN_POPOUTS.delete(win); + }); + + win.document.write(` + + TTN Viewer - [${node.id}] ${node.title} + `); + win.document.close(); + + new TTNViewer(node, win.document, "popout"); +} + +window.addEventListener("beforeunload", () => { + for (const win of TTN_POPOUTS) { + try { + win.close(); + } catch {} + } +}); + +/* ========================================================= + HOTKEYS +========================================================= */ + +document.addEventListener("keydown", (e) => { + if (e.code === "F11" && e.shiftKey) { + const node = _getViewerNode(); + if (node) openFullscreenApp(node); + } + + if (e.code === "F10" && e.shiftKey) { + const node = _getViewerNode(); + if (node) openPopoutViewer(node); + } +}); \ No newline at end of file diff --git a/zavodik/nodes/ComfyUI_tinyterraNodes-main/js/ttNinterface.js b/zavodik/nodes/ComfyUI_tinyterraNodes-main/js/ttNinterface.js new file mode 100644 index 0000000000000000000000000000000000000000..65b0bb42783bd16a2bcb76bdd29614320329b7b9 --- /dev/null +++ b/zavodik/nodes/ComfyUI_tinyterraNodes-main/js/ttNinterface.js @@ -0,0 +1,537 @@ +import { app } from "../../scripts/app.js"; + +const customPipeLineLink = "#7737AA" +const customPipeLineSDXLLink = "#0DC52B" +const customIntLink = "#29699C" +const customXYPlotLink = "#74DA5D" +const customLoraStackLink = "#87C7B7" +const customStringLink = "#7CBB1A" + +var customLinkColors = JSON.parse(localStorage.getItem('Comfy.Settings.ttN.customLinkColors')) || {}; +if (!customLinkColors["PIPE_LINE"] || 
!LGraphCanvas.link_type_colors["PIPE_LINE"]) {customLinkColors["PIPE_LINE"] = customPipeLineLink;} +if (!customLinkColors["PIPE_LINE_SDXL"] || !LGraphCanvas.link_type_colors["PIPE_LINE_SDXL"]) {customLinkColors["PIPE_LINE_SDXL"] = customPipeLineSDXLLink;} +if (!customLinkColors["INT"] || !LGraphCanvas.link_type_colors["INT"]) {customLinkColors["INT"] = customIntLink;} +if (!customLinkColors["XYPLOT"] || !LGraphCanvas.link_type_colors["XYPLOT"]) {customLinkColors["XYPLOT"] = customXYPlotLink;} +if (!customLinkColors["ADV_XYPLOT"] || !LGraphCanvas.link_type_colors["ADV_XYPLOT"]) {customLinkColors["ADV_XYPLOT"] = customXYPlotLink;} +if (!customLinkColors["LORA_STACK"] || !LGraphCanvas.link_type_colors["LORA_STACK"]) {customLinkColors["LORA_STACK"] = customLoraStackLink;} +if (!customLinkColors["CONTROL_NET_STACK"] || !LGraphCanvas.link_type_colors["CONTROL_NET_STACK"]) {customLinkColors["CONTROL_NET_STACK"] = customLoraStackLink;} +if (!customLinkColors["STRING"] || !LGraphCanvas.link_type_colors["STRING"]) {customLinkColors["STRING"] = customStringLink;} + +localStorage.setItem('Comfy.Settings.ttN.customLinkColors', JSON.stringify(customLinkColors)); + +app.registerExtension({ + name: "comfy.ttN.interface", + init() { + function adjustToGrid(val, gridSize) { + return Math.round(val / gridSize) * gridSize; + } + + function moveNodeBasedOnKey(e, node, gridSize, shiftMult) { + switch (e.code) { + case 'ArrowUp': + node.pos[1] -= gridSize * shiftMult; + break; + case 'ArrowDown': + node.pos[1] += gridSize * shiftMult; + break; + case 'ArrowLeft': + node.pos[0] -= gridSize * shiftMult; + break; + case 'ArrowRight': + node.pos[0] += gridSize * shiftMult; + break; + } + node.setDirtyCanvas(true, true); + } + + function keyMoveNode(e, node) { + let gridSize = JSON.parse(localStorage.getItem('Comfy.Settings.Comfy.SnapToGrid.GridSize')); + gridSize = gridSize ? parseInt(gridSize) : 1; + let shiftMult = e.shiftKey ? 
10 : 1; + + node.pos[0] = adjustToGrid(node.pos[0], gridSize); + node.pos[1] = adjustToGrid(node.pos[1], gridSize); + + moveNodeBasedOnKey(e, node, gridSize, shiftMult); + } + + function getSelectedNodes(e) { + const inputField = e.composedPath()[0]; + if (inputField.tagName === "TEXTAREA") return; + if (e.ctrlKey && ['ArrowUp', 'ArrowDown', 'ArrowLeft', 'ArrowRight'].includes(e.code)) { + let graphcanvas = LGraphCanvas.active_canvas; + for (let node in graphcanvas.selected_nodes) { + keyMoveNode(e, graphcanvas.selected_nodes[node]); + } + } + } + + window.addEventListener("keydown", getSelectedNodes, true); + + LGraphCanvas.prototype.ttNcreateDialog = function (htmlContent, onOK, onCancel) { + var dialog = document.createElement("div"); + dialog.is_modified = false; + dialog.className = "ttN-dialog"; + dialog.innerHTML = htmlContent + "<button id='ok'>OK</button>"; + + dialog.close = function() { + if (dialog.parentNode) { + dialog.parentNode.removeChild(dialog); + } + }; + + var inputs = Array.from(dialog.querySelectorAll("input, select")); + + inputs.forEach(input => { + input.addEventListener("keydown", function(e) { + dialog.is_modified = true; + if (e.keyCode == 27) { // ESC + onCancel && onCancel(); + dialog.close(); + } else if (e.keyCode == 13) { // Enter + onOK && onOK(dialog, inputs.map(input => input.value)); + dialog.close(); + } else if (e.keyCode != 13 && e.target.localName != "textarea") { + return; + } + e.preventDefault(); + e.stopPropagation(); + }); + }); + + var graphcanvas = LGraphCanvas.active_canvas; + var canvas = graphcanvas.canvas; + + var rect = canvas.getBoundingClientRect(); + var offsetx = -20; + var offsety = -20; + if (rect) { + offsetx -= rect.left; + offsety -= rect.top; + } + + if (event) { + dialog.style.left = event.clientX + offsetx + "px"; + dialog.style.top = event.clientY + offsety + "px"; + } else { + dialog.style.left = canvas.width * 0.5 + offsetx + "px"; + dialog.style.top = canvas.height * 0.5 + offsety + "px"; + } + + var button = dialog.querySelector("#ok"); + button.addEventListener("click", function() { + onOK && onOK(dialog, inputs.map(input => input.value)); + dialog.close(); + }); + + canvas.parentNode.appendChild(dialog); + + if (inputs.length) inputs[0].focus(); + + var dialogCloseTimer = null; + dialog.addEventListener("mouseleave", function(e) { + if(LiteGraph.dialog_close_on_mouse_leave) + if (!dialog.is_modified && LiteGraph.dialog_close_on_mouse_leave) + dialogCloseTimer = setTimeout(dialog.close, LiteGraph.dialog_close_on_mouse_leave_delay); //dialog.close(); + }); + dialog.addEventListener("mouseenter", function(e) { + if(LiteGraph.dialog_close_on_mouse_leave) + if(dialogCloseTimer) clearTimeout(dialogCloseTimer); + }); + + return dialog; + }; + + LGraphCanvas.prototype.ttNsetNodeDimension = function (node) { + const nodeWidth = node.size[0]; + const nodeHeight = node.size[1]; + + let input_html = "<input type='text' class='value' value='" + nodeWidth + "'>"; + input_html += "<input type='text' class='value' value='" + nodeHeight + "'>"; + + LGraphCanvas.prototype.ttNcreateDialog("<span class='name'>Width/Height</span>" + input_html, + function(dialog, values) { + var widthValue = Number(values[0]) ? values[0] : nodeWidth; + var heightValue = Number(values[1]) ? 
values[1] : nodeHeight; + let sz = node.computeSize(); + node.setSize([Math.max(sz[0], widthValue), Math.max(sz[1], heightValue)]); + if (dialog.parentNode) { + dialog.parentNode.removeChild(dialog); + } + node.setDirtyCanvas(true, true); + }, + null + ); + }; + + LGraphCanvas.prototype.ttNsetSlotTypeColor = function(slot){ + var slotColor = LGraphCanvas.link_type_colors[slot.output.type].toUpperCase(); + var slotType = slot.output.type; + // Check if the color is in the correct format + if (!/^#([0-9A-F]{3}){1,2}$/i.test(slotColor)) { + slotColor = "#FFFFFF"; + } + + // Check if browser supports color input type + var inputType = "color"; + var inputID = " id='colorPicker'"; + var inputElem = document.createElement("input"); + inputElem.setAttribute("type", inputType); + if (inputElem.type !== "color") { + // If it doesn't, fall back to text input + inputType = "text"; + inputID = " "; + } + + let input_html = "<input type='" + inputType + "'" + inputID + " value='" + slotColor + "'>"; + input_html += "<button id='Default'>DEFAULT</button>"; // Add a default button + input_html += "<button id='reset'>RESET</button>"; // Add a reset button + + var dialog = LGraphCanvas.prototype.ttNcreateDialog("<span class='name'>" + slotType + "</span>" + + input_html, + function(dialog, values){ + var hexColor = values[0].toUpperCase(); + + if (!/^#([0-9A-F]{3}){1,2}$/i.test(hexColor)) { + return + } + + if (hexColor === slotColor) { + return + } + + var customLinkColors = JSON.parse(localStorage.getItem('Comfy.Settings.ttN.customLinkColors')) || {}; + if (!customLinkColors[slotType + "_ORIG"]) {customLinkColors[slotType + "_ORIG"] = slotColor}; + customLinkColors[slotType] = hexColor; + localStorage.setItem('Comfy.Settings.ttN.customLinkColors', JSON.stringify(customLinkColors)); + + app.canvas.default_connection_color_byType[slotType] = hexColor; + LGraphCanvas.link_type_colors[slotType] = hexColor; + } + ); + + var resetButton = dialog.querySelector("#reset"); + resetButton.addEventListener("click", function() { + var colorInput = dialog.querySelector("input[type='" + inputType + "']"); + colorInput.value = slotColor; + }); + + var defaultButton = dialog.querySelector("#Default"); + defaultButton.addEventListener("click", function() { + var customLinkColors = JSON.parse(localStorage.getItem('Comfy.Settings.ttN.customLinkColors')) || {}; + if (customLinkColors[slotType+"_ORIG"]) { + app.canvas.default_connection_color_byType[slotType] = customLinkColors[slotType+"_ORIG"]; + LGraphCanvas.link_type_colors[slotType] = customLinkColors[slotType+"_ORIG"]; + + delete customLinkColors[slotType+"_ORIG"]; + delete customLinkColors[slotType]; + } + localStorage.setItem('Comfy.Settings.ttN.customLinkColors', JSON.stringify(customLinkColors)); + dialog.close() + }) + + var colorPicker = dialog.querySelector("input[type='" + inputType + "']"); + colorPicker.addEventListener("focusout", function(e) { + this.focus(); + }); + }; + + LGraphCanvas.prototype.ttNdefaultBGcolor = function(node, defaultBGColor){ + setTimeout(() => { + if (defaultBGColor !== 'default' && !node.color) { + node.addProperty('ttNbgOverride', defaultBGColor); + node.color=defaultBGColor.color; + node.bgcolor=defaultBGColor.bgcolor; + } + + if (node.color && node.properties.ttNbgOverride) { + if (node.properties.ttNbgOverride !== defaultBGColor && node.color === node.properties.ttNbgOverride.color) { + if (defaultBGColor === 'default') { + delete node.properties.ttNbgOverride + delete node.color + delete node.bgcolor + } else { + node.properties.ttNbgOverride = defaultBGColor + node.color=defaultBGColor.color; + node.bgcolor=defaultBGColor.bgcolor; + } + } + + if (node.properties.ttNbgOverride !== 
defaultBGColor && node.color !== node.properties.ttNbgOverride?.color) { + delete node.properties.ttNbgOverride + } + } + }, 0); + }; + + LGraphCanvas.prototype.ttNfixNodeSize = function(node){ + setTimeout(() => { + node.onResize?.(node.size); + }, 0); + }; + + LGraphCanvas.ttNonShowLinkStyles = function(value, options, e, menu, node) { + new LiteGraph.ContextMenu( + LiteGraph.LINK_RENDER_MODES, + { event: e, callback: inner_clicked, parentMenu: menu, node: node } + ); + + function inner_clicked(v) { + if (!node) { + return; + } + var kV = Object.values(LiteGraph.LINK_RENDER_MODES).indexOf(v); + + localStorage.setItem('Comfy.Settings.Comfy.LinkRenderMode', JSON.stringify(String(kV))); + + app.canvas.links_render_mode = kV; + app.graph.setDirtyCanvas(true); + } + + return false; + }; + + LGraphCanvas.ttNlinkStyleBorder = function(value, options, e, menu, node) { + new LiteGraph.ContextMenu( + [false, true], + { event: e, callback: inner_clicked, parentMenu: menu, node: node } + ); + + function inner_clicked(v) { + if (!node) { + return; + } + + localStorage.setItem('Comfy.Settings.ttN.links_render_border', JSON.stringify(v)); + + app.canvas.render_connections_border = v; + } + + return false; + }; + + LGraphCanvas.ttNlinkStyleShadow = function(value, options, e, menu, node) { + new LiteGraph.ContextMenu( + [false, true], + { event: e, callback: inner_clicked, parentMenu: menu, node: node } + ); + + function inner_clicked(v) { + if (!node) { + return; + } + + localStorage.setItem('Comfy.Settings.ttN.links_render_shadow', JSON.stringify(v)); + + app.canvas.render_connections_shadows = v; + } + + return false; + }; + + LGraphCanvas.ttNsetDefaultBGColor = function(value, options, e, menu, node) { + if (!node) { + throw "no node for color"; + } + + var values = []; + values.push({ + value: null, + content: + "No Color" + }); + + for (var i in LGraphCanvas.node_colors) { + var color = LGraphCanvas.node_colors[i]; + var value = { + value: i, + content: + "" + + i + + "" + }; + values.push(value); + } + new LiteGraph.ContextMenu(values, { + event: e, + callback: inner_clicked, + parentMenu: menu, + node: node + }); + + function inner_clicked(v) { + if (!node) { + return; + } + + var defaultBGColor = v.value ? 
LGraphCanvas.node_colors[v.value] : 'default'; + + localStorage.setItem('Comfy.Settings.ttN.defaultBGColor', JSON.stringify(defaultBGColor)); + + for (var i in app.graph._nodes) { + LGraphCanvas.prototype.ttNdefaultBGcolor(app.graph._nodes[i], defaultBGColor); + } + + node.setDirtyCanvas(true, true); + } + + return false; + }; + + LGraphCanvas.prototype.ttNupdateRenderSettings = function (app) { + let showLinkBorder = Number(localStorage.getItem('Comfy.Settings.ttN.links_render_border')); + if (showLinkBorder !== undefined) {app.canvas.render_connections_border = showLinkBorder} + + let showLinkShadow = Number(localStorage.getItem('Comfy.Settings.ttN.links_render_shadow')); + if (showLinkShadow !== undefined) {app.canvas.render_connections_shadows = showLinkShadow} + + let showExecOrder = localStorage.getItem('Comfy.Settings.ttN.showExecutionOrder'); + if (showExecOrder === 'true') {app.canvas.render_execution_order = true} + else {app.canvas.render_execution_order = false} + + var customLinkColors = JSON.parse(localStorage.getItem('Comfy.Settings.ttN.customLinkColors')) || {}; + Object.assign(app.canvas.default_connection_color_byType, customLinkColors); + Object.assign(LGraphCanvas.link_type_colors, customLinkColors); + } + }, + + beforeRegisterNodeDef(nodeType, nodeData, app) { + const originalGetSlotMenuOptions = nodeType.prototype.getSlotMenuOptions; + nodeType.prototype.getSlotMenuOptions = (slot) => { + originalGetSlotMenuOptions?.apply(this, slot); + let menu_info = []; + if ( + slot && + slot.output && + slot.output.links && + slot.output.links.length + ) { + menu_info.push({ content: "Disconnect Links", slot: slot }); + } + var _slot = slot.input || slot.output; + if (_slot.removable){ + menu_info.push( + _slot.locked + ? "Cannot remove" + : { content: "Remove Slot", slot: slot } + ); + } + if (!_slot.nameLocked){ + menu_info.push({ content: "Rename Slot", slot: slot }); + } + + menu_info.push({ content: "🌏 Slot Type Color", slot: slot, callback: () => { LGraphCanvas.prototype.ttNsetSlotTypeColor(slot) } }); + menu_info.push({ content: "🌏 Show Link Border", has_submenu: true, slot: slot, callback: LGraphCanvas.ttNlinkStyleBorder }); + menu_info.push({ content: "🌏 Show Link Shadow", has_submenu: true, slot: slot, callback: LGraphCanvas.ttNlinkStyleShadow }); + menu_info.push({ content: "🌏 Link Style", has_submenu: true, slot: slot, callback: LGraphCanvas.ttNonShowLinkStyles }); + + return menu_info; + } + }, + + setup() { + LGraphCanvas.prototype.ttNupdateRenderSettings(app); + }, + nodeCreated(node) { + LGraphCanvas.prototype.ttNfixNodeSize(node); + let defaultBGColor = JSON.parse(localStorage.getItem('Comfy.Settings.ttN.defaultBGColor')); + if (defaultBGColor) {LGraphCanvas.prototype.ttNdefaultBGcolor(node, defaultBGColor)}; + }, + loadedGraphNode(node, app) { + LGraphCanvas.prototype.ttNupdateRenderSettings(app); + + let defaultBGColor = JSON.parse(localStorage.getItem('Comfy.Settings.ttN.defaultBGColor')); + if (defaultBGColor) {LGraphCanvas.prototype.ttNdefaultBGcolor(node, defaultBGColor)}; + }, +}); + +var styleElement = document.createElement("style"); +const cssCode = ` +.ttN-dialog { + top: 10px; + left: 10px; + min-height: 1em; + background-color: var(--comfy-menu-bg); + font-size: 1.2em; + box-shadow: 0 0 7px black !important; + z-index: 10; + display: grid; + border-radius: 7px; + padding: 7px 7px; + position: fixed; +} +.ttN-dialog .name { + display: inline-block; + min-height: 1.5em; + font-size: 14px; + font-family: sans-serif; + color: var(--descrip-text); + 
padding: 0; + vertical-align: middle; + justify-self: center; +} +.ttN-dialog input, +.ttN-dialog textarea, +.ttN-dialog select { + margin: 3px; + min-width: 60px; + min-height: 1.5em; + background-color: var(--comfy-input-bg); + border: 2px solid; + border-color: var(--border-color); + color: var(--input-text); + border-radius: 14px; + padding-left: 10px; + outline: none; +} + +.ttN-dialog #colorPicker { + margin: 0px; + min-width: 100%; + min-height: 2.5em; + border-radius: 0px; + padding: 0px 2px 0px 2px; + border: unset; +} + +.ttN-dialog textarea { + min-height: 150px; +} + +.ttN-dialog button { + margin-top: 3px; + vertical-align: top; + background-color: #999; + border: 0; + padding: 4px 18px; + border-radius: 20px; + cursor: pointer; +} + +.ttN-dialog button.rounded, +.ttN-dialog input.rounded { + border-radius: 0 12px 12px 0; +} + +.ttN-dialog .helper { + overflow: auto; + max-height: 200px; +} + +.ttN-dialog .help-item { + padding-left: 10px; +} + +.ttN-dialog .help-item:hover, +.ttN-dialog .help-item.selected { + cursor: pointer; + background-color: white; + color: black; +} +` +styleElement.innerHTML = cssCode +document.head.appendChild(styleElement); diff --git a/zavodik/nodes/ComfyUI_tinyterraNodes-main/js/ttNwidgets.js b/zavodik/nodes/ComfyUI_tinyterraNodes-main/js/ttNwidgets.js new file mode 100644 index 0000000000000000000000000000000000000000..4ec14cd54803e93955640d0bfd5b268dd9ee6108 --- /dev/null +++ b/zavodik/nodes/ComfyUI_tinyterraNodes-main/js/ttNwidgets.js @@ -0,0 +1,154 @@ +import { app } from "../../scripts/app.js"; +import { ComfyWidgets } from "../../scripts/widgets.js"; + +class SeedControl { + constructor(node) { + this.node = node; + + for (const [i, w] of this.node.widgets.entries()) { + if (w.name === "seed" || w.name === "noise_seed") { + this.seedWidget = w; + } + else if (w.name === "control_after_generate" || w.name === "control_before_generate") { + this.controlWidget = w; + } + } + if (!this.seedWidget) { + throw new Error("Something's wrong; expected seed widget"); + } + const randMax = Math.min(1125899906842624, this.seedWidget.options.max); + const randMin = Math.max(0, this.seedWidget.options.min); + const randomRange = (randMax - Math.max(0, randMin)) / (this.seedWidget.options.step / 10); + this.randomSeedButton = this.node.addWidget("button", "🎲 New Fixed Random", null, () => { + this.seedWidget.value = + Math.floor(Math.random() * randomRange) * (this.seedWidget.options.step / 10) + randMin; + this.controlWidget.value = "fixed"; + }, { serialize: false }); + + this.seedWidget.linkedWidgets = [this.randomSeedButton, this.controlWidget]; + } +} + +function addTextDisplay(nodeType) { + const onNodeCreated = nodeType.prototype.onNodeCreated; + nodeType.prototype.onNodeCreated = function () { + const r = onNodeCreated?.apply(this, arguments); + const w = ComfyWidgets["STRING"](this, "display", ["STRING", { multiline: true, placeholder: " " }], app).widget; + w.inputEl.readOnly = true; + w.inputEl.style.opacity = 0.7; + w.inputEl.style.cursor = "auto"; + return r; + }; + + const onExecuted = nodeType.prototype.onExecuted; + nodeType.prototype.onExecuted = function (message) { + onExecuted?.apply(this, arguments); + + for (const widget of this.widgets) { + if (widget.type === "customtext" && widget.name === "display" && widget.inputEl.readOnly === true) { + widget.value = message.text.join(''); + } + } + + this.onResize?.(this.size); + }; +} + +function overwriteSeedControl(nodeType) { + const onNodeCreated = nodeType.prototype.onNodeCreated; + 
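The `SeedControl` above quantizes its random draw onto the widget's own step grid: the clamped range is divided by `step / 10` (mirroring the division the class itself performs), a uniform draw is floored onto that grid, then shifted by the minimum, and the cap of 1125899906842624 (2^50) keeps results inside float-safe integer territory. A minimal standalone sketch of the same arithmetic; the function name and `options` shape are illustrative, modeled on a LiteGraph number widget:

```js
// Sketch: draw a random seed on a widget's {min, max, step} grid.
function randomFixedSeed(options) {
    const max = Math.min(1125899906842624, options.max); // 2^50 precision guard
    const min = Math.max(0, options.min);
    const step = options.step / 10; // same step scaling as SeedControl above
    const range = (max - min) / step;
    return Math.floor(Math.random() * range) * step + min;
}

// e.g. randomFixedSeed({ min: 0, max: 2 ** 32, step: 10 }) -> an integer in [0, 2^32)
```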
nodeType.prototype.onNodeCreated = function () { + onNodeCreated ? onNodeCreated.apply(this, arguments) : undefined; + this.seedControl = new SeedControl(this); + } +} + +const HAS_EXECUTED = Symbol(); +class IndexControl { + constructor(node) { + this.node = node; + this.node.properties = this.node.properties || {}; + for (const [i, w] of this.node.widgets.entries()) { + if (w.name === "index") { + this.indexWidget = w; + } + else if (w.name === "index_control") { + this.controlWidget = w; + } else if (w.name === "text") { + this.textWidget = w; + } + } + + if (!this.indexWidget) { + throw new Error("Something's wrong; expected index widget"); + } + + const applyWidgetControl = () => { + var v = this.controlWidget.value; + + //number + let min = this.indexWidget.options.min; + let max = this.textWidget.value.split("\n").length - 1; + // limit to something that javascript can handle + max = Math.min(1125899906842624, max); + min = Math.max(-1125899906842624, min); + + //adjust values based on valueControl Behaviour + switch (v) { + case "fixed": + break; + case "increment": + this.indexWidget.value += 1; + break; + case "decrement": + this.indexWidget.value -= 1; + break; + case "randomize": + this.indexWidget.value = Math.floor(Math.random() * (max - min + 1)) + min; + default: + break; + } + /*check if values are over or under their respective + * ranges and set them to min or max.*/ + if (this.indexWidget.value < min) this.indexWidget.value = max; + + if (this.indexWidget.value > max) + this.indexWidget.value = min; + this.indexWidget.callback(this.indexWidget.value); + }; + + this.controlWidget.beforeQueued = () => { + // Don't run on first execution + if (this.controlWidget[HAS_EXECUTED]) { + applyWidgetControl(); + } + this.controlWidget[HAS_EXECUTED] = true; + }; + + this.indexWidget.linkedWidgets = [this.controlWidget]; + } +} + +function overwriteIndexControl(nodeType) { + const onNodeCreated = nodeType.prototype.onNodeCreated; + nodeType.prototype.onNodeCreated = function () { + onNodeCreated ? 
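/* Wrap-around, not clamping: with a five-line `text` (valid indices 0-4), "increment" from 4 lands back on 0 and "decrement" from 0 jumps to 4, while "randomize" draws uniformly from [min, max]. The beforeQueued hook also skips the very first queue via HAS_EXECUTED, so the index shown in the UI is consumed once before stepping begins. */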
onNodeCreated.apply(this, arguments) : undefined; + this.indexControl = new IndexControl(this); + } +} + +app.registerExtension({ + name: "comfy.ttN.widgets", + async beforeRegisterNodeDef(nodeType, nodeData, app) { + if (nodeData.name.startsWith("ttN ") && ["ttN pipeLoader_v2", "ttN pipeKSampler_v2", "ttN pipeKSamplerAdvanced_v2", "ttN pipeLoaderSDXL_v2", "ttN pipeKSamplerSDXL_v2", "ttN KSampler_v2"].includes(nodeData.name)) { + if (nodeData.output_name.includes('seed')) { + overwriteSeedControl(nodeType) + } + } + if (["ttN textDebug", "ttN advPlot range", "ttN advPlot string", "ttN advPlot combo", "ttN debugInput", "ttN textOutput", "ttN advPlot merge"].includes(nodeData.name)) { + addTextDisplay(nodeType) + } + if (nodeData.name.startsWith("ttN textCycle")) { + overwriteIndexControl(nodeType) + } + }, +}); \ No newline at end of file diff --git a/zavodik/nodes/ComfyUI_tinyterraNodes-main/js/ttNxyPlot.js b/zavodik/nodes/ComfyUI_tinyterraNodes-main/js/ttNxyPlot.js new file mode 100644 index 0000000000000000000000000000000000000000..cd73e0673df664619820d122ffc5c7b36d698ccf --- /dev/null +++ b/zavodik/nodes/ComfyUI_tinyterraNodes-main/js/ttNxyPlot.js @@ -0,0 +1,212 @@ +import { app } from "../../scripts/app.js"; +import { ttN_CreateDropdown, ttN_RemoveDropdown } from "./ttNdropdown.js"; + +function generateNumList(dictionary) { + const minimum = dictionary["min"] || 0; + const maximum = dictionary["max"] || 0; + const step = dictionary["step"] || 1; + + if (step === 0) { + return []; + } + + const result = []; + let currentValue = minimum; + + while (currentValue <= maximum) { + if (Number.isInteger(step)) { + result.push(Math.round(currentValue) + '; '); + } else { + let formattedValue = currentValue.toFixed(3); + if(formattedValue == -0.000){ + formattedValue = '0.000'; + } + if (!/\.\d{3}$/.test(formattedValue)) { + formattedValue += "0"; + } + result.push(formattedValue + "; "); + } + currentValue += step; + } + + if (maximum >= 0 && minimum >= 0) { + //low to high + return result; + } + else { + //high to low + return result.reverse(); + } +} + +let plotDict = {}; +let currentOptionsDict = {}; + +function getCurrentOptionLists(node, widget) { + const nodeId = String(node.id); + const widgetName = widget.name; + const widgetValue = widget.value.replace(/^(loader|sampler):\s/, ''); + + if (!currentOptionsDict[nodeId] || !currentOptionsDict[nodeId][widgetName]) { + currentOptionsDict[nodeId] = {...currentOptionsDict[nodeId], [widgetName]: plotDict[widgetValue]}; + } else if (currentOptionsDict[nodeId][widgetName] != plotDict[widgetValue]) { + currentOptionsDict[nodeId][widgetName] = plotDict[widgetValue]; + } +} + +function addGetSetters(node) { + if (node.widgets) + for (const w of node.widgets) { + if (w.name === "x_axis" || + w.name === "y_axis") { + let widgetValue = w.value; + + // Define getters and setters for widget values + Object.defineProperty(w, 'value', { + + get() { + return widgetValue; + }, + set(newVal) { + if (newVal !== widgetValue) { + widgetValue = newVal; + getCurrentOptionLists(node, w); + } + } + }); + } + } +} + +function dropdownCreator(node) { + if (node.widgets) { + const widgets = node.widgets.filter( + (n) => (n.type === "customtext" && n.dynamicPrompts !== false) || n.dynamicPrompts + ); + + for (const w of widgets) { + function replaceOptionSegments(selectedOption, inputSegments, cursorSegmentIndex, optionsList) { + if (selectedOption) { + inputSegments[cursorSegmentIndex] = selectedOption; + } + + return inputSegments.map(segment => verifySegment(segment, 
optionsList)) + .filter(item => item !== '') + .join(''); + } + + function verifySegment(segment, optionsList) { + segment = cleanSegment(segment); + + if (isInOptionsList(segment, optionsList)) { + return segment + '; '; + } + + let matchedOptions = findMatchedOptions(segment, optionsList); + + if (matchedOptions.length === 1 || matchedOptions.length === 2) { + return matchedOptions[0]; + } + + if (isInOptionsList(formatNumberSegment(segment), optionsList)) { + return formatNumberSegment(segment) + '; '; + } + + return ''; + } + + function cleanSegment(segment) { + return segment.replace(/(\n|;| )/g, ''); + } + + function isInOptionsList(segment, optionsList) { + return optionsList.includes(segment + '; '); + } + + function findMatchedOptions(segment, optionsList) { + return optionsList.filter(option => option.toLowerCase().includes(segment.toLowerCase())); + } + + function formatNumberSegment(segment) { + if (Number(segment)) { + return Number(segment).toFixed(3); + } + + if (['0', '0.', '0.0', '0.00', '00'].includes(segment)) { + return '0.000'; + } + return segment; + } + + + const onInput = function () { + const nodeId = node.id; + const axisWidgetName = w.name[0] + '_axis'; + + let optionsList = currentOptionsDict[nodeId]?.[axisWidgetName] || []; + if (optionsList.length === 0) {return} + + const inputText = w.inputEl.value; + const cursorPosition = w.inputEl.selectionStart; + + let inputSegments = inputText.split('; '); + + const cursorSegmentIndex = inputText.substring(0, cursorPosition).split('; ').length - 1; + const currentSegment = inputSegments[cursorSegmentIndex]; + const currentSegmentLower = currentSegment.replace(/\n/g, '').toLowerCase(); + + const filteredOptionsList = optionsList.filter(option => option.toLowerCase().includes(currentSegmentLower)).map(option => option.replace(/; /g, '')); + + if (filteredOptionsList.length > 0) { + ttN_CreateDropdown(w.inputEl, filteredOptionsList, (selectedOption) => { + const verifiedText = replaceOptionSegments(selectedOption, inputSegments, cursorSegmentIndex, optionsList); + w.inputEl.value = verifiedText; + }); + } + else { + ttN_RemoveDropdown(); + const verifiedText = replaceOptionSegments(null, inputSegments, cursorSegmentIndex, optionsList); + w.inputEl.value = verifiedText; + } + }; + + w.inputEl.removeEventListener('input', onInput); + w.inputEl.addEventListener('input', onInput); + w.inputEl.removeEventListener('mouseup', onInput); + w.inputEl.addEventListener('mouseup', onInput); + } + } +} + +app.registerExtension({ + name: "comfy.ttN.xyPlot", + async beforeRegisterNodeDef(nodeType, nodeData, app) { + if (nodeData.name === "ttN xyPlot") { + plotDict = nodeData.input.hidden.plot_dict[0]; + + for (const key in plotDict) { + const value = plotDict[key]; + if (Array.isArray(value)) { + let updatedValues = []; + for (const v of value) { + updatedValues.push(v + '; '); + } + plotDict[key] = updatedValues; + } else if (typeof(value) === 'object') { + plotDict[key] = generateNumList(value); + } else { + plotDict[key] = value + '; '; + } + } + plotDict["None"] = []; + plotDict["---------------------"] = []; + } + }, + nodeCreated(node) { + if (node.constructor.title === "xyPlot") { + addGetSetters(node); + dropdownCreator(node); + + } + } +}); \ No newline at end of file diff --git a/zavodik/nodes/ComfyUI_tinyterraNodes-main/js/ttNxyPlotAdv.js b/zavodik/nodes/ComfyUI_tinyterraNodes-main/js/ttNxyPlotAdv.js new file mode 100644 index 0000000000000000000000000000000000000000..ce2a146cad30c8bff2c0a64647e751defa630148 --- /dev/null +++ 
b/zavodik/nodes/ComfyUI_tinyterraNodes-main/js/ttNxyPlotAdv.js @@ -0,0 +1,814 @@ +import { app } from "../../scripts/app.js"; +import { ttN_CreateDropdown, ttN_RemoveDropdown } from "./ttNdropdown.js"; + +const widgets_to_ignore = ['control_after_generate', 'empty_latent_aspect', 'empty_latent_width', 'empty_latent_height', 'batch_size'] +const valueCompletionRegex = /^\[(\d+):([^=\]]+)=(['"])([^'"]*)$/ +const widgetCompletionRegex = /^\[(\d+):([^=\]]*)$/ +const nodeCompletionRegex = /^\[([^:\]=]*)$/ +const nodeLabelRegex = /^\[(\d+)\]\s-\s(.+)$/ + +function getWidgetsOptions(node) { + const widgetsOptions = {} + const widgets = node.widgets + if (!widgets) return + for (const w of widgets) { + if (!w.type || !w.options) continue + const current_value = w.value + if (widgets_to_ignore.includes(w.name)) continue + //console.log(`WIDGET ${w.name}, ${w.type}, ${w.options}`) + if (w.name === 'seed' || (w.name === 'value' && node.constructor.title.toLowerCase() === 'seed')) { + widgetsOptions[w.name] = {'Random Seed': `${w.options.max}/${w.options.min}/${w.options.step}`} + continue + } + if (w.type === 'ttNhidden') { + if (w.options['max']) { + widgetsOptions[w.name] = {[current_value]: null} + continue + } else if (!w.options['values']) { + widgetsOptions[w.name] = {'string': null} + continue + } + } + if (w.type.startsWith('converted') || w.type === 'button') { + continue + } + if (w.type === 'toggle') { + widgetsOptions[w.name] = {'True': null, 'False': null} + continue + } + if (['customtext', 'text', 'string'].includes(w.type)) { + widgetsOptions[w.name] = {'string': null} + continue + } + if (w.type === 'number') { + widgetsOptions[w.name] = {[current_value]: null} + continue + } + let valueDict = {} + if (w.options.values) { + let vals = w.options.values; + + if (typeof w.options.values === 'function') { + vals = w.options.values() + } + + for (const v of vals) { + valueDict[v] = null + } + } + widgetsOptions[w.name] = valueDict + } + + //console.log('WIDGETS OPTIONS', widgetsOptions) + if (Object.keys(widgetsOptions).length === 0) { + return null + } + return widgetsOptions; +} + +function _addInputIDs(node, inputIDs, IDsToCheck) { + if (node.inputs) { + for (const input of node.inputs) { + if (input.link) { + let originID = node.graph.links[input.link].origin_id + inputIDs.push(originID); + if (!IDsToCheck.includes(originID)) { + IDsToCheck.push(originID); + } + } + } + } +} + +function _recursiveGetInputIDs(node) { + const inputIDs = []; + const IDsToCheck = [node.id]; + + while (IDsToCheck.length > 0) { + const currentID = IDsToCheck.pop(); + const currentNode = node.graph._nodes_by_id[currentID]; + if (currentNode.type === "ttN advanced xyPlot") { + continue + } + _addInputIDs(currentNode, inputIDs, IDsToCheck); + } + + return inputIDs; +} + +function getNodesWidgetsDict(xyNode, plotLines=false) { + const nodeWidgets = {}; + if (plotLines) { + nodeWidgets['Add Plot Line'] = {'Only Values Label': null, 'Title and Values Label': null, 'ID, Title and Values Label': null}; + } + + const xyNodeLinks = xyNode.outputs[0]?.links + if (!xyNodeLinks || xyNodeLinks.length == 0) { + nodeWidgets['Connect to advanced xyPlot for options'] = null + return nodeWidgets + } + + const plotNodeLink = xyNodeLinks[0] + const plotNodeID = xyNode.graph.links[plotNodeLink].target_id + const plotNodeTitle = xyNode.graph._nodes_by_id[plotNodeID].constructor.title + const plotNode = app.graph._nodes_by_id[plotNodeID] + + const options = getWidgetsOptions(plotNode) + if (options) { + 
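/* Shape sketch (illustrative values): the returned dict maps "[id] - title" labels to per-widget option dicts, e.g. { "[12] - KSampler": { sampler_name: { euler: null, dpmpp_2m: null }, steps: { "20": null }, seed: { "Random Seed": "max/min/step" } } } - one entry for the plot node itself plus one per upstream node reachable through its inputs. */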
nodeWidgets[`[${plotNodeID}] - ${plotNodeTitle}`] = options + } + + const inputIDS = _recursiveGetInputIDs(plotNode) + for (const iID of inputIDS) { + const iNode = app.graph._nodes_by_id[iID]; + const iNodeTitle = iNode.constructor.title + if (iNodeTitle === 'advanced xyPlot') { + continue + } + const options = getWidgetsOptions(iNode) + if (!options) continue + nodeWidgets[`[${iID}] - ${iNodeTitle}`] = options + } + return nodeWidgets +} + +function getOpenExpressionContext(inputText, cursorPosition) { + const textBeforeCursor = inputText.slice(0, cursorPosition); + const expressionStart = textBeforeCursor.lastIndexOf('['); + + if (expressionStart === -1 || textBeforeCursor.indexOf(']', expressionStart) !== -1) { + return null; + } + + return { + expressionStart, + expressionBeforeCursor: textBeforeCursor.slice(expressionStart), + }; +} + +function getValueCompletionContext(inputText, cursorPosition) { + const expressionContext = getOpenExpressionContext(inputText, cursorPosition); + if (!expressionContext) { + return null; + } + + const expressionBeforeCursor = expressionContext.expressionBeforeCursor; + const match = expressionBeforeCursor.match(valueCompletionRegex); + if (!match) { + return null; + } + + const [, nodeId, rawWidgetName, quoteChar, valueQuery] = match; + const widgetName = rawWidgetName.trim(); + const replaceEndIndex = inputText.indexOf(']', cursorPosition); + + return { + nodeId, + widgetName, + lookupWidgetName: widgetName.replace(/\.append$/, ''), + quoteChar, + valueQuery, + replaceStart: expressionContext.expressionStart, + replaceEnd: replaceEndIndex === -1 ? cursorPosition : replaceEndIndex + 1, + }; +} + +function getWidgetCompletionContext(inputText, cursorPosition) { + const expressionContext = getOpenExpressionContext(inputText, cursorPosition); + if (!expressionContext) { + return null; + } + + const match = expressionContext.expressionBeforeCursor.match(widgetCompletionRegex); + if (!match) { + return null; + } + + const [, nodeId, rawWidgetQuery] = match; + const widgetStart = expressionContext.expressionStart + nodeId.length + 2; + const equalIndex = inputText.indexOf('=', widgetStart); + const bracketIndex = inputText.indexOf(']', widgetStart); + const hasEquals = equalIndex !== -1 && (bracketIndex === -1 || equalIndex < bracketIndex); + const widgetEnd = hasEquals ? equalIndex : (bracketIndex === -1 ? cursorPosition : Math.min(cursorPosition, bracketIndex)); + + return { + nodeId, + widgetQuery: rawWidgetQuery.trim(), + widgetStart, + widgetEnd, + hasEquals, + }; +} + +function getNodeCompletionContext(inputText, cursorPosition) { + const expressionContext = getOpenExpressionContext(inputText, cursorPosition); + if (!expressionContext) { + return null; + } + + const match = expressionContext.expressionBeforeCursor.match(nodeCompletionRegex); + if (!match) { + return null; + } + + const nodeStart = expressionContext.expressionStart + 1; + const colonIndex = inputText.indexOf(':', nodeStart); + const equalIndex = inputText.indexOf('=', nodeStart); + const bracketIndex = inputText.indexOf(']', nodeStart); + + const delimiters = [colonIndex, equalIndex, bracketIndex].filter((index) => index !== -1); + const firstDelimiterIndex = delimiters.length > 0 ? Math.min(...delimiters) : -1; + const hasColon = colonIndex !== -1 && (firstDelimiterIndex === -1 || colonIndex === firstDelimiterIndex); + const nodeEnd = hasColon ? colonIndex : (firstDelimiterIndex === -1 ? 
cursorPosition : Math.min(cursorPosition, firstDelimiterIndex)); + + return { + nodeQuery: match[1].trim(), + nodeStart, + nodeEnd, + hasColon, + }; +} + +function getNodeWidgetOptions(nodeWidgets, nodeId) { + const nodeKey = Object.keys(nodeWidgets).find((key) => key.startsWith(`[${nodeId}] - `)); + if (!nodeKey) { + return null; + } + + const widgetOptions = nodeWidgets[nodeKey]; + if (!widgetOptions || typeof widgetOptions !== 'object') { + return null; + } + + return widgetOptions; +} + +function getNodeWidgetValues(nodeWidgets, nodeId, widgetName, lookupWidgetName) { + const widgetOptions = getNodeWidgetOptions(nodeWidgets, nodeId); + if (!widgetOptions) { + return []; + } + + const valuesDict = widgetOptions[widgetName] ?? widgetOptions[lookupWidgetName]; + if (!valuesDict || typeof valuesDict !== 'object') { + return []; + } + + return Object.keys(valuesDict).filter((value) => value && value !== 'string'); +} + +function getNodeWidgetNames(nodeWidgets, nodeId) { + const widgetOptions = getNodeWidgetOptions(nodeWidgets, nodeId); + if (!widgetOptions) { + return []; + } + + return Object.keys(widgetOptions).filter((widgetName) => widgetName && widgetName !== 'string'); +} + +function getNodeEntries(nodeWidgets) { + return Object.keys(nodeWidgets) + .map((key) => { + const match = key.match(nodeLabelRegex); + if (!match) { + return null; + } + const [, nodeId, nodeTitle] = match; + return { + nodeId, + nodeTitle, + label: `[${nodeId}] - ${nodeTitle}`, + searchText: `${nodeId} ${nodeTitle}`, + }; + }) + .filter(Boolean); +} + +function rankAutocompleteEntries(entries, query, textSelector = (entry) => entry) { + const normalizedQuery = query.toLowerCase().trim(); + const tokens = normalizedQuery.split(/\s+/).filter(Boolean); + + if (tokens.length === 0) { + return entries; + } + + return entries + .map((entry) => { + const normalizedValue = textSelector(entry).toLowerCase(); + if (tokens.some((token) => !normalizedValue.includes(token))) { + return null; + } + + let score = 0; + + if (normalizedValue.includes(normalizedQuery)) { + score += 120; + } + if (normalizedValue.startsWith(normalizedQuery)) { + score += 60; + } + + for (const token of tokens) { + const tokenIndex = normalizedValue.indexOf(token); + if (tokenIndex === 0) { + score += 24; + } + score += Math.max(0, 12 - Math.min(tokenIndex, 12)); + } + + const firstTokenIndex = normalizedValue.indexOf(tokens[0]); + return { + entry, + score, + firstTokenIndex: firstTokenIndex === -1 ? 
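/* Worked example: query "ksam" against searchText "12 KSampler" -> the normalized text "12 ksampler" contains the whole query (+120) but does not start with it, and the single token first matches at index 3 (+9, via 12 - 3), scoring 129. Entries missing any token are filtered out before scoring. */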
Number.MAX_SAFE_INTEGER : firstTokenIndex, + normalizedValue, + }; + }) + .filter(Boolean) + .sort((a, b) => b.score - a.score || a.firstTokenIndex - b.firstTokenIndex || a.normalizedValue.localeCompare(b.normalizedValue)) + .map((item) => item.entry); +} + +function rankWidgetValues(values, query) { + const uniqueValues = [...new Set(values)]; + return rankAutocompleteEntries(uniqueValues, query); +} + +function insertWidgetValue(inputEl, inputText, context, selectedOption) { + const replacement = `[${context.nodeId}:${context.widgetName}=${context.quoteChar}${selectedOption}${context.quoteChar}]`; + const nextValue = inputText.slice(0, context.replaceStart) + replacement + inputText.slice(context.replaceEnd); + inputEl.value = nextValue; + + const cursorIndex = context.replaceStart + replacement.length; + inputEl.setSelectionRange(cursorIndex, cursorIndex); +} + +function insertWidgetName(inputEl, inputText, context, selectedWidgetName) { + const before = inputText.slice(0, context.widgetStart); + const after = inputText.slice(context.widgetEnd); + + let nextValue = before + selectedWidgetName + after; + let cursorIndex = context.widgetStart + selectedWidgetName.length; + + if (!context.hasEquals) { + nextValue = nextValue.slice(0, cursorIndex) + "='" + nextValue.slice(cursorIndex); + cursorIndex += 2; + } + + inputEl.value = nextValue; + inputEl.setSelectionRange(cursorIndex, cursorIndex); +} + +function insertNodeId(inputEl, inputText, context, selectedNodeId) { + const before = inputText.slice(0, context.nodeStart); + const after = inputText.slice(context.nodeEnd); + const separator = context.hasColon ? '' : ':'; + const nextValue = before + selectedNodeId + separator + after; + const cursorIndex = context.nodeStart + selectedNodeId.length + 1; + + inputEl.value = nextValue; + inputEl.setSelectionRange(cursorIndex, cursorIndex); +} + +function showAutocompleteOptions(inputEl, options, onSelect) { + if (options.length === 0) { + ttN_RemoveDropdown(); + return; + } + + ttN_CreateDropdown(inputEl, options, onSelect); +} + +function tryValueCompletion(inputEl, inputText, cursorPosition, nodeWidgets) { + const valueCompletionContext = getValueCompletionContext(inputText, cursorPosition); + if (!valueCompletionContext) { + return false; + } + + const widgetValues = getNodeWidgetValues( + nodeWidgets, + valueCompletionContext.nodeId, + valueCompletionContext.widgetName, + valueCompletionContext.lookupWidgetName, + ); + + const filteredValues = rankWidgetValues(widgetValues, valueCompletionContext.valueQuery); + showAutocompleteOptions(inputEl, filteredValues, (selectedOption) => { + insertWidgetValue(inputEl, inputEl.value, valueCompletionContext, selectedOption); + }); + return true; +} + +function tryWidgetCompletion(inputEl, inputText, cursorPosition, nodeWidgets) { + const widgetCompletionContext = getWidgetCompletionContext(inputText, cursorPosition); + if (!widgetCompletionContext) { + return false; + } + + const widgetNames = getNodeWidgetNames(nodeWidgets, widgetCompletionContext.nodeId); + const filteredWidgetNames = rankAutocompleteEntries(widgetNames, widgetCompletionContext.widgetQuery); + showAutocompleteOptions(inputEl, filteredWidgetNames, (selectedWidgetName) => { + insertWidgetName(inputEl, inputEl.value, widgetCompletionContext, selectedWidgetName); + }); + return true; +} + +function tryNodeCompletion(inputEl, inputText, cursorPosition, nodeWidgets) { + const nodeCompletionContext = getNodeCompletionContext(inputText, cursorPosition); + if (!nodeCompletionContext) { + 
return false; + } + + const nodeEntries = getNodeEntries(nodeWidgets); + const filteredNodeEntries = rankAutocompleteEntries(nodeEntries, nodeCompletionContext.nodeQuery, (nodeEntry) => nodeEntry.searchText); + const nodeIdByLabel = new Map(filteredNodeEntries.map((nodeEntry) => [nodeEntry.label, nodeEntry.nodeId])); + const nodeOptions = filteredNodeEntries.map((nodeEntry) => nodeEntry.label); + + showAutocompleteOptions(inputEl, nodeOptions, (selectedNodeLabel) => { + const selectedNodeId = nodeIdByLabel.get(selectedNodeLabel); + if (!selectedNodeId) { + return; + } + insertNodeId(inputEl, inputEl.value, nodeCompletionContext, selectedNodeId); + }); + return true; +} + +function dropdownCreator(node) { + if (node.widgets) { + const widgets = node.widgets.filter( + (n) => (n.type === "customtext") + ); + + for (const w of widgets) { + + const onInput = function () { + const nodeWidgets = getNodesWidgetsDict(node, true); + const inputText = w.inputEl.value; + const cursorPosition = w.inputEl.selectionStart; + + if (tryValueCompletion(w.inputEl, inputText, cursorPosition, nodeWidgets)) { + return; + } + + if (tryWidgetCompletion(w.inputEl, inputText, cursorPosition, nodeWidgets)) { + return; + } + + if (tryNodeCompletion(w.inputEl, inputText, cursorPosition, nodeWidgets)) { + return; + } + + let lines = inputText.split('\n'); + if (lines.length === 0) return; + + let cursorLineIndex = 0; + let lineStartPosition = 0; + + for (let i = 0; i < lines.length; i++) { + const lineEndPosition = lineStartPosition + lines[i].length; + if (cursorPosition <= lineEndPosition) { + cursorLineIndex = i; + break; + } + lineStartPosition = lineEndPosition + 1; + } + + ttN_CreateDropdown(w.inputEl, nodeWidgets, (selectedOption, fullpath) => { + const data = fullpath.split('###'); + const parts = data[0].split('/'); + let output; + if (parts[0] === 'Add Plot Line') { + const labelType = parts[1]; + let label; + switch (labelType) { + case 'Only Values Label': + label = 'v_label'; + break; + case 'Title and Values Label': + label = 'tv_label'; + break; + case 'ID, Title and Values Label': + label = 'idtv_label'; + break; + } + + let lastOpeningAxisBracket = -1; + let lastClosingAxisBracket = -1; + + let bracketCount = 0; + for (let i = 0; i < inputText.length; i++) { + if (inputText[i] === '[') { + bracketCount++; + } else if (inputText[i] === ']') { + bracketCount--; + } else if (inputText[i] === '<' && bracketCount === 0) { + lastOpeningAxisBracket = i; + } else if (inputText[i] === '>' && bracketCount === 0) { + lastClosingAxisBracket = i; + } + } + + const lastAxisBracket = inputText.substring(lastOpeningAxisBracket + 1, lastClosingAxisBracket).split(':')[0]; + let nextAxisBracketNumber; + + if (inputText.trim() === '') { + w.inputEl.value = `<1:${label}>\n`; + return + } + + if (lastAxisBracket) { + const lastAxisBracketNumber = Number(lastAxisBracket); + if (!isNaN(lastAxisBracketNumber)) { + nextAxisBracketNumber = lastAxisBracketNumber + 1; + output = `<${nextAxisBracketNumber}:${label}>\n`; + if (inputText[inputText.length - 1] === '\n') { + w.inputEl.value = `${inputText}${output}` + } else { + w.inputEl.value = `${inputText}\n${output}` + } + return + } + } + return + } + if (parts[0] === 'Connect to advanced xyPlot for options') { + return + } + + if (selectedOption === 'Random Seed') { + const [max, min, step] = data[1].split('/'); + + const randMax = Math.min(1125899906842624, Number(max)); + const randMin = Math.max(0, Number(min)); + const randomRange = (randMax - Math.max(0, randMin)) / 
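/* data[1] packs the seed widget's "max/min/step" string (stored when the options dict was built), so the draw below reproduces SeedControl's quantization: divide the clamped range by step/10, floor a uniform draw onto that grid, then shift by the minimum. */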
(Number(step) / 10); + selectedOption = Math.floor(Math.random() * randomRange) * (Number(step) / 10) + randMin; + } + const nodeID = data[0].split(' - ')[0].replace('[', '').replace(']', ''); + + output = `[${nodeID}:${parts[1]}='${selectedOption}']`; + + if (inputText.trim() === '') { + output = `<1:v_label>\n` + output; + } + + if (lines[cursorLineIndex].trim() === '') { + lines[cursorLineIndex] = output; + } else { + lines.splice(cursorLineIndex + 1, 0, output); + } + + w.inputEl.value = lines.join('\n'); + + }, true); + }; + + w.inputEl.removeEventListener('input', onInput); + w.inputEl.addEventListener('input', onInput); + w.inputEl.removeEventListener('mouseup', onInput); + w.inputEl.addEventListener('mouseup', onInput); + } + } +} + +function findUpstreamXYPlot(targetID) { + const currentNode = app.graph._nodes_by_id[targetID]; + if (!currentNode) { + return + } + if (currentNode.constructor.title === 'advanced xyPlot') { + return currentNode; + } else { + if (!currentNode.outputs) { + return + } + for (const output of currentNode.outputs) { + if (output.links?.length > 0) { + for (const link of output.links) { + const xyPlotNode = findUpstreamXYPlot(app.graph.links[link].target_id) + if (xyPlotNode) { + return xyPlotNode + } + } + } + } + } +} + +function setPlotNodeOptions(currentNode, targetID=null) { + if (!targetID) { + for (const output of currentNode.outputs) { + if (output.links?.length > 0) { + for (const link of output.links) { + targetID = app.graph.links[link].target_id + } + } + } + } + const xyPlotNode = findUpstreamXYPlot(targetID) + if (!xyPlotNode) { + return + } + const widgets_dict = getNodesWidgetsDict(xyPlotNode) + const currentWidget = currentNode.widgets.find(w => w.name === 'node'); + if (currentWidget) { + currentWidget.options.values = Object.keys(widgets_dict) + } +} + +function setPlotWidgetOptions(currentNode, searchType) { + const { value } = currentNode.widgets.find(w => w.name === 'node'); + const nodeIdRegex = /\[(\d+)\]/; + const match = value.match(nodeIdRegex); + const nodeId = match ? 
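/* The 'node' widget holds labels shaped like "[12] - KSampler"; the bracketed id is parsed back out here to locate the live node whose widgets populate the 'widget', 'start_from', 'end_with' and 'select' dropdowns. */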
parseInt(match[1], 10) : null; + if (!nodeId) return; + + const optionNode = app.graph._nodes_by_id[nodeId]; + if (!optionNode || !optionNode.widgets) return; + + const widgetsList = Object.values(optionNode.widgets) + .filter( + function(w) { + if (searchType) { + return searchType.includes(w.type) + } + } + ) + .map((w) => w.name); + + if (widgetsList) { + for (const w of currentNode.widgets) { + if (w.name === 'widget') { + w.options.values = widgetsList + } + } + } + + + const widgetWidget = currentNode.widgets.find(w => w.name === 'widget'); + const widgetWidgetValue = widgetWidget.value; + + if (searchType.includes('number')) { + const int_widgets = [ + 'seed', + 'clip_skip', + 'steps', + 'start_at_step', + 'end_at_step', + 'empty_latent_width', + 'empty_latent_height', + 'noise_seed', + ] + const float_widgets = [ + 'cfg', + 'denoise', + 'strength_model', + 'strength_clip', + 'strength', + 'scale_by', + 'lora_strength' + ] + + const rangeModeWidget = currentNode.widgets.find(w => w.name === 'range_mode'); + const rangeModeWidgetValue = rangeModeWidget.value; + + if (int_widgets.includes(widgetWidgetValue)) { + rangeModeWidget.options.values = ['step_int', 'num_steps_int'] + if (rangeModeWidgetValue === 'num_steps_float') { + rangeModeWidget.value = 'num_steps_int' + } + if (rangeModeWidgetValue === 'step_float') { + rangeModeWidget.value = 'step_int' + } + } else if (float_widgets.includes(widgetWidgetValue)) { + rangeModeWidget.options.values = ['step_float', 'num_steps_float'] + rangeModeWidget.value.replace('int', 'float') + if (rangeModeWidgetValue === 'num_steps_int') { + rangeModeWidget.value = 'num_steps_float' + } + if (rangeModeWidgetValue === 'step_int') { + rangeModeWidget.value = 'step_float' + } + } else { + rangeModeWidget.options.values = ['step_int', 'num_steps_int', 'step_float', 'num_steps_float'] + } + } + if (searchType.includes('combo')) { + const optionsWidget = optionNode.widgets.find(w => w.name === widgetWidgetValue) + if (optionsWidget) { + const values = optionsWidget.options.values + currentNode.widgets.find(w => w.name === 'start_from').options.values = values + currentNode.widgets.find(w => w.name === 'end_with').options.values = values + currentNode.widgets.find(w => w.name === 'select').options.values = values + } + } +} + +const getSetWidgets = [ + "node", + "widget", + "start_from", + "end_with", +] + +function getSetters(node, searchType) { + if (node.widgets) { + const gswidgets = node.widgets.filter(function(widget) { + return getSetWidgets.includes(widget.name); + }); + for (const w of gswidgets) { + setPlotWidgetOptions(node, searchType); + let widgetValue = w.value; + + // Define getters and setters for widget values + Object.defineProperty(w, 'value', { + get() { + return widgetValue; + }, + set(newVal) { + if (newVal !== widgetValue) { + widgetValue = newVal; + setPlotWidgetOptions(node, searchType); + } + } + }); + } + + const selectWidget = node.widgets.find(w => w.name === 'select') + if (selectWidget) { + let widgetValue = selectWidget.value; + let selectedWidget = node.widgets.find(w => w.name === 'selection'); + + Object.defineProperty(selectWidget, 'value', { + get() { + return widgetValue; + }, + set(newVal) { + if (newVal !== widgetValue) { + widgetValue = newVal; + if (selectedWidget.inputEl.value.trim() === '') { + selectedWidget.inputEl.value = newVal; + } else { + selectedWidget.inputEl.value += "\n" + newVal; + } + } + } + }) + } + } + let mouseOver = node.mouseOver; + Object.defineProperty(node, 'mouseOver', { + get() { + 
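/* Hovering is the refresh trigger: the setter below rebuilds the option lists (setPlotWidgetOptions / setPlotNodeOptions) whenever the mouse enters the node, rather than on every graph edit. */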
return mouseOver; + }, + set(newVal) { + if (newVal !== mouseOver) { + mouseOver = newVal; + if (mouseOver) { + setPlotWidgetOptions(node, searchType); + setPlotNodeOptions(node); + } + } + } + }) + +} + + +app.registerExtension({ + name: "comfy.ttN.xyPlotAdv", + beforeRegisterNodeDef(nodeType, nodeData, app) { + + /*if (nodeData.name === "ttN advPlot range") { + const origOnConnectionsChange = nodeType.prototype.onConnectionsChange; + nodeType.prototype.onConnectionsChange = function (type, slotIndex, isConnected, link_info, _ioSlot) { + const r = origOnConnectionsChange ? origOnConnectionsChange.apply(this, arguments) : undefined; + if (link_info && (slotIndex == 0 || slotIndex == 1)) { + const originID = link_info?.origin_id + const targetID = link_info?.target_id + + const currentNode = app.graph._nodes_by_id[originID]; + + setPlotNodeOptions(currentNode, targetID) + } + return r; + }; + }*/ + }, + nodeCreated(node) { + const node_title = node.constructor.title; + + if (node_title === "advanced xyPlot") { + dropdownCreator(node); + } + if (node_title === "advPlot range") { + getSetters(node, ['number',]); + } + if (node_title === "advPlot string") { + getSetters(node, ['text', 'customtext']); + } + if (node_title === "advPlot combo") { + getSetters(node, ['combo',]); + } + }, +}); diff --git a/zavodik/nodes/ComfyUI_tinyterraNodes-main/js/utils.js b/zavodik/nodes/ComfyUI_tinyterraNodes-main/js/utils.js new file mode 100644 index 0000000000000000000000000000000000000000..4283a4ef2784bb8e8b3aeda88588da6c708d0e12 --- /dev/null +++ b/zavodik/nodes/ComfyUI_tinyterraNodes-main/js/utils.js @@ -0,0 +1,261 @@ +import { app } from "../../scripts/app.js"; +import { api } from "../../scripts/api.js"; +import { ComfyDialog, $el } from "../../scripts/ui.js"; + + +export function rebootAPI() { + if (confirm("Are you sure you'd like to reboot the server?")) { + try { + api.fetchApi("/ttN/reboot"); + } + catch(exception) { + console.log("Failed to reboot: " + exception); + } + return true; + } + + return false; +} + +export function wait(ms = 16, value) { + return new Promise((resolve) => { + setTimeout(() => { + resolve(value); + }, ms); + }); +} + + + + + + +const CONVERTED_TYPE = "converted-widget"; +const GET_CONFIG = Symbol(); + +export function getConfig(widgetName, node) { + const { nodeData } = node.constructor; + return nodeData?.input?.required[widgetName] ?? nodeData?.input?.optional?.[widgetName]; +} + +export function hideWidget(node, widget, suffix = "") { + widget.origType = widget.type; + widget.origComputeSize = widget.computeSize; + widget.origSerializeValue = widget.serializeValue; + widget.computeSize = () => [0, -4]; // -4 is due to the gap litegraph adds between widgets automatically + widget.type = CONVERTED_TYPE + suffix; + widget.serializeValue = () => { + // Prevent serializing the widget if we have no input linked + if (!node.inputs) { + return undefined; + } + let node_input = node.inputs.find((i) => i.widget?.name === widget.name); + + if (!node_input || !node_input.link) { + return undefined; + } + return widget.origSerializeValue ? widget.origSerializeValue() : widget.value; + }; + + // Hide any linked widgets, e.g. 
seed+seedControl + if (widget.linkedWidgets) { + for (const w of widget.linkedWidgets) { + hideWidget(node, w, ":" + widget.name); + } + } +} + +export function getWidgetType(config) { + // Special handling for COMBO so we restrict links based on the entries + let type = config[0]; + if (type instanceof Array) { + type = "COMBO"; + } + return { type }; +} + +export function convertToInput(node, widget, config) { + hideWidget(node, widget); + + const { type } = getWidgetType(config); + + // Add input and store widget config for creating on primitive node + const sz = node.size; + node.addInput(widget.name, type, { + widget: { name: widget.name, [GET_CONFIG]: () => config }, + }); + + for (const widget of node.widgets) { + widget.last_y += LiteGraph.NODE_SLOT_HEIGHT; + } + + // Restore original size but grow if needed + node.setSize([Math.max(sz[0], node.size[0]), Math.max(sz[1], node.size[1])]); +} + +export function tinyterraReloadNode(node) { + // Retrieves original values or uses current ones as fallback. Options for creating a new node. + const { title: nodeTitle, color: nodeColor, bgcolor: bgColor } = node.properties.origVals || node; + const options = { + size: [...node.size], + color: nodeColor, + bgcolor: bgColor, + pos: [...node.pos] + }; + + // Store a reference to the old node before it gets replaced. + const oldNode = node + + // Track connections to re-establish later. + const inputConnections = [], outputConnections = []; + if (node.inputs) { + for (const input of node.inputs ?? []) { + if (input.link) { + const input_name = input.name + const input_slot = node.findInputSlot(input_name) + const input_node = node.getInputNode(input_slot) + const input_link = node.getInputLink(input_slot) + + inputConnections.push([input_link.origin_slot, input_node, input_name]) + } + } + } + if (node.outputs) { + for (const output of node.outputs) { + if (output.links) { + const output_name = output.name + + for (const linkID of output.links) { + const output_link = graph.links[linkID] + const output_node = graph._nodes_by_id[output_link.target_id] + outputConnections.push([output_name, output_node, output_link.target_slot]) + } + } + } + } + // Remove old node and create a new one. + app.graph.remove(node) + const newNode = app.graph.add(LiteGraph.createNode(node.constructor.type, nodeTitle, options)); + if (newNode?.constructor?.hasOwnProperty('ttNnodeVersion')) { + newNode.properties.ttNnodeVersion = newNode.constructor.ttNnodeVersion; + } + + // A function to handle reconnection of links to the new node. 
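`getConfig` and `convertToInput` above are what the reload path below reuses to re-create converted widget inputs on the fresh node. A minimal usage sketch under assumed conditions; the node id and widget name are illustrative:

```js
// Sketch: convert a node's "text" widget into a connectable input
// using the helpers defined above.
const node = app.graph._nodes_by_id[12]; // illustrative node id
const widget = node.widgets.find((w) => w.name === "text");
if (widget && !node.inputs?.find((i) => i.name === widget.name)) {
    const config = getConfig(widget.name, node); // original input definition
    convertToInput(node, widget, config); // hides the widget, adds an input slot
}
```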
+ function handleLinks() { + for (let ow of oldNode.widgets) { + if (ow.type === CONVERTED_TYPE) { + const config = getConfig(ow.name, oldNode) + const WidgetToConvert = newNode.widgets.find((nw) => nw.name === ow.name); + if (WidgetToConvert && !newNode?.inputs?.find((i) => i.name === ow.name)) { + convertToInput(newNode, WidgetToConvert, config); + } + } + } + + // replace input and output links + for (let input of inputConnections) { + const [output_slot, output_node, input_name] = input; + output_node.connect(output_slot, newNode.id, input_name) + } + for (let output of outputConnections) { + const [output_name, input_node, input_slot] = output; + newNode.connect(output_name, input_node, input_slot) + } + } + + // fix widget values + let values = oldNode.widgets_values; + if (!values) { + console.log('NO VALUES') + newNode.widgets.forEach((newWidget, index) => { + let pass = false + while ((index < oldNode.widgets.length) && !pass) { + const oldWidget = oldNode.widgets[index]; + if (newWidget.type === oldWidget.type) { + newWidget.value = oldWidget.value; + pass = true + } + index++; + } + }); + } + else { + let isValid = false + const isIterateForwards = values.length <= newNode.widgets.length; + let valueIndex = isIterateForwards ? 0 : values.length - 1; + + const parseWidgetValue = (value, widget) => { + if (['', null].includes(value) && (widget.type === "button" || widget.type === "converted-widget")) { + return { value, isValid: true }; + } + if (typeof value === "boolean" && widget.options?.on && widget.options?.off) { + return { value, isValid: true }; + } + if (widget.options?.values?.includes(value)) { + return { value, isValid: true }; + } + if (widget.inputEl) { + if (typeof value === "string" || value === widget.value) { + return { value, isValid: true }; + } + } + if (!isNaN(value)) { + value = parseFloat(value); + if (widget.options?.min <= value && value <= widget.options?.max) { + return { value, isValid: true }; + } + } + return { value: widget.value, isValid: false }; + }; + + function updateValue(widgetIndex) { + const oldWidget = oldNode.widgets[widgetIndex]; + let newWidget = newNode.widgets[widgetIndex]; + let newValueIndex = valueIndex + + if (newWidget.name === oldWidget.name && (newWidget.type === oldWidget.type || oldWidget.type === 'ttNhidden' || newWidget.type === 'ttNhidden')) { + + while ((isIterateForwards ? newValueIndex < values.length : newValueIndex >= 0) && !isValid) { + let { value, isValid } = parseWidgetValue(values[newValueIndex], newWidget); + if (isValid && value !== NaN) { + newWidget.value = value; + break; + } + newValueIndex += isIterateForwards ? 
1 : -1; + } + + if (isIterateForwards) { + if (newValueIndex === valueIndex) { + valueIndex++; + } + if (newValueIndex === valueIndex + 1) { + valueIndex++; + valueIndex++; + } + } else { + if (newValueIndex === valueIndex) { + valueIndex--; + } + if (newValueIndex === valueIndex - 1) { + valueIndex--; + valueIndex--; + } + } + //console.log('\n') + } + }; + if (isIterateForwards) { + for (let widgetIndex = 0; widgetIndex < newNode.widgets.length; widgetIndex++) { + updateValue(widgetIndex); + } + } else { + for (let widgetIndex = newNode.widgets.length - 1; widgetIndex >= 0; widgetIndex--) { + updateValue(widgetIndex); + } + } + } + handleLinks(); + + newNode.setSize(options.size) + newNode.onResize([0,0]); +}; \ No newline at end of file diff --git a/zavodik/nodes/ComfyUI_tinyterraNodes-main/pyproject.toml b/zavodik/nodes/ComfyUI_tinyterraNodes-main/pyproject.toml new file mode 100644 index 0000000000000000000000000000000000000000..1ffbdb8c41090b4dba8d188d2e9584a1bd1f522d --- /dev/null +++ b/zavodik/nodes/ComfyUI_tinyterraNodes-main/pyproject.toml @@ -0,0 +1,14 @@ +[project] +name = "comfyui_tinyterranodes" +description = "Customizable xyPlot, various pipe nodes, fullscreen image viewer based on node history, dynamic widgets, interface customization, and more." +version = "2.0.11" +license = { file = "LICENSE" } + +[project.urls] +Repository = "https://github.com/TinyTerra/ComfyUI_tinyterraNodes" + +[tool.comfy] +PublisherId = "tinyterra" +DisplayName = "tinyterraNodes" +Icon = "images/icon.jpg" +Models = [] diff --git a/zavodik/nodes/ComfyUI_tinyterraNodes-main/ttNdev.py b/zavodik/nodes/ComfyUI_tinyterraNodes-main/ttNdev.py new file mode 100644 index 0000000000000000000000000000000000000000..78e347e2b18a3aa04532b5dc11c99f9f66598426 --- /dev/null +++ b/zavodik/nodes/ComfyUI_tinyterraNodes-main/ttNdev.py @@ -0,0 +1,34 @@ +# in_dev - likely broken +class ttN_compareInput: + @classmethod + def INPUT_TYPES(s): + return {"required": {"console_title": ("STRING", {"default": "ttN INPUT COMPARE"}),}, + "optional": {"debug": ("", {"default": None}), + "debug2": ("", {"default": None}),} + } + + RETURN_TYPES = tuple() + RETURN_NAMES = tuple() + FUNCTION = "debug" + CATEGORY = "🌏 tinyterra/dev" + OUTPUT_NODE = True + + def debug(_, **kwargs): + + values = [] + for key, value in kwargs.items(): + if key == "console_title": + print(value) + else: + print(f"{key}: {value}") + values.append(value) + + return tuple() + +NODE_CLASS_MAPPINGS = { + "ttN compareInput": ttN_compareInput, +} + +NODE_DISPLAY_NAME_MAPPINGS = { + "ttN compareInput": "compareInput", +} \ No newline at end of file diff --git a/zavodik/nodes/ComfyUI_tinyterraNodes-main/ttNpy/adv_encode.py b/zavodik/nodes/ComfyUI_tinyterraNodes-main/ttNpy/adv_encode.py new file mode 100644 index 0000000000000000000000000000000000000000..74fa17293e5f26cf1b9c7536d7e9204c6a7bb412 --- /dev/null +++ b/zavodik/nodes/ComfyUI_tinyterraNodes-main/ttNpy/adv_encode.py @@ -0,0 +1,401 @@ +import torch +import numpy as np +import itertools +from math import gcd + +from comfy import model_management +from comfy.sdxl_clip import SDXLClipModel, SDXLRefinerClipModel, SDXLClipG, StableCascadeClipModel +try: + from comfy.text_encoders.sd3_clip import SD3ClipModel, T5XXLModel +except ImportError: + from comfy.sd3_clip import SD3ClipModel, T5XXLModel + +try: + from comfy.text_encoders.flux import FluxClipModel +except: + FluxClipModel = None + pass + +def _grouper(n, iterable): + it = iter(iterable) + while True: + chunk = list(itertools.islice(it, n)) + if not chunk: + 
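# An empty slice means the iterator is exhausted - end the generator here.
# batched_clip_encode below relies on this to feed the already-chunked prompts
# to the CLIP encoder in batches of 32.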
return + yield chunk + +def _norm_mag(w, n): + d = w - 1 + return 1 + np.sign(d) * np.sqrt(np.abs(d) ** 2 / n) + # return np.sign(w) * np.sqrt(np.abs(w)**2 / n) + +def divide_length(word_ids, weights): + sums = dict(zip(*np.unique(word_ids, return_counts=True))) + sums[0] = 1 + weights = [[_norm_mag(w, sums[id]) if id != 0 else 1.0 + for w, id in zip(x, y)] for x, y in zip(weights, word_ids)] + return weights + +def shift_mean_weight(word_ids, weights): + delta = 1 - np.mean([w for x, y in zip(weights, word_ids) for w, id in zip(x, y) if id != 0]) + weights = [[w if id == 0 else w + delta + for w, id in zip(x, y)] for x, y in zip(weights, word_ids)] + return weights + +def scale_to_norm(weights, word_ids, w_max): + top = np.max(weights) + w_max = min(top, w_max) + weights = [[w_max if id == 0 else (w / top) * w_max + for w, id in zip(x, y)] for x, y in zip(weights, word_ids)] + return weights + +def from_zero(weights, base_emb): + weight_tensor = torch.tensor(weights, dtype=base_emb.dtype, device=base_emb.device) + weight_tensor = weight_tensor.reshape(1, -1, 1).expand(base_emb.shape) + return base_emb * weight_tensor + +def mask_word_id(tokens, word_ids, target_id, mask_token): + new_tokens = [[mask_token if wid == target_id else t + for t, wid in zip(x, y)] for x, y in zip(tokens, word_ids)] + mask = np.array(word_ids) == target_id + return (new_tokens, mask) + +def batched_clip_encode(tokens, length, encode_func, num_chunks): + embs = [] + for e in _grouper(32, tokens): + enc, pooled = encode_func(e) + try: + enc = enc.reshape((len(e), length, -1)) + except: + raise Exception("Down_Weight and Comfy++ weight interpretations are not currently supported with this model.") + embs.append(enc) + embs = torch.cat(embs) + embs = embs.reshape((len(tokens) // num_chunks, length * num_chunks, -1)) + return embs + +def from_masked(tokens, weights, word_ids, base_emb, length, encode_func, m_token=266): + pooled_base = base_emb[0, length - 1:length, :] + wids, inds = np.unique(np.array(word_ids).reshape(-1), return_index=True) + weight_dict = dict((id, w) + for id, w in zip(wids, np.array(weights).reshape(-1)[inds]) + if w != 1.0) + + if len(weight_dict) == 0: + return torch.zeros_like(base_emb), base_emb[0, length - 1:length, :] + + weight_tensor = torch.tensor(weights, dtype=base_emb.dtype, device=base_emb.device) + weight_tensor = weight_tensor.reshape(1, -1, 1).expand(base_emb.shape) + + # m_token = (clip.tokenizer.end_token, 1.0) if clip.tokenizer.pad_with_end else (0,1.0) + # TODO: find most suitable masking token here + m_token = (m_token, 1.0) + + ws = [] + masked_tokens = [] + masks = [] + + # create prompts + for id, w in weight_dict.items(): + masked, m = mask_word_id(tokens, word_ids, id, m_token) + masked_tokens.extend(masked) + + m = torch.tensor(m, dtype=base_emb.dtype, device=base_emb.device) + m = m.reshape(1, -1, 1).expand(base_emb.shape) + masks.append(m) + + ws.append(w) + + # batch process prompts + embs = batched_clip_encode(masked_tokens, length, encode_func, len(tokens)) + masks = torch.cat(masks) + + embs = (base_emb.expand(embs.shape) - embs) + pooled = embs[0, length - 1:length, :] + + embs *= masks + embs = embs.sum(axis=0, keepdim=True) + + pooled_start = pooled_base.expand(len(ws), -1) + ws = torch.tensor(ws).reshape(-1, 1).expand(pooled_start.shape) + pooled = (pooled - pooled_start) * (ws - 1) + pooled = pooled.mean(axis=0, keepdim=True) + + return ((weight_tensor - 1) * embs), pooled_base + pooled + +def mask_inds(tokens, inds, mask_token): + clip_len = len(tokens[0]) 
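# Token positions arrive flattened as chunk_index * clip_len + offset, so the
# set lookup below swaps only the targeted tokens for the mask token.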
+ inds_set = set(inds) + new_tokens = [[mask_token if i * clip_len + j in inds_set else t + for j, t in enumerate(x)] for i, x in enumerate(tokens)] + return new_tokens + +def down_weight(tokens, weights, word_ids, base_emb, length, encode_func, m_token=266): + w, w_inv = np.unique(weights, return_inverse=True) + + if np.sum(w < 1) == 0: + return base_emb, tokens, base_emb[0, length - 1:length, :] + # m_token = (clip.tokenizer.end_token, 1.0) if clip.tokenizer.pad_with_end else (0,1.0) + # using the comma token as a masking token seems to work better than aos tokens for SD 1.x + m_token = (m_token, 1.0) + + masked_tokens = [] + + masked_current = tokens + for i in range(len(w)): + if w[i] >= 1: + continue + masked_current = mask_inds(masked_current, np.where(w_inv == i)[0], m_token) + masked_tokens.extend(masked_current) + + embs = batched_clip_encode(masked_tokens, length, encode_func, len(tokens)) + embs = torch.cat([base_emb, embs]) + w = w[w <= 1.0] + w_mix = np.diff([0] + w.tolist()) + w_mix = torch.tensor(w_mix, dtype=embs.dtype, device=embs.device).reshape((-1, 1, 1)) + + weighted_emb = (w_mix * embs).sum(axis=0, keepdim=True) + return weighted_emb, masked_current, weighted_emb[0, length - 1:length, :] + +def scale_emb_to_mag(base_emb, weighted_emb): + norm_base = torch.linalg.norm(base_emb) + norm_weighted = torch.linalg.norm(weighted_emb) + embeddings_final = (norm_base / norm_weighted) * weighted_emb + return embeddings_final + +def recover_dist(base_emb, weighted_emb): + fixed_std = (base_emb.std() / weighted_emb.std()) * (weighted_emb - weighted_emb.mean()) + embeddings_final = fixed_std + (base_emb.mean() - fixed_std.mean()) + return embeddings_final + +def A1111_renorm(base_emb, weighted_emb): + embeddings_final = (base_emb.mean() / weighted_emb.mean()) * weighted_emb + return embeddings_final + +def advanced_encode_from_tokens(tokenized, token_normalization, weight_interpretation, encode_func, m_token=266, + length=77, w_max=1.0, return_pooled=False, apply_to_pooled=False): + tokens = [[t for t, _, _ in x] for x in tokenized] + weights = [[w for _, w, _ in x] for x in tokenized] + word_ids = [[wid for _, _, wid in x] for x in tokenized] + + # weight normalization + # ==================== + + # distribute down/up weights over word lengths + if token_normalization.startswith("length"): + weights = divide_length(word_ids, weights) + + # make mean of word tokens 1 + if token_normalization.endswith("mean"): + weights = shift_mean_weight(word_ids, weights) + + # weight interpretation + # ===================== + pooled = None + + if weight_interpretation == "comfy": + weighted_tokens = [[(t, w) for t, w in zip(x, y)] for x, y in zip(tokens, weights)] + weighted_emb, pooled_base = encode_func(weighted_tokens) + pooled = pooled_base + else: + unweighted_tokens = [[(t, 1.0) for t, _, _ in x] for x in tokenized] + base_emb, pooled_base = encode_func(unweighted_tokens) + + if weight_interpretation == "A1111": + weighted_emb = from_zero(weights, base_emb) + weighted_emb = A1111_renorm(base_emb, weighted_emb) + pooled = pooled_base + + if weight_interpretation == "compel": + pos_tokens = [[(t, w) if w >= 1.0 else (t, 1.0) for t, w in zip(x, y)] for x, y in zip(tokens, weights)] + weighted_emb, _ = encode_func(pos_tokens) + weighted_emb, _, pooled = down_weight(pos_tokens, weights, word_ids, weighted_emb, length, encode_func) + + if weight_interpretation == "comfy++": + weighted_emb, tokens_down, _ = down_weight(unweighted_tokens, weights, word_ids, base_emb, length, encode_func) + weights 
= [[w if w > 1.0 else 1.0 for w in x] for x in weights] + # unweighted_tokens = [[(t,1.0) for t, _, _ in x] for x in tokens_down] + embs, pooled = from_masked(unweighted_tokens, weights, word_ids, base_emb, length, encode_func) + weighted_emb += embs + + if weight_interpretation == "down_weight": + weights = scale_to_norm(weights, word_ids, w_max) + weighted_emb, _, pooled = down_weight(unweighted_tokens, weights, word_ids, base_emb, length, encode_func) + + if return_pooled: + if apply_to_pooled: + return weighted_emb, pooled + else: + return weighted_emb, pooled_base + return weighted_emb, None + +def encode_token_weights_g(model, token_weight_pairs): + return model.clip_g.encode_token_weights(token_weight_pairs) + +def encode_token_weights_l(model, token_weight_pairs): + return model.clip_l.encode_token_weights(token_weight_pairs) + +def encode_token_weights_t5(model, token_weight_pairs): + return model.t5xxl.encode_token_weights(token_weight_pairs) + +def encode_token_weights(model, token_weight_pairs, encode_func): + if model.layer_idx is not None: + model.cond_stage_model.set_clip_options({"layer": model.layer_idx}) + + model_management.load_model_gpu(model.patcher) + return encode_func(model.cond_stage_model, token_weight_pairs) + +def prepareXL(embs_l, embs_g, pooled, clip_balance): + l_w = 1 - max(0, clip_balance - .5) * 2 + g_w = 1 - max(0, .5 - clip_balance) * 2 + if embs_l is not None: + return torch.cat([embs_l * l_w, embs_g * g_w], dim=-1), pooled + else: + return embs_g, pooled + +def prepareSD3(out, pooled, clip_balance): + lg_w = 1 - max(0, clip_balance - .5) * 2 + t5_w = 1 - max(0, .5 - clip_balance) * 2 + if out.shape[0] > 1: + return torch.cat([out[0] * lg_w, out[1] * t5_w], dim=-1), pooled + else: + return out, pooled + +def advanced_encode(clip, text, token_normalization, weight_interpretation, w_max=1.0, clip_balance=.5, apply_to_pooled=True): + tokenized = clip.tokenize(text, return_word_ids=True) + + if SD3ClipModel and isinstance(clip.cond_stage_model, SD3ClipModel): + lg_out = None + pooled = None + out = None + + if len(tokenized['l']) > 0 or len(tokenized['g']) > 0: + if 'l' in tokenized: + lg_out, l_pooled = advanced_encode_from_tokens(tokenized['l'], + token_normalization, + weight_interpretation, + lambda x: encode_token_weights(clip, x, encode_token_weights_l), + w_max=w_max, return_pooled=True,) + else: + l_pooled = torch.zeros((1, 768), device=model_management.intermediate_device()) + + if 'g' in tokenized: + g_out, g_pooled = advanced_encode_from_tokens(tokenized['g'], + token_normalization, + weight_interpretation, + lambda x: encode_token_weights(clip, x, encode_token_weights_g), + w_max=w_max, return_pooled=True) + if lg_out is not None: + lg_out = torch.cat([lg_out, g_out], dim=-1) + else: + lg_out = torch.nn.functional.pad(g_out, (768, 0)) + else: + g_out = None + g_pooled = torch.zeros((1, 1280), device=model_management.intermediate_device()) + + if lg_out is not None: + lg_out = torch.nn.functional.pad(lg_out, (0, 4096 - lg_out.shape[-1])) + out = lg_out + pooled = torch.cat((l_pooled, g_pooled), dim=-1) + + # t5xxl + if 't5xxl' in tokenized and clip.cond_stage_model.t5xxl is not None: + t5_out, t5_pooled = advanced_encode_from_tokens(tokenized['t5xxl'], + token_normalization, + weight_interpretation, + lambda x: encode_token_weights(clip, x, encode_token_weights_t5), + w_max=w_max, return_pooled=True) + if lg_out is not None: + out = torch.cat([lg_out, t5_out], dim=-2) + else: + out = t5_out + + if out is None: + out = torch.zeros((1, 77, 4096), 
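# A minimal standalone sketch of the clip_balance schedule used by prepareXL
# and prepareSD3 above: a piecewise-linear crossfade centred on 0.5, where
# moving past the midpoint fades one encoder out while the other stays full.
def balance_weights(clip_balance):
    w_first = 1 - max(0, clip_balance - .5) * 2    # l (or l+g) weight
    w_second = 1 - max(0, .5 - clip_balance) * 2   # g (or t5) weight
    return w_first, w_second

assert balance_weights(0.5) == (1.0, 1.0)    # both encoders at full strength
assert balance_weights(1.0) == (0.0, 1.0)    # only the second encoder
assert balance_weights(0.25) == (1.0, 0.5)   # first full, second halved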
device=model_management.intermediate_device()) + + if pooled is None: + pooled = torch.zeros((1, 768 + 1280), device=model_management.intermediate_device()) + + return prepareSD3(out, pooled, clip_balance) + + elif FluxClipModel and isinstance(clip.cond_stage_model, FluxClipModel): + if 't5xxl' in tokenized and clip.cond_stage_model.t5xxl is not None: + t5_out, t5_pooled = advanced_encode_from_tokens(tokenized['t5xxl'], + token_normalization, + weight_interpretation, + lambda x: encode_token_weights(clip, x, encode_token_weights_t5), + w_max=w_max, return_pooled=True,) + + if len(tokenized['l']) > 0: + if 'l' in tokenized: + l_out, l_pooled = advanced_encode_from_tokens(tokenized['l'], + token_normalization, + weight_interpretation, + lambda x: encode_token_weights(clip, x, encode_token_weights_l), + w_max=w_max, return_pooled=True,) + else: + l_pooled = torch.zeros((1, 768), device=model_management.intermediate_device()) + + return t5_out, l_pooled + + elif isinstance(clip.cond_stage_model, (SDXLClipModel, SDXLRefinerClipModel, SDXLClipG)): + embs_l = None + embs_g = None + pooled = None + if 'l' in tokenized and isinstance(clip.cond_stage_model, SDXLClipModel): + embs_l, _ = advanced_encode_from_tokens(tokenized['l'], + token_normalization, + weight_interpretation, + lambda x: encode_token_weights(clip, x, encode_token_weights_l), + w_max=w_max, + return_pooled=False) + if 'g' in tokenized: + embs_g, pooled = advanced_encode_from_tokens(tokenized['g'], + token_normalization, + weight_interpretation, + lambda x: encode_token_weights(clip, x, encode_token_weights_g), + w_max=w_max, + return_pooled=True, + apply_to_pooled=apply_to_pooled) + return prepareXL(embs_l, embs_g, pooled, clip_balance) + + elif isinstance(clip.cond_stage_model, StableCascadeClipModel): + return advanced_encode_from_tokens( + tokenized['g'], + token_normalization, + weight_interpretation, + lambda x: encode_token_weights(clip, x, encode_token_weights_g), + w_max=w_max, + return_pooled=True, + apply_to_pooled=apply_to_pooled + ) + else: + return advanced_encode_from_tokens(tokenized['l'], + token_normalization, + weight_interpretation, + lambda x: (clip.encode_from_tokens({'l': x}), None), + w_max=w_max) + +def advanced_encode_XL(clip, text1, text2, token_normalization, weight_interpretation, w_max=1.0, clip_balance=.5, apply_to_pooled=True): + tokenized1 = clip.tokenize(text1, return_word_ids=True) + tokenized2 = clip.tokenize(text2, return_word_ids=True) + + embs_l, _ = advanced_encode_from_tokens(tokenized1['l'], + token_normalization, + weight_interpretation, + lambda x: encode_token_weights(clip, x, encode_token_weights_l), + w_max=w_max, + return_pooled=False) + + embs_g, pooled = advanced_encode_from_tokens(tokenized2['g'], + token_normalization, + weight_interpretation, + lambda x: encode_token_weights(clip, x, encode_token_weights_g), + w_max=w_max, + return_pooled=True, + apply_to_pooled=apply_to_pooled) + + gcd_num = gcd(embs_l.shape[1], embs_g.shape[1]) + repeat_l = int((embs_g.shape[1] / gcd_num) * embs_l.shape[1]) + repeat_g = int((embs_l.shape[1] / gcd_num) * embs_g.shape[1]) + + return prepareXL(embs_l.expand((-1,repeat_l,-1)), embs_g.expand((-1,repeat_g,-1)), pooled, clip_balance) \ No newline at end of file diff --git a/zavodik/nodes/ComfyUI_tinyterraNodes-main/ttNpy/tinyterraNodes.py b/zavodik/nodes/ComfyUI_tinyterraNodes-main/ttNpy/tinyterraNodes.py new file mode 100644 index 0000000000000000000000000000000000000000..fa002ea55a73fb4510ec7dcbdb849ea42324671e --- /dev/null +++ 
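# A minimal standalone sketch of the length-matching arithmetic at the end of
# advanced_encode_XL above: both token axes are repeated out to the least
# common multiple of their lengths so the two embeddings can be concatenated.
from math import gcd

def matched_lengths(len_l, len_g):
    g = gcd(len_l, len_g)
    return (len_g // g) * len_l, (len_l // g) * len_g

assert matched_lengths(77, 77) == (77, 77)
assert matched_lengths(77, 154) == (154, 154)   # both sides meet at the lcm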
b/zavodik/nodes/ComfyUI_tinyterraNodes-main/ttNpy/tinyterraNodes.py @@ -0,0 +1,3824 @@ +""" +@author: tinyterra +@title: tinyterraNodes +@nickname: 🌏 +@description: This extension offers extensive xyPlot, various pipe nodes, fullscreen image viewer based on node history, dynamic widgets, interface customization, and more. +""" + +#---------------------------------------------------------------------------------------------------------------------------------------------------# +# tinyterraNodes developed in 2023 by tinyterra https://github.com/TinyTerra # +# for ComfyUI https://github.com/comfyanonymous/ComfyUI # +# Like the pack and want to support me? https://www.buymeacoffee.com/tinyterra # +#---------------------------------------------------------------------------------------------------------------------------------------------------# + +ttN_version = '2.0.9' + +import asyncio +import os +import re +import json +import copy +import random +import datetime +from pathlib import Path +from urllib.request import urlopen +from collections import OrderedDict +from typing import Dict, List, Optional, Tuple, Union, Any +import uuid + +import numpy as np +import torch +import hashlib +from PIL import Image, ImageDraw, ImageFont +from PIL.PngImagePlugin import PngInfo + +import nodes +import comfy.sd +import execution +import comfy.utils +import folder_paths +import comfy.samplers +import latent_preview +import comfy.controlnet +import comfy.model_management +import comfy.supported_models +import comfy.supported_models_base +from comfy.model_base import BaseModel +import comfy_extras.nodes_upscale_model +import comfy_extras.nodes_model_advanced +from comfy.sd import CLIP, VAE +from spandrel import ModelLoader, ImageModelDescriptor +from .adv_encode import advanced_encode +from comfy.model_patcher import ModelPatcher +from nodes import MAX_RESOLUTION, ControlNetApplyAdvanced, ConditioningZeroOut +from nodes import NODE_CLASS_MAPPINGS as COMFY_CLASS_MAPPINGS + +from .utils import CC, ttNl, ttNpaths, AnyType +from .ttNexecutor import xyExecutor + +OUTPUT_FILETYPES = ["png", "jpg", "jpeg", "tiff", "tif", "webp", "bmp"] +UPSCALE_METHODS = ["None", + "[latent] nearest-exact", "[latent] bilinear", "[latent] area", "[latent] bicubic", "[latent] lanczos", "[latent] bislerp", + "[hiresFix] nearest-exact", "[hiresFix] bilinear", "[hiresFix] area", "[hiresFix] bicubic", "[hiresFix] lanczos", "[hiresFix] bislerp"] +UPSCALE_MODELS = folder_paths.get_filename_list("upscale_models") + ["None"] +CROP_METHODS = ["disabled", "center"] +CUSTOM_SCHEDULERS = ["AYS SD1", "AYS SDXL", "AYS SVD", "GITS SD1"] + +class ttNloader: + def __init__(self): + self.loraDict = {lora.split('\\')[-1]: lora for lora in folder_paths.get_filename_list("loras")} + self.loader_cache = {} + + @staticmethod + def nsp_parse(text, seed=0, noodle_key='__', nspterminology=None, pantry_path=None, title=None, my_unique_id=None): + if "__" not in text: + return text + + if nspterminology is None: + # Fetch the NSP Pantry + if pantry_path is None: + pantry_path = os.path.join(ttNpaths.tinyterraNodes, 'nsp_pantry.json') + if not os.path.exists(pantry_path): + response = urlopen('https://raw.githubusercontent.com/WASasquatch/noodle-soup-prompts/main/nsp_pantry.json') + tmp_pantry = json.loads(response.read()) + # Dump JSON locally + pantry_serialized = json.dumps(tmp_pantry, indent=4) + with open(pantry_path, "w") as f: + f.write(pantry_serialized) + del response, tmp_pantry + + # Load local pantry + with open(pantry_path, 'r') as f: + 
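# A minimal standalone sketch of the noodle-soup substitution loop that
# nsp_parse above performs, using a hypothetical in-memory pantry in place of
# the downloaded nsp_pantry.json; reseeding after every replacement keeps the
# output deterministic for a given seed while letting occurrences differ.
import random

def nsp_sketch(text, seed=0, noodle_key='__', pantry=None):
    pantry = pantry or {"color": ["red", "green", "blue"]}   # stand-in pantry
    random.seed(seed)
    for term, choices in pantry.items():
        tkey = f'{noodle_key}{term}{noodle_key}'
        for _ in range(text.count(tkey)):
            text = text.replace(tkey, random.choice(choices), 1)
            seed += 1
            random.seed(seed)
    return text

print(nsp_sketch("a __color__ car by a __color__ wall", seed=42))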
nspterminology = json.load(f) + + if seed > 0 or seed < 0: + random.seed(seed) + + # Parse Text + new_text = text + for term in nspterminology: + # Target Noodle + tkey = f'{noodle_key}{term}{noodle_key}' + # How many occurrences? + tcount = new_text.count(tkey) + + if tcount > 0: + nsp_parsed = True + + # Apply random results for each noodle counted + for _ in range(tcount): + new_text = new_text.replace( + tkey, random.choice(nspterminology[term]), 1) + seed += 1 + random.seed(seed) + + ttNl(new_text).t(f'{title}[{my_unique_id}]').p() + + return new_text + + @staticmethod + def clean_values(values: str): + original_values = values.split("; ") + cleaned_values = [] + + for value in original_values: + cleaned_value = value.strip(';').strip() + if cleaned_value: + try: + cleaned_value = int(cleaned_value) + except ValueError: + try: + cleaned_value = float(cleaned_value) + except ValueError: + pass + + cleaned_values.append(cleaned_value) + return cleaned_values + + @staticmethod + def string_to_seed(s): + h = hashlib.sha256(s.encode()).digest() + return (int.from_bytes(h, byteorder='big') & 0xffffffffffffffff) + + def clear_cache(self, prompt, full=False): + loader_ids = [f'loader{key}' for key, value in prompt.items() if value['class_type'] in ['ttN pipeLoader_v2', 'ttN pipeLoaderSDXL_v2']] + + if full is True: + self.loader_cache = {} + else: + for key in list(self.loader_cache.keys()): + if key not in loader_ids: + self.loader_cache.pop(key) + + def load_checkpoint(self, ckpt_name, config_name=None, clip_skip=0, output_vae=True, output_clip=True): + ckpt_path = folder_paths.get_full_path("checkpoints", ckpt_name) + if config_name not in [None, "Default"]: + config_path = folder_paths.get_full_path("configs", config_name) + loaded_ckpt = comfy.sd.load_checkpoint(config_path, ckpt_path, output_vae=output_vae, output_clip=output_clip, embedding_directory=folder_paths.get_folder_paths("embeddings")) + else: + loaded_ckpt = comfy.sd.load_checkpoint_guess_config(ckpt_path, output_vae=output_vae, output_clip=output_clip, embedding_directory=folder_paths.get_folder_paths("embeddings")) + + clip = loaded_ckpt[1].clone() if loaded_ckpt[1] is not None else None + if clip_skip != 0 and clip is not None: + if sampler.get_model_type(loaded_ckpt[0]) in ['FLUX', 'FLOW']: + raise Exception('FLOW and FLUX do not support clip_skip. 
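# A minimal standalone sketch of string_to_seed above: hash the prompt text
# with SHA-256 and keep the low 64 bits, so identical text always yields the
# same sampler seed without any shared state.
import hashlib

def string_to_seed(s):
    h = hashlib.sha256(s.encode()).digest()
    return int.from_bytes(h, byteorder='big') & 0xffffffffffffffff

assert string_to_seed("a photo of a cat") == string_to_seed("a photo of a cat")
assert 0 <= string_to_seed("anything") <= 0xffffffffffffffff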
Set clip_skip to 0.') + clip.clip_layer(clip_skip) + + # model, clip, vae + return loaded_ckpt[0], clip, loaded_ckpt[2] + + def load_unclip(self, ckpt_name, output_vae=True, output_clip=True): + ckpt_path = folder_paths.get_full_path("checkpoints", ckpt_name) + out = comfy.sd.load_checkpoint_guess_config(ckpt_path, output_vae=True, output_clip=True, output_clipvision=True, embedding_directory=folder_paths.get_folder_paths("embeddings")) + return out + + def load_vae(self, vae_name): + vae_path = folder_paths.get_full_path("vae", vae_name) + sd = comfy.utils.load_torch_file(vae_path) + loaded_vae = comfy.sd.VAE(sd=sd) + + return loaded_vae + + def load_controlNet(self, positive, negative, controlnet_name, image, strength, start_percent, end_percent): + if type(controlnet_name) == str: + controlnet_path = folder_paths.get_full_path("controlnet", controlnet_name) + controlnet = comfy.controlnet.load_controlnet(controlnet_path) + else: + controlnet = controlnet_name + + controlnet_conditioning = ControlNetApplyAdvanced().apply_controlnet(positive, negative, controlnet, image, strength, start_percent, end_percent) + base_positive, base_negative = controlnet_conditioning[0], controlnet_conditioning[1] + return base_positive, base_negative + + def load_lora(self, lora_name, model, clip, strength_model, strength_clip): + if strength_model == 0 and strength_clip == 0: + return (model, clip) + + lora_path = folder_paths.get_full_path("loras", lora_name) + if lora_path is None or not os.path.exists(lora_path): + ttNl(f'{lora_path}').t("Skipping missing lora").error().p() + return (model, clip) + + lora = comfy.utils.load_torch_file(lora_path, safe_load=True) + model_lora, clip_lora = comfy.sd.load_lora_for_models(model, clip, lora, strength_model, strength_clip) + + return model_lora, clip_lora + + def validate_lora_format(self, lora_string): + if lora_string is None: + return None + if not re.match(r'^<lora:[^<>:]+(?::-?\d*\.?\d+)?(?::-?\d*\.?\d+)?>$', lora_string): + ttNl(f'{lora_string}').t("Skipping invalid lora format").error().p() + return None + + return lora_string + + def parse_lora_string(self, lora_string): + # Remove '<lora:' from the start and '>' from the end, then split by ':' + parts = lora_string[6:-1].split(':') # 6 is the length of '<lora:' + lora_name = parts[0] if len(parts) > 0 else None + weight1 = float(parts[1]) if len(parts) > 1 else None + weight2 = float(parts[2]) if len(parts) > 2 else weight1 + return lora_name, weight1, weight2 + + def load_lora_text(self, loras, model, clip): + # Extract potential <lora:name:weight(:clipWeight)> patterns + pattern = r'<lora:[^>]+>' + matches = re.findall(pattern, loras) + + # Validate each extracted pattern + for match in matches: + match = self.validate_lora_format(match) + if match is not None: + lora_name, weight1, weight2 = self.parse_lora_string(match) + + if lora_name not in self.loraDict: + ttNl(f'{lora_name}').t("Skipping unknown lora").error().p() + continue + + lora_name = self.loraDict.get(lora_name, lora_name) + model, clip = self.load_lora(lora_name, model, clip, weight1, weight2) + + return model, clip + + def embedding_encode(self, text, token_normalization, weight_interpretation, clip, seed=None, title=None, my_unique_id=None, prepend_text=None, zero_out=False): + text = f'{prepend_text} {text}' if prepend_text is not None else text + if seed is None: + seed = self.string_to_seed(text) + + text = self.nsp_parse(text, seed, title=title, my_unique_id=my_unique_id) + + embedding, pooled = advanced_encode(clip, text, token_normalization, weight_interpretation, w_max=1.0, apply_to_pooled='enable') + conditioning = [[embedding, {"pooled_output": pooled}]] + + if zero_out is True and
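# A minimal standalone sketch of the <lora:name:weight(:clipWeight)> syntax
# handled above, using the same slice-and-split logic as parse_lora_string.
def parse_lora_tag(tag):
    parts = tag[6:-1].split(':')               # strip '<lora:' and '>'
    name = parts[0] if len(parts) > 0 else None
    w_model = float(parts[1]) if len(parts) > 1 else None
    w_clip = float(parts[2]) if len(parts) > 2 else w_model
    return name, w_model, w_clip

assert parse_lora_tag('<lora:myLora:0.8>') == ('myLora', 0.8, 0.8)
assert parse_lora_tag('<lora:myLora:0.8:0.5>') == ('myLora', 0.8, 0.5)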
text.strip() == '': + return ConditioningZeroOut().zero_out(conditioning)[0] + else: + return conditioning + + def embedding_encodeXL(self, text, clip, seed=0, title=None, my_unique_id=None, prepend_text=None, text2=None, prepend_text2=None, width=None, height=None, crop_width=0, crop_height=0, target_width=None, target_height=None, refiner_clip=None, ascore=None): + text = f'{prepend_text} {text}' if prepend_text is not None else text + text = self.nsp_parse(text, seed, title=title, my_unique_id=my_unique_id) + + target_width = target_width if target_width is not None else width + target_height = target_height if target_height is not None else height + + if text2 is not None and refiner_clip is not None: + text2 = f'{prepend_text2} {text2}' if prepend_text2 is not None else text2 + text2 = self.nsp_parse(text2, seed, title=title, my_unique_id=my_unique_id) + + tokens_refiner = refiner_clip.tokenize(text2) + cond_refiner, pooled_refiner = refiner_clip.encode_from_tokens(tokens_refiner, return_pooled=True) + refiner_conditioning = [[cond_refiner, {"pooled_output": pooled_refiner, "aesthetic_score": ascore, "width": width,"height": height}]] + else: + refiner_conditioning = None + + if text2 is None or text2.strip() == '': + text2 = text + + tokens = clip.tokenize(text) + tokens["l"] = clip.tokenize(text2)["l"] + if len(tokens["l"]) != len(tokens["g"]): + empty = clip.tokenize("") + while len(tokens["l"]) < len(tokens["g"]): + tokens["l"] += empty["l"] + while len(tokens["l"]) > len(tokens["g"]): + tokens["g"] += empty["g"] + cond, pooled = clip.encode_from_tokens(tokens, return_pooled=True) + conditioning = [[cond, {"pooled_output": pooled, "width": width, "height": height, "crop_w": crop_width, "crop_h": crop_height, "target_width": target_width, "target_height": target_height}]] + + return conditioning, refiner_conditioning + + def load_main3(self, ckpt_name, config_name, vae_name, loras, clip_skip, model_override=None, clip_override=None, optional_lora_stack=None, unique_id=None): + cache = self.loader_cache.get(f'loader{unique_id}', None) + + model = "override" if model_override is not None else None + clip = "override" if clip_override is not None else None + vae = None + + if cache is not None and cache[0] == ckpt_name and cache[1] == config_name and cache[2] == vae_name and model is None and clip is None: + # Load from cache if it's the same + model = cache[3] + clip = cache[4] + vae = cache[5] + elif model is None or clip is None: + self.loader_cache.pop(f'loader{unique_id}', None) + + # Load normally + output_vae, output_clip = True, True + + if vae_name != "Baked VAE": + output_vae = False + if clip not in [None, "None", "override"]: + output_clip = False + + model, clip, vae = self.load_checkpoint(ckpt_name, config_name, clip_skip, output_vae, output_clip) + + if vae is None: + if vae_name != "Baked VAE": + vae = self.load_vae(vae_name) + else: + _, _, vae = self.load_checkpoint(ckpt_name, config_name, clip_skip, output_vae=True, output_clip=False) + + if unique_id is not None and model != "override" and clip != "override": + self.loader_cache[f'loader{unique_id}'] = [ckpt_name, config_name, vae_name, model, clip, vae] + + if model_override is not None: + self.loader_cache.pop(f'loader{unique_id}', None) + model = model_override + del model_override + + if clip_override is not None: + clip = clip_override.clone() + + if clip_skip != 0: + if sampler.get_model_type(model) in ['FLUX', 'FLOW']: + raise Exception('FLOW and FLUX do not support clip_skip. 
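# A minimal standalone sketch of the l/g alignment in embedding_encodeXL
# above: whichever tokenized side is shorter is padded with empty-prompt
# chunks until both encoders see the same number of 77-token batches.
tokens_l = [["l-chunk-1"]]
tokens_g = [["g-chunk-1"], ["g-chunk-2"]]
empty = {"l": [["empty-l"]], "g": [["empty-g"]]}   # stand-in for clip.tokenize("")

while len(tokens_l) < len(tokens_g):
    tokens_l += empty["l"]
while len(tokens_l) > len(tokens_g):
    tokens_g += empty["g"]

assert len(tokens_l) == len(tokens_g) == 2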
Set clip_skip to 0.') + clip.clip_layer(clip_skip) + del clip_override + + if optional_lora_stack is not None: + for lora in optional_lora_stack: + model, clip = self.load_lora(lora[0], model, clip, lora[1], lora[2]) + + if loras not in [None, "None"]: + model, clip = self.load_lora_text(loras, model, clip) + + if not clip: + raise Exception("No CLIP found") + + return model, clip, vae + +class ttNsampler: + def __init__(self): + self.last_helds: dict[str, list] = { + "results": [], + "pipe_line": [], + } + self.device = comfy.model_management.intermediate_device() + + @staticmethod + def tensor2pil(image: torch.Tensor) -> Image.Image: + """Convert a torch tensor to a PIL image.""" + return Image.fromarray(np.clip(255. * image.cpu().numpy().squeeze(), 0, 255).astype(np.uint8)) + + @staticmethod + def pil2tensor(image: Image.Image) -> torch.Tensor: + """Convert a PIL image to a torch tensor.""" + return torch.from_numpy(np.array(image).astype(np.float32) / 255.0).unsqueeze(0) + + @staticmethod + def enforce_mul_of_64(d): + d = int(d) + if d<=7: + d = 8 + leftover = d % 8 # 8 is the number of pixels per byte + if leftover != 0: # if the number of pixels is not a multiple of 8 + if (leftover < 4): # if the number of pixels is less than 4 + d -= leftover # remove the leftover pixels + else: # if the number of pixels is more than 4 + d += 8 - leftover # add the leftover pixels + + return int(d) + + @staticmethod + def safe_split(to_split: str, delimiter: str) -> List[str]: + """Split the input string and return a list of non-empty parts.""" + parts = to_split.split(delimiter) + parts = [part for part in parts if part not in ('', ' ', ' ')] + + while len(parts) < 2: + parts.append('None') + return parts + + @staticmethod + def get_model_type(model): + base: BaseModel = model.model + return str(base.model_type).split('.')[1].strip() + + def emptyLatent(self, empty_latent_aspect: str, batch_size:int, width:int = None, height:int = None, sd3: bool = False) -> torch.Tensor: + if empty_latent_aspect and empty_latent_aspect != "width x height [custom]": + width, height = empty_latent_aspect.replace(' ', '').split('[')[0].split('x') + + if sd3: + latent = torch.ones([batch_size, 16, int(height) // 8, int(width) // 8], device=self.device) * 0.0609 + else: + latent = torch.zeros([batch_size, 4, int(height) // 8, int(width) // 8], device=self.device) + + return latent + + def common_ksampler(self, model, seed, steps, cfg, sampler_name, scheduler, positive, negative, latent, denoise=1.0, disable_noise=False, start_step=None, last_step=None, force_full_denoise=False, preview_latent=True, disable_pbar=False): + latent_image = latent["samples"] + + if disable_noise: + noise = torch.zeros(latent_image.size(), dtype=latent_image.dtype, layout=latent_image.layout, device="cpu") + else: + batch_inds = latent["batch_index"] if "batch_index" in latent else None + noise = comfy.sample.prepare_noise(latent_image, seed, batch_inds) + + noise_mask = None + if "noise_mask" in latent: + noise_mask = latent["noise_mask"] + + if preview_latent: + callback = latent_preview.prepare_callback(model, steps) + else: + callback = None + + disable_pbar = not comfy.utils.PROGRESS_BAR_ENABLED + + if scheduler not in CUSTOM_SCHEDULERS: + samples = comfy.sample.sample(model, noise, steps, cfg, sampler_name, scheduler, positive, negative, latent_image, + denoise=denoise, disable_noise=disable_noise, start_step=start_step, last_step=last_step, + force_full_denoise=force_full_denoise, noise_mask=noise_mask, callback=callback, 
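# A minimal standalone restatement of enforce_mul_of_64 above: despite the
# name it snaps a latent-space dimension to the nearest multiple of 8 (which
# is 64 pixels once the VAE's 8x upscale is applied), rounding down only when
# the remainder is under 4.
def snap8(d):
    d = max(int(d), 8)
    leftover = d % 8
    if leftover != 0:
        d = d - leftover if leftover < 4 else d + (8 - leftover)
    return d

assert snap8(507) == 504   # remainder 3 -> round down
assert snap8(510) == 512   # remainder 6 -> round up
assert snap8(3) == 8       # floor at the minimum latent size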
disable_pbar=disable_pbar, seed=seed) + else: + sampler = comfy.samplers.sampler_object(sampler_name) + + if scheduler.startswith("AYS"): + from comfy_extras.nodes_align_your_steps import AlignYourStepsScheduler + + model_type = scheduler.split(' ')[1] + sigmas = AlignYourStepsScheduler().get_sigmas(model_type, steps, denoise)[0] + elif scheduler.startswith("GITS"): + from comfy_extras.nodes_gits import GITSScheduler + + sigmas = GITSScheduler().get_sigmas(1.2, steps, denoise)[0] + + samples = comfy.sample.sample_custom(model, noise, cfg, sampler, sigmas, positive, negative, latent_image, noise_mask=noise_mask, callback=callback, disable_pbar=disable_pbar, seed=seed) + + out = latent.copy() + out["samples"] = samples + return out + + def upscale(self, samples, upscale_method, scale_by, crop): + s = samples.copy() + width = self.enforce_mul_of_64(round(samples["samples"].shape[3] * scale_by)) + height = self.enforce_mul_of_64(round(samples["samples"].shape[2] * scale_by)) + + if (width > MAX_RESOLUTION): + width = MAX_RESOLUTION + if (height > MAX_RESOLUTION): + height = MAX_RESOLUTION + + s["samples"] = comfy.utils.common_upscale(samples["samples"], width, height, upscale_method, crop) + return (s,) + + def handle_upscale(self, samples: dict, upscale_method: str, factor: float, crop: bool, + upscale_model_name: str=None, vae: VAE=None, images: np.ndarray=None, rescale: str=None, percent: float=None, width: int=None, height: int=None, longer_side: int=None) -> dict: + """Upscale the samples if the upscale_method is not set to 'None'.""" + upscale_method = upscale_method.split(' ', 1) + + # Upscale samples if enabled + if upscale_method[0] == "[latent]": + if upscale_method[1] != "None": + samples = self.upscale(samples, upscale_method[1], factor, crop)[0] + + if upscale_method[0] == "[hiresFix]": + if (images is None): + images = vae.decode(samples["samples"]) + hiresfix = ttN_modelScale() + if upscale_model_name == "None": + raise ValueError("Unable to model upscale. 
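# A minimal standalone sketch of how the CUSTOM_SCHEDULERS names above are
# dispatched: the prefix selects the sigma generator, and for AYS the suffix
# is the model type handed to AlignYourStepsScheduler (GITS ignores it).
def dispatch_scheduler(scheduler):
    family, model_type = scheduler.split(' ')
    return family, model_type

assert dispatch_scheduler("AYS SDXL") == ("AYS", "SDXL")
assert dispatch_scheduler("GITS SD1") == ("GITS", "SD1")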
Please install an upscale model and try again.") + samples = hiresfix.upscale(upscale_model_name, vae, images, True if rescale != 'None' else False, upscale_method[1], rescale, percent, width, height, longer_side, crop, "return latent", None, True) + + return samples + + def get_output(self, pipe: dict) -> Tuple: + """Return a tuple of various elements fetched from the input pipe dictionary.""" + return ( + pipe, + pipe.get("model"), + pipe.get("positive"), + pipe.get("negative"), + pipe.get("samples"), + pipe.get("vae"), + pipe.get("clip"), + pipe.get("images"), + pipe.get("seed") + ) + + def get_output_sdxl(self, sdxl_pipe: dict, pipe: dict) -> Tuple: + """Return a tuple of various elements fetched from the input sdxl_pipe dictionary.""" + return ( + sdxl_pipe, + pipe, + sdxl_pipe.get("model"), + sdxl_pipe.get("positive"), + sdxl_pipe.get("negative"), + sdxl_pipe.get("refiner_model"), + sdxl_pipe.get("refiner_positive"), + sdxl_pipe.get("refiner_negative"), + sdxl_pipe.get("samples"), + sdxl_pipe.get("vae"), + sdxl_pipe.get("clip"), + sdxl_pipe.get("images"), + sdxl_pipe.get("seed") + ) + +class ttNadv_xyPlot: + def __init__(self, adv_xyPlot, unique_id, prompt, extra_pnginfo, save_prefix, image_output, executor): + self.executor = executor + self.unique_id = str(unique_id) + self.prompt = prompt + self.extra_pnginfo = extra_pnginfo + self.save_prefix = save_prefix + self.image_output = image_output + + self.latent_list = [] + self.image_list = [] + self.ui_list = [] + + self.adv_xyPlot = adv_xyPlot + self.x_points = adv_xyPlot.get("x_plot", None) + self.y_points = adv_xyPlot.get("y_plot", None) + self.z_points = adv_xyPlot.get("z_plot", None) + self.save_individuals = adv_xyPlot.get("save_individuals", False) + self.image_output = prompt[str(unique_id)]["inputs"]["image_output"] + self.invert_bg = adv_xyPlot.get("invert_bg", False) + self.x_labels = [] + self.y_labels = [] + self.z_labels = [] + + self.grid_spacing = adv_xyPlot["grid_spacing"] + self.max_width, self.max_height = 0, 0 + self.num_cols = len(self.x_points) if self.x_points else 1 + self.num_rows = len(self.y_points) if self.y_points else 1 + + self.num = 0 + self.total = (self.num_cols if self.num_cols > 0 else 1) * (self.num_rows if self.num_rows > 0 else 1) + + def reset(self): + self.executor.reset() + self.executor = None + self.clear_caches() + + def clear_caches(self): + self.latent_list = [] + self.image_list = [] + self.ui_list = [] + self.num = 0 + + @staticmethod + def get_font(font_size): + font = None + if os.path.exists(ttNpaths.font_path): + try: + font = ImageFont.truetype(str(Path(ttNpaths.font_path)), font_size) + except: + pass + + if font is None: + font = ImageFont.load_default(font_size) + + return font + + @staticmethod + def rearrange_tensors(latent, num_cols, num_rows): + new_latent = [] + for i in range(num_rows): + for j in range(num_cols): + index = j * num_rows + i + new_latent.append(latent[index]) + return new_latent + + @staticmethod + def _get_nodes_to_keep(nodeID, prompt): + nodes_to_keep = OrderedDict([(nodeID, None)]) + + toCheck = [nodeID] + + while toCheck: + current_node_id = toCheck.pop() + current_node = prompt[current_node_id] + + for input_key in current_node["inputs"]: + value = current_node["inputs"][input_key] + + if isinstance(value, list) and len(value) == 2: + input_node_id = value[0] + + if input_node_id not in nodes_to_keep: + nodes_to_keep[input_node_id] = None + toCheck.append(input_node_id) + + return list(reversed(list(nodes_to_keep.keys()))) + + def create_label(self, 
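# A minimal standalone sketch of the ancestor walk in _get_nodes_to_keep
# above (order preservation dropped for brevity): any input of the form
# [node_id, slot] is treated as a graph edge and followed upstream.
toy_prompt = {
    "1": {"inputs": {"ckpt_name": "model.safetensors"}},
    "2": {"inputs": {"model": ["1", 0], "steps": 20}},
    "3": {"inputs": {"samples": ["2", 0]}},
    "4": {"inputs": {"unrelated": True}},        # not upstream of "3"
}

def ancestors(node_id, prompt):
    keep, stack = {node_id}, [node_id]
    while stack:
        for value in prompt[stack.pop()]["inputs"].values():
            if isinstance(value, list) and len(value) == 2 and value[0] not in keep:
                keep.add(value[0])
                stack.append(value[0])
    return keep

assert ancestors("3", toy_prompt) == {"1", "2", "3"}   # node "4" is pruned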
img, text, initial_font_size, is_x_label=True, max_font_size=70, min_font_size=21): + label_width = img.width if is_x_label else img.height + + font_size = self.adjust_font_size(text, initial_font_size, label_width) + font_size = min(max_font_size, font_size) + font_size = max(min_font_size, font_size) + + if self.invert_bg: + fill_color = 'white' + else: + fill_color = 'black' + + label_bg = Image.new('RGBA', (label_width, 0), color=(0, 0, 0, 0)) # Temporary height + d = ImageDraw.Draw(label_bg) + + font = self.get_font(font_size) + + def split_text_into_lines(text, font, label_width): + words = text.split() + if words == []: + return ['None'] + lines = [] + current_line = words[0] + for word in words[1:]: + try: + if d.textsize(f"{current_line} {word}", font=font)[0] <= label_width: + current_line += " " + word + else: + lines.append(current_line) + current_line = word + except: + if d.textlength(f"{current_line} {word}", font=font) <= label_width: + current_line += " " + word + else: + lines.append(current_line) + current_line = word + lines.append(current_line) + return lines + + lines = split_text_into_lines(text, font, label_width) + + line_height = int(font_size * 1.2) # Increased line height for spacing + label_height = len(lines) * line_height + + label_bg = Image.new('RGBA', (label_width, label_height), color=(0, 0, 0, 0)) + d = ImageDraw.Draw(label_bg) + + current_y = 0 + for line in lines: + try: + text_width, _ = d.textsize(line, font=font) + except: + text_width = d.textlength(line, font=font) + text_x = (label_width - text_width) // 2 + text_y = current_y + current_y += line_height + d.text((text_x, text_y), line, fill=fill_color, font=font) + + return label_bg + + def calculate_background_dimensions(self): + border_size = int((self.max_width//8)*1.5) if self.y_points is not None or self.x_points is not None else 0 + bg_width = self.num_cols * (self.max_width + self.grid_spacing) - self.grid_spacing + border_size * (self.y_points != None) + bg_height = self.num_rows * (self.max_height + self.grid_spacing) - self.grid_spacing + border_size * (self.x_points != None) + border_size * (self.z_points["1"]["label"] != None) + + x_offset_initial = border_size if self.y_points is not None else 0 + y_offset = border_size if self.x_points is not None else 0 + + return bg_width, bg_height, x_offset_initial, y_offset + + def get_relevant_prompt(self): + nodes_to_keep = self._get_nodes_to_keep(self.unique_id, self.prompt) + new_prompt = {node_id: self.prompt[node_id] for node_id in nodes_to_keep} + + if self.save_individuals == True: + if self.image_output in ["Hide", "Hide/Save"]: + new_prompt[self.unique_id]["inputs"]["image_output"] = "Hide/Save" + else: + new_prompt[self.unique_id]["inputs"]["image_output"] = "Save" + elif self.image_output in ["Preview", "Save"]: + new_prompt[self.unique_id]["inputs"]["image_output"] = "Preview" + else: + new_prompt[self.unique_id]["inputs"]["image_output"] = "Hide" + + return new_prompt + + def plot_images(self, z_label): + bg_width, bg_height, x_offset_initial, y_offset = self.calculate_background_dimensions() + + if self.invert_bg: + bg_color = (0, 0, 0, 255) + else: + bg_color = (255, 255, 255, 255) + + background = Image.new('RGBA', (int(bg_width), int(bg_height)), color=bg_color) + + for row_index in range(self.num_rows): + x_offset = x_offset_initial + + for col_index in range(self.num_cols): + index = col_index * self.num_rows + row_index + img = self.image_list[index] + background.paste(img, (x_offset, y_offset)) + + # Handle X label + if 
row_index == 0 and self.x_points is not None: + label_bg = self.create_label(img, self.x_labels[col_index], int(48 * img.width / 512)) + label_y = (y_offset - label_bg.height) // 2 + background.alpha_composite(label_bg, (x_offset, label_y)) + + # Handle Y label + if col_index == 0 and self.y_points is not None: + label_bg = self.create_label(img, self.y_labels[row_index], int(48 * img.height / 512), False) + label_bg = label_bg.rotate(90, expand=True) + + label_x = (x_offset - label_bg.width) // 2 + label_y = y_offset + (img.height - label_bg.height) // 2 + background.alpha_composite(label_bg, (label_x, label_y)) + + # Handle Z label + if z_label is not None: + label_bg = self.create_label(background, z_label, int(48 * img.height / 512)) + label_y = background.height - label_bg.height - (label_bg.height) // 2 + background.alpha_composite(label_bg, (0, label_y)) + + x_offset += img.width + self.grid_spacing + + y_offset += img.height + self.grid_spacing + + return sampler.pil2tensor(background) + + def adjust_font_size(self, text, initial_font_size, label_width): + font = self.get_font(initial_font_size) + left, top, right, bottom = font.getbbox(text) + text_width = right - left + + scaling_factor = 0.9 + if text_width > (label_width * scaling_factor): + return int(initial_font_size * (label_width / text_width) * scaling_factor) + else: + return initial_font_size + + def execute_prompt(self, prompt, extra_data, x_label, y_label, z_label): + prompt_id = uuid.uuid4() + + # Try to get the current event loop + try: + loop = asyncio.get_event_loop() + except RuntimeError: + loop = asyncio.new_event_loop() + asyncio.set_event_loop(loop) + + if loop.is_running(): + # Already inside an event loop (e.g. some backends or async-enabled ComfyUI) + import threading + + result_container = {} + + def run_coroutine(): + coro = execution.validate_prompt(prompt_id, prompt, None) + result_container["result"] = asyncio.run(coro) + + thread = threading.Thread(target=run_coroutine) + thread.start() + thread.join() + + valid = result_container["result"] + else: + # Safe to run directly + valid = loop.run_until_complete(execution.validate_prompt(prompt_id, prompt, None)) + + if valid[0]: + ttNl(f'{CC.GREY}X: {x_label}, Y: {y_label} Z: {z_label}').t(f'Plot Values {self.num}/{self.total} ->').p() + + self.executor.execute(prompt, self.num, extra_data, valid[2]) + + if len(self.executor.outputs.get(self.unique_id, [])) > 2: + self.latent_list.append(self.executor.outputs[self.unique_id][-6][0]["samples"]) + + image = self.executor.outputs[self.unique_id][-3][0] + else: + current_node = prompt[self.unique_id] + input_link = current_node["inputs"]["image"] + + image = self.executor.outputs[input_link[0]][input_link[1]][0] + + pil_image = ttNsampler.tensor2pil(image) + self.image_list.append(pil_image) + + self.max_width = max(self.max_width, pil_image.width) + self.max_height = max(self.max_height, pil_image.height) + else: + raise Exception(valid[1]) + + @staticmethod + def _parse_value(input_name, value, node_inputs, input_types, regex): + # append mode + if '.append' in input_name: + input_name = input_name.replace('.append', '') + value = node_inputs[input_name] + ' ' + value + + # Search and Replace + matches = regex.findall(value) + if matches: + value = node_inputs[input_name] + for search, replace in matches: + pattern = re.compile(re.escape(search), re.IGNORECASE) + value = pattern.sub(replace, value) + + # set value to correct type + for itype in ['required', 'optional']: + for iname in input_types.get(itype) 
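# A minimal standalone sketch of the %search;replace% syntax consumed by
# _parse_value above: each match performs a case-insensitive, literal
# (re.escape'd) substitution on the node's current value instead of
# overwriting it outright.
import re

XY_SWAP = re.compile(r'%(.*?);(.*?)%')

def apply_swaps(current_value, plot_value):
    matches = XY_SWAP.findall(plot_value)
    if not matches:
        return plot_value
    value = current_value
    for search, replace in matches:
        value = re.compile(re.escape(search), re.IGNORECASE).sub(replace, value)
    return value

assert apply_swaps("a Red car", "%red;blue%") == "a blue car"
assert apply_swaps("a Red car", "euler") == "euler"   # plain values pass through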
or []: + if iname == input_name: + ivalues = input_types[itype][iname] + if ivalues[0] == 'INT': + value = int(float(value)) + elif ivalues[0] == 'FLOAT': + value = float(value) + elif ivalues[0] in ['BOOL', 'BOOLEAN']: + if value.lower() == 'true': + value = True + elif value.lower() == 'false': + value = False + value = bool(value) + elif type(ivalues[0]) == list: + if value not in ivalues[0]: + raise KeyError(f'"{value}" not a valid value for input "{iname}" in xyplot') + + return input_name, value + + def xy_plot_process(self): + if self.x_points is None and self.y_points is None: + return None, None, None, + + regex = re.compile(r'%(.*?);(.*?)%') + + x_label, y_label, z_label = None, None, None + base_prompt = self.get_relevant_prompt() + + if self.z_points is None: + self.z_points = {'1': {'label': None}} + + plot_images = [] + pil_images = [] + images = [] + latents = [] + + def update_prompt(prompt, nodes): + for node_id, inputs in nodes.items(): + if node_id == 'label': + continue + try: + node_inputs = prompt[node_id]["inputs"] + except KeyError: + raise KeyError(f'Node with ID: [{node_id}] not found in prompt for xyPlot') + class_type = prompt[node_id]["class_type"] + class_def = COMFY_CLASS_MAPPINGS[class_type] + input_types = class_def.INPUT_TYPES() + + for input_name, value in inputs.items(): + input_name, value = self._parse_value(input_name, value, node_inputs, input_types, regex) + node_inputs[input_name] = value + + return prompt + + def execute_y_plot(prompt, x_label, z_label): + for _, nodes in self.y_points.items(): + y_label = nodes["label"] + self.y_labels.append(y_label) + y_prompt = copy.deepcopy(prompt) + y_prompt = update_prompt(y_prompt, nodes) + + self.num += 1 + self.execute_prompt(y_prompt, self.extra_pnginfo, x_label, y_label, z_label) + + for _, nodes in self.z_points.items(): + z_label = nodes["label"] + z_prompt = copy.deepcopy(base_prompt) + z_prompt = update_prompt(z_prompt, nodes) + + if self.x_points: + for _, nodes in self.x_points.items(): + x_label = nodes["label"] + self.x_labels.append(x_label) + x_prompt = copy.deepcopy(z_prompt) + x_prompt = update_prompt(x_prompt, nodes) + + if self.y_points: + execute_y_plot(x_prompt, x_label, z_label) + else: + self.num += 1 + self.execute_prompt(x_prompt, self.extra_pnginfo, x_label, y_label, z_label) + + elif self.y_points: + execute_y_plot(z_prompt, None, z_label) + + # Rearrange latent array to match preview image grid + if len(self.latent_list) > 0: + latents.extend(self.rearrange_tensors(self.latent_list, self.num_cols, self.num_rows)) + + # Plot images + plot_images.append(self.plot_images(z_label)) + + # Rearrange images for outputs + pil_images.extend(self.rearrange_tensors(self.image_list, self.num_cols, self.num_rows)) + + self.clear_caches() + + # Concatenate the tensors along the first dimension (dim=0) + if len(latents) > 0: + latents = torch.cat(latents, dim=0) + + for image in pil_images: + images.append(sampler.pil2tensor(image)) + + plot_out = torch.cat(plot_images, dim=0) + images_out = torch.cat(images, dim=0) + samples = {"samples": latents} + + return plot_out, images_out, samples + +class ttNsave: + def __init__(self, my_unique_id=0, prompt=None, extra_pnginfo=None, number_padding=5, overwrite_existing=False, output_dir=folder_paths.get_temp_directory()): + self.number_padding = int(number_padding) if number_padding not in [None, "None", 0] else None + self.overwrite_existing = overwrite_existing + self.my_unique_id = my_unique_id + self.prompt = prompt + self.extra_pnginfo = 
extra_pnginfo + self.type = 'temp' + self.output_dir = output_dir + if self.output_dir != folder_paths.get_temp_directory(): + self.output_dir = self.folder_parser(self.output_dir, self.prompt, self.my_unique_id) + if not os.path.exists(self.output_dir): + self._create_directory(self.output_dir) + + @staticmethod + def _create_directory(folder: str): + """Try to create the directory and log the status.""" + ttNl(f"Folder {folder} does not exist. Attempting to create...").warn().p() + if not os.path.exists(folder): + try: + os.makedirs(folder) + ttNl(f"{folder} Created Successfully").success().p() + except OSError: + ttNl(f"Failed to create folder {folder}").error().p() + pass + + @staticmethod + def _map_filename(filename: str, filename_prefix: str) -> Tuple[int, str, Optional[int]]: + """Utility function to map filename to its parts.""" + + # Get the prefix length and extract the prefix + prefix_len = len(os.path.basename(filename_prefix)) + prefix = filename[:prefix_len] + + # Search for the primary digits + digits = re.search(r'(\d+)', filename[prefix_len:]) + + # Search for the number in brackets after the primary digits + group_id = re.search(r'\((\d+)\)', filename[prefix_len:]) + + return (int(digits.group()) if digits else 0, prefix, int(group_id.group(1)) if group_id else 0) + + @staticmethod + def _format_date(text: str, date: datetime.datetime) -> str: + """Format the date according to specific patterns.""" + date_formats = { + 'd': lambda d: d.day, + 'dd': lambda d: '{:02d}'.format(d.day), + 'M': lambda d: d.month, + 'MM': lambda d: '{:02d}'.format(d.month), + 'h': lambda d: d.hour, + 'hh': lambda d: '{:02d}'.format(d.hour), + 'm': lambda d: d.minute, + 'mm': lambda d: '{:02d}'.format(d.minute), + 's': lambda d: d.second, + 'ss': lambda d: '{:02d}'.format(d.second), + 'y': lambda d: d.year, + 'yy': lambda d: str(d.year)[2:], + 'yyy': lambda d: str(d.year)[1:], + 'yyyy': lambda d: d.year, + } + + # We need to sort the keys in reverse order to ensure we match the longest formats first + for format_str in sorted(date_formats.keys(), key=len, reverse=True): + if format_str in text: + text = text.replace(format_str, str(date_formats[format_str](date))) + return text + + @staticmethod + def _gather_all_inputs(prompt: Dict[str, dict], unique_id: str, linkInput: str = '', collected_inputs: Optional[Dict[str, Union[str, List[str]]]] = None) -> Dict[str, Union[str, List[str]]]: + """Recursively gather all inputs from the prompt dictionary.""" + if prompt == None: + return None + + collected_inputs = collected_inputs or {} + prompt_inputs = prompt[str(unique_id)]["inputs"] + + for p_input, p_input_value in prompt_inputs.items(): + a_input = f"{linkInput}>{p_input}" if linkInput else p_input + + if isinstance(p_input_value, list): + ttNsave._gather_all_inputs(prompt, p_input_value[0], a_input, collected_inputs) + else: + existing_value = collected_inputs.get(a_input) + if existing_value is None: + collected_inputs[a_input] = p_input_value + elif p_input_value not in existing_value: + collected_inputs[a_input] = existing_value + "; " + p_input_value + + return collected_inputs + + @staticmethod + def _get_filename_with_padding(output_dir, filename, number_padding, group_id, ext): + """Return filename with proper padding.""" + try: + filtered = list(filter(lambda a: a[1] == filename, map(lambda x: ttNsave._map_filename(x, filename), os.listdir(output_dir)))) + last = max(filtered)[0] + + for f in filtered: + if f[0] == last: + if f[2] == 0 or f[2] == group_id: + last += 1 + counter = last + 
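# A minimal standalone sketch of the %date% token table used by _format_date
# above (seconds omitted for brevity); keys are substituted longest-first so
# "yyyy" is consumed before "yy" can match inside it.
import datetime

def format_date(text, d):
    table = {'dd': f'{d.day:02d}', 'd': str(d.day),
             'MM': f'{d.month:02d}', 'M': str(d.month),
             'hh': f'{d.hour:02d}', 'h': str(d.hour),
             'mm': f'{d.minute:02d}', 'm': str(d.minute),
             'yyyy': str(d.year), 'yy': str(d.year)[2:]}
    for key in sorted(table, key=len, reverse=True):
        text = text.replace(key, table[key])
    return text

assert format_date('yyyy-MM-dd', datetime.datetime(2024, 3, 7)) == '2024-03-07'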
except (ValueError, FileNotFoundError): + os.makedirs(output_dir, exist_ok=True) + counter = 1 + + if group_id == 0: + return f"{filename}.{ext}" if number_padding is None else f"{filename}_{counter:0{number_padding}}.{ext}" + else: + return f"{filename}_({group_id}).{ext}" if number_padding is None else f"{filename}_{counter:0{number_padding}}_({group_id}).{ext}" + + @staticmethod + def filename_parser(output_dir: str, filename_prefix: str, prompt: Dict[str, dict], my_unique_id: str, number_padding: int, group_id: int, ext: str) -> str: + """Parse the filename using provided patterns and replace them with actual values.""" + filename = re.sub(r'%date:(.*?)%', lambda m: ttNsave._format_date(m.group(1), datetime.datetime.now()), filename_prefix) + all_inputs = ttNsave._gather_all_inputs(prompt, my_unique_id) + + #filename = re.sub(r'%(.*?)\s*(?::(\d+))?%', lambda m: re.sub(r'[^a-zA-Z0-9_\-\. ]', '', str(all_inputs.get(m.group(1), ''))[:int(m.group(2)) if m.group(2) else len(str(all_inputs.get(m.group(1), '')))]), filename) + + filename = re.sub(r'%(.*?)%', lambda m: re.sub(r'[^a-zA-Z0-9_\-\. ]', '', str(all_inputs.get(m.group(1), ''))), filename) + + subfolder = os.path.dirname(os.path.normpath(filename)) + filename = os.path.basename(os.path.normpath(filename)) + + output_dir = os.path.join(output_dir, subfolder) + + filename = re.sub(r'[^a-zA-Z0-9_\-\. ]', '', filename)[:240-len(ext)] + filename = ttNsave._get_filename_with_padding(output_dir, filename, number_padding, group_id, ext) + + return filename, subfolder + + @staticmethod + def folder_parser(output_dir: str, prompt: Dict[str, dict], my_unique_id: str): + output_dir = re.sub(r'%date:(.*?)%', lambda m: ttNsave._format_date(m.group(1), datetime.datetime.now()), output_dir) + all_inputs = ttNsave._gather_all_inputs(prompt, my_unique_id) + + return re.sub(r'%(.*?)%', lambda m: re.sub(r'[^a-zA-Z0-9_\-\. ]', '', str(all_inputs.get(m.group(1), ''))), output_dir) + #return re.sub(r'%(.*?)\s*(?::(\d+))?%', lambda m: re.sub(r'[^a-zA-Z0-9_\-\. ]', '', str(all_inputs.get(m.group(1), ''))[:int(m.group(2)) if m.group(2) else len(str(all_inputs.get(m.group(1), '')))]), output_dir) + + def images(self, images, filename_prefix, output_type, embed_workflow=True, ext="png", group_id=0): + FORMAT_MAP = { + "png": "PNG", + "jpg": "JPEG", + "jpeg": "JPEG", + "bmp": "BMP", + "tif": "TIFF", + "tiff": "TIFF", + "webp": "WEBP", + } + + if ext not in FORMAT_MAP: + raise ValueError(f"Unsupported file extension {ext}") + + if output_type in ("Hide", "Disabled"): + return list() + if output_type in ("Save", "Hide/Save"): + output_dir = self.output_dir if self.output_dir != folder_paths.get_temp_directory() else folder_paths.get_output_directory() + self.type = "output" + if output_type == "Preview": + output_dir = folder_paths.get_temp_directory() + filename_prefix = 'ttNpreview' + ext = "png" + + results=list() + for image in images: + img = Image.fromarray(np.clip(255. 
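# A minimal standalone sketch, loosely modelled on _map_filename and
# _get_filename_with_padding above (group ids ignored for brevity): scan files
# sharing the prefix, take the highest trailing number, and emit the next
# zero-padded name.
import re

def next_padded(existing, prefix, padding=5, ext="png"):
    counter = 0
    for name in existing:
        m = re.search(r'(\d+)', name[len(prefix):])
        if m:
            counter = max(counter, int(m.group()))
    return f"{prefix}_{counter + 1:0{padding}}.{ext}"

assert next_padded(["img_00001.png", "img_00007.png"], "img") == "img_00008.png"
assert next_padded([], "img") == "img_00001.png"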
* image.cpu().numpy(), 0, 255).astype(np.uint8)) + + filename = filename_prefix.replace("%width%", str(img.size[0])).replace("%height%", str(img.size[1])) + + filename, subfolder = ttNsave.filename_parser(output_dir, filename, self.prompt, self.my_unique_id, self.number_padding, group_id, ext) + + file_path = os.path.join(output_dir, subfolder, filename) + + if (embed_workflow in (True, "True")) and (ext in ("png", "webp")): + if ext == "png": + metadata = PngInfo() + if self.prompt is not None: + metadata.add_text("prompt", json.dumps(self.prompt)) + + if self.extra_pnginfo is not None: + for x in self.extra_pnginfo: + metadata.add_text(x, json.dumps(self.extra_pnginfo[x])) + + if self.overwrite_existing or not os.path.isfile(file_path): + img.save(file_path, pnginfo=metadata, format=FORMAT_MAP[ext]) + else: + ttNl(f"File {file_path} already exists... Skipping").error().p() + + if ext == "webp": + img_exif = img.getexif() + workflow_metadata = '' + prompt_str = '' + if self.prompt is not None: + prompt_str = json.dumps(self.prompt) + img_exif[0x010f] = "Prompt:" + prompt_str + + if self.extra_pnginfo is not None: + for x in self.extra_pnginfo: + workflow_metadata += json.dumps(self.extra_pnginfo[x]) + + img_exif[0x010e] = "Workflow:" + workflow_metadata + exif_data = img_exif.tobytes() + + if self.overwrite_existing or not os.path.isfile(file_path): + img.save(file_path, exif=exif_data, format=FORMAT_MAP[ext]) + else: + ttNl(f"File {file_path} already exists... Skipping").error().p() + else: + if self.overwrite_existing or not os.path.isfile(file_path): + img.save(file_path, format=FORMAT_MAP[ext]) + else: + ttNl(f"File {file_path} already exists... Skipping").error().p() + + results.append({ + "filename": file_path, + "subfolder": subfolder, + "type": self.type + }) + + return results + + def textfile(self, text, filename_prefix, ext='txt'): + output_dir = self.output_dir if self.output_dir != folder_paths.get_temp_directory() else folder_paths.get_output_directory() + + filename, subfolder = ttNsave.filename_parser(output_dir, filename_prefix, self.prompt, self.my_unique_id, self.number_padding, 0, ext) + + file_path = os.path.join(output_dir, subfolder, filename) + + if self.overwrite_existing or not os.path.isfile(file_path): + with open(file_path, 'w') as f: + f.write(text) + else: + ttNl(f"File {file_path} already exists... 
Skipping").error().p() + +loader = ttNloader() +sampler = ttNsampler() + +#---------------------------------------------------------------ttN/pipe START----------------------------------------------------------------------# +class ttN_pipeLoader_v2: + version = '2.1.0' + @classmethod + def INPUT_TYPES(cls): + aspect_ratios = ["width x height [custom]", + "512 x 512 [S] 1:1", + "768 x 768 [S] 1:1", + "910 x 910 [S] 1:1", + + "512 x 682 [P] 3:4", + "512 x 768 [P] 2:3", + "512 x 910 [P] 9:16", + + "682 x 512 [L] 4:3", + "768 x 512 [L] 3:2", + "910 x 512 [L] 16:9", + + "512 x 1024 [P] 1:2", + "1024 x 512 [L] 2:1", + "1024 x 1024 [S] 1:1", + ] + + return {"required": { + "ckpt_name": (folder_paths.get_filename_list("checkpoints"), ), + "config_name": (["Default",] + folder_paths.get_filename_list("configs"), {"default": "Default"} ), + "vae_name": (["Baked VAE"] + folder_paths.get_filename_list("vae"),), + "clip_skip": ("INT", {"default": -1, "min": -24, "max": 0, "step": 1}), + + "loras": ("STRING", {"placeholder": "", "multiline": True}), + + "positive": ("STRING", {"default": "Positive","multiline": True, "dynamicPrompts": True}), + "positive_token_normalization": (["none", "mean", "length", "length+mean"],), + "positive_weight_interpretation": (["comfy", "A1111", "compel", "comfy++", "down_weight"],), + + "negative": ("STRING", {"default": "Negative", "multiline": True, "dynamicPrompts": True}), + "negative_token_normalization": (["none", "mean", "length", "length+mean"],), + "negative_weight_interpretation": (["comfy", "A1111", "compel", "comfy++", "down_weight"],), + + "empty_latent_aspect": (aspect_ratios, {"default":"512 x 512 [S] 1:1"}), + "empty_latent_width": ("INT", {"default": 512, "min": 64, "max": MAX_RESOLUTION, "step": 8}), + "empty_latent_height": ("INT", {"default": 512, "min": 64, "max": MAX_RESOLUTION, "step": 8}), + "batch_size": ("INT", {"default": 1, "min": 1, "max": 64}), + "seed": ("INT", {"default": 0, "min": 0, "max": 0xffffffffffffffff}), + }, + "optional": { + "model_override": ("MODEL",), + "clip_override": ("CLIP",), + "optional_lora_stack": ("LORA_STACK",), + "optional_controlnet_stack": ("CONTROL_NET_STACK",), + "prepend_positive": ("STRING", {"forceInput": True}), + "prepend_negative": ("STRING", {"forceInput": True}), + }, + "hidden": {"prompt": "PROMPT", "ttNnodeVersion": ttN_pipeLoader_v2.version, "my_unique_id": "UNIQUE_ID",} + } + + RETURN_TYPES = ("PIPE_LINE" ,"MODEL", "CONDITIONING", "CONDITIONING", "LATENT", "VAE", "CLIP", "INT", "INT", "INT", "STRING", "STRING") + RETURN_NAMES = ("pipe","model", "positive", "negative", "latent", "vae", "clip", "seed", "width", "height", "pos_string", "neg_string") + + FUNCTION = "adv_pipeloader" + CATEGORY = "🌏 tinyterra/pipe" + + def adv_pipeloader(self, ckpt_name, config_name, vae_name, clip_skip, + loras, + positive, positive_token_normalization, positive_weight_interpretation, + negative, negative_token_normalization, negative_weight_interpretation, + empty_latent_aspect, empty_latent_width, empty_latent_height, batch_size, seed, + model_override=None, clip_override=None, optional_lora_stack=None, optional_controlnet_stack=None, prepend_positive=None, prepend_negative=None, + prompt=None, my_unique_id=None): + + model: ModelPatcher | None = None + clip: CLIP | None = None + vae: VAE | None = None + + loader.clear_cache(prompt) + model, clip, vae = loader.load_main3(ckpt_name, config_name, vae_name, loras, clip_skip, model_override, clip_override, optional_lora_stack, my_unique_id) + + # Create Empty Latent + sd3 = 
True if sampler.get_model_type(model) in ['FLUX', 'FLOW'] else False + latent = sampler.emptyLatent(empty_latent_aspect, batch_size, empty_latent_width, empty_latent_height, sd3) + samples = {"samples":latent} + + positive_embedding = loader.embedding_encode(positive, positive_token_normalization, positive_weight_interpretation, clip, seed=seed, title='pipeLoader Positive', my_unique_id=my_unique_id, prepend_text=prepend_positive) + negative_embedding = loader.embedding_encode(negative, negative_token_normalization, negative_weight_interpretation, clip, seed=seed, title='pipeLoader Negative', my_unique_id=my_unique_id, prepend_text=prepend_negative) + + if optional_controlnet_stack is not None and len(optional_controlnet_stack) > 0: + for cnt in optional_controlnet_stack: + positive_embedding, negative_embedding = loader.load_controlNet(positive_embedding, negative_embedding, cnt[0], cnt[1], cnt[2], cnt[3], cnt[4]) + + image = None + + pipe = {"model": model, + "positive": positive_embedding, + "negative": negative_embedding, + "vae": vae, + "clip": clip, + + "samples": samples, + "images": image, + "seed": seed, + + "loader_settings": None, + } + + final_positive = (prepend_positive + ' ' if prepend_positive else '') + (positive + ' ' if positive else '') + final_negative = (prepend_negative + ' ' if prepend_negative else '') + (negative + ' ' if negative else '') + + return (pipe, model, positive_embedding, negative_embedding, samples, vae, clip, seed, empty_latent_width, empty_latent_height, final_positive, final_negative) + +class ttN_pipeKSampler_v2: + version = '2.3.1' + def __init__(self): + pass + + @classmethod + def INPUT_TYPES(cls): + return {"required": + {"pipe": ("PIPE_LINE",), + + "lora_name": (["None"] + folder_paths.get_filename_list("loras"),), + "lora_strength": ("FLOAT", {"default": 1.0, "min": -10.0, "max": 10.0, "step": 0.01}), + + "upscale_method": (UPSCALE_METHODS, {"default": "None"}), + "upscale_model_name": (UPSCALE_MODELS,), + "factor": ("FLOAT", {"default": 2, "min": 0.0, "max": 10.0, "step": 0.25}), + "rescale": (["by percentage", "to Width/Height", 'to longer side - maintain aspect', 'None'],), + "percent": ("INT", {"default": 50, "min": 0, "max": 1000, "step": 1}), + "width": ("INT", {"default": 1024, "min": 64, "max": MAX_RESOLUTION, "step": 8}), + "height": ("INT", {"default": 1024, "min": 64, "max": MAX_RESOLUTION, "step": 8}), + "longer_side": ("INT", {"default": 1024, "min": 64, "max": MAX_RESOLUTION, "step": 8}), + "crop": (CROP_METHODS,), + + "steps": ("INT", {"default": 20, "min": 1, "max": 10000}), + "cfg": ("FLOAT", {"default": 8.0, "min": 0.0, "max": 100.0}), + "sampler_name": (comfy.samplers.KSampler.SAMPLERS,), + "scheduler": (comfy.samplers.KSampler.SCHEDULERS + CUSTOM_SCHEDULERS,), + "denoise": ("FLOAT", {"default": 1.0, "min": 0.0, "max": 1.0, "step": 0.01}), + "image_output": (["Hide", "Preview", "Save", "Hide/Save", "Disabled"],), + "save_prefix": ("STRING", {"default": "ComfyUI"}), + "file_type": (OUTPUT_FILETYPES,{"default": "png"}), + "embed_workflow": ("BOOLEAN", {"default": True}), + }, + "optional": + {"seed": ("INT", {"default": 0, "min": 0, "max": 0xffffffffffffffff}), + "optional_model": ("MODEL",), + "optional_positive": ("CONDITIONING",), + "optional_negative": ("CONDITIONING",), + "optional_latent": ("LATENT",), + "optional_vae": ("VAE",), + "optional_clip": ("CLIP",), + "input_image_override": ("IMAGE",), + "adv_xyPlot": ("ADV_XYPLOT",), + }, + "hidden": + {"prompt": "PROMPT", "extra_pnginfo": "EXTRA_PNGINFO", "my_unique_id": 
"UNIQUE_ID", + "ttNnodeVersion": ttN_pipeKSampler_v2.version}, + } + + RETURN_TYPES = ("PIPE_LINE", "MODEL", "CONDITIONING", "CONDITIONING", "LATENT", "VAE", "CLIP", "IMAGE", "INT", "IMAGE") + RETURN_NAMES = ("pipe", "model", "positive", "negative", "latent","vae", "clip", "images", "seed", "plot_image") + OUTPUT_NODE = True + FUNCTION = "sample" + CATEGORY = "🌏 tinyterra/pipe" + + def sample(self, pipe, + lora_name, lora_strength, + steps, cfg, sampler_name, scheduler, image_output, save_prefix, file_type, embed_workflow, denoise=1.0, + optional_model=None, optional_positive=None, optional_negative=None, optional_latent=None, optional_vae=None, optional_clip=None, input_image_override=None, + seed=None, adv_xyPlot=None, upscale_model_name=None, upscale_method=None, factor=None, rescale=None, percent=None, width=None, height=None, longer_side=None, crop=None, + prompt=None, extra_pnginfo=None, my_unique_id=None, start_step=None, last_step=None, force_full_denoise=False, disable_noise=False): + + my_unique_id = int(my_unique_id) + + ttN_save = ttNsave(my_unique_id, prompt, extra_pnginfo) + + samp_model = optional_model if optional_model is not None else pipe["model"] + samp_positive = optional_positive if optional_positive is not None else pipe["positive"] + samp_negative = optional_negative if optional_negative is not None else pipe["negative"] + samp_samples = optional_latent if optional_latent is not None else pipe["samples"] + samp_images = input_image_override if input_image_override is not None else pipe["images"] + samp_vae = optional_vae if optional_vae is not None else pipe["vae"] + samp_clip = optional_clip if optional_clip is not None else pipe["clip"] + + if seed in (None, 'undefined'): + samp_seed = pipe["seed"] + else: + samp_seed = seed + + del pipe + + def process_sample_state(samp_model, samp_images, samp_clip, samp_samples, samp_vae, samp_seed, samp_positive, samp_negative, lora_name, lora_model_strength, lora_clip_strength, + upscale_model_name, upscale_method, factor, rescale, percent, width, height, longer_side, crop, + steps, cfg, sampler_name, scheduler, denoise, + image_output, save_prefix, file_type, embed_workflow, prompt, extra_pnginfo, my_unique_id, preview_latent, start_step=start_step, last_step=last_step, force_full_denoise=force_full_denoise, disable_noise=disable_noise): + # Load Lora + if lora_name not in (None, "None"): + samp_model, samp_clip = loader.load_lora(lora_name, samp_model, samp_clip, lora_model_strength, lora_clip_strength) + + # Upscale samples if enabled + if upscale_method != "None": + samp_samples = sampler.handle_upscale(samp_samples, upscale_method, factor, crop, upscale_model_name, samp_vae, samp_images, rescale, percent, width, height, longer_side) + + samp_samples = sampler.common_ksampler(samp_model, samp_seed, steps, cfg, sampler_name, scheduler, samp_positive, samp_negative, samp_samples, denoise=denoise, preview_latent=preview_latent, start_step=start_step, last_step=last_step, force_full_denoise=force_full_denoise, disable_noise=disable_noise) + + results = list() + if (image_output != "Disabled"): + # Save images + latent = samp_samples["samples"] + samp_images = samp_vae.decode(latent) + + results = ttN_save.images(samp_images, save_prefix, image_output, embed_workflow, file_type) + + new_pipe = { + "model": samp_model, + "positive": samp_positive, + "negative": samp_negative, + "vae": samp_vae, + "clip": samp_clip, + + "samples": samp_samples, + "images": samp_images, + "seed": samp_seed, + + "loader_settings": None, + } + + if 
image_output in ("Hide", "Hide/Save", "Disabled"): + return (*sampler.get_output(new_pipe), None) + + return {"ui": {"images": results}, + "result": (*sampler.get_output(new_pipe), None)} + + def process_xyPlot(samp_model, samp_clip, samp_samples, samp_vae, samp_seed, samp_positive, samp_negative, lora_name, lora_model_strength, lora_clip_strength, + steps, cfg, sampler_name, scheduler, denoise, + image_output, save_prefix, file_type, embed_workflow, prompt, extra_pnginfo, my_unique_id, preview_latent, adv_xyPlot): + + random.seed(seed) + + executor = xyExecutor() + plotter = ttNadv_xyPlot(adv_xyPlot, my_unique_id, prompt, extra_pnginfo, save_prefix, image_output, executor) + plot_image, images, samples = plotter.xy_plot_process() + plotter.reset() + del executor, plotter + + if samples is None and images is None: + return process_sample_state(samp_model, samp_images, samp_clip, samp_samples, samp_vae, samp_seed, samp_positive, samp_negative, lora_name, lora_model_strength, lora_clip_strength, + upscale_model_name, upscale_method, factor, rescale, percent, width, height, longer_side, crop, + steps, cfg, sampler_name, scheduler, denoise, + image_output, save_prefix, file_type, embed_workflow, prompt, extra_pnginfo, my_unique_id, preview_latent, start_step=start_step, last_step=last_step, force_full_denoise=force_full_denoise, disable_noise=disable_noise) + + + plot_result = ttN_save.images(plot_image, save_prefix, image_output, embed_workflow, file_type) + #plot_result.extend(ui_results) + + new_pipe = { + "model": samp_model, + "positive": samp_positive, + "negative": samp_negative, + "vae": samp_vae, + "clip": samp_clip, + + "samples": samples, + "images": images, + "seed": samp_seed, + + "loader_settings": None, + } + + if image_output in ("Hide", "Hide/Save"): + return (*sampler.get_output(new_pipe), plot_image) + + return {"ui": {"images": plot_result}, "result": (*sampler.get_output(new_pipe), plot_image)} + + preview_latent = True + if image_output in ("Hide", "Hide/Save", "Disabled"): + preview_latent = False + + if adv_xyPlot is None: + return process_sample_state(samp_model, samp_images, samp_clip, samp_samples, samp_vae, samp_seed, samp_positive, samp_negative, lora_name, lora_strength, lora_strength, + upscale_model_name, upscale_method, factor, rescale, percent, width, height, longer_side, crop, + steps, cfg, sampler_name, scheduler, denoise, image_output, save_prefix, file_type, embed_workflow, prompt, extra_pnginfo, my_unique_id, preview_latent) + else: + return process_xyPlot(samp_model, samp_clip, samp_samples, samp_vae, samp_seed, samp_positive, samp_negative, lora_name, lora_strength, lora_strength, steps, cfg, sampler_name, + scheduler, denoise, image_output, save_prefix, file_type, embed_workflow, prompt, extra_pnginfo, my_unique_id, preview_latent, adv_xyPlot) + +class ttN_pipeKSamplerAdvanced_v2: + version = '2.3.0' + def __init__(self): + pass + + @classmethod + def INPUT_TYPES(cls): + return { + "required": { + "pipe": ("PIPE_LINE",), + + "lora_name": (["None"] + folder_paths.get_filename_list("loras"),), + "lora_strength": ("FLOAT", {"default": 1.0, "min": -10.0, "max": 10.0, "step": 0.01}), + + "upscale_method": (UPSCALE_METHODS, {"default": "None"}), + "upscale_model_name": (UPSCALE_MODELS,), + "factor": ("FLOAT", {"default": 2, "min": 0.0, "max": 10.0, "step": 0.25}), + "rescale": (["by percentage", "to Width/Height", 'to longer side - maintain aspect', 'None'],), + "percent": ("INT", {"default": 50, "min": 0, "max": 1000, "step": 1}), + "width": ("INT", 
{"default": 1024, "min": 64, "max": MAX_RESOLUTION, "step": 8}), + "height": ("INT", {"default": 1024, "min": 64, "max": MAX_RESOLUTION, "step": 8}), + "longer_side": ("INT", {"default": 1024, "min": 64, "max": MAX_RESOLUTION, "step": 8}), + "crop": (CROP_METHODS,), + + "add_noise": (["enable", "disable"], ), + "noise": ("FLOAT", {"default": 1.0, "min": 0.0, "max": 1.0, "step": 0.01}), + + + "steps": ("INT", {"default": 20, "min": 1, "max": 10000}), + "start_at_step": ("INT", {"default": 0, "min": 0, "max": 10000}), + "end_at_step": ("INT", {"default": 10000, "min": 0, "max": 10000}), + "cfg": ("FLOAT", {"default": 8.0, "min": 0.0, "max": 100.0}), + "sampler_name": (comfy.samplers.KSampler.SAMPLERS,), + "scheduler": (comfy.samplers.KSampler.SCHEDULERS + CUSTOM_SCHEDULERS,), + "return_with_leftover_noise": (["disable", "enable"], ), + "image_output": (["Hide", "Preview", "Save", "Hide/Save", "Disabled"],), + "save_prefix": ("STRING", {"default": "ComfyUI"}), + "file_type": (OUTPUT_FILETYPES,{"default": "png"}), + "embed_workflow": ("BOOLEAN", {"default": True}), + }, + "optional": { + "noise_seed": ("INT", {"default": 0, "min": 0, "max": 0xffffffffffffffff}), + "optional_model": ("MODEL",), + "optional_positive": ("CONDITIONING",), + "optional_negative": ("CONDITIONING",), + "optional_latent": ("LATENT",), + "optional_vae": ("VAE",), + "optional_clip": ("CLIP",), + "input_image_override": ("IMAGE",), + "adv_xyPlot": ("ADV_XYPLOT",), + }, + "hidden": { + "prompt": "PROMPT", + "extra_pnginfo": "EXTRA_PNGINFO", + "my_unique_id": "UNIQUE_ID", + "ttNnodeVersion": ttN_pipeKSamplerAdvanced_v2.version + }, + } + RETURN_TYPES = ("PIPE_LINE", "MODEL", "CONDITIONING", "CONDITIONING", "LATENT", "VAE", "CLIP", "IMAGE", "INT", "IMAGE") + RETURN_NAMES = ("pipe", "model", "positive", "negative", "latent","vae", "clip", "images", "seed", "plot_image") + OUTPUT_NODE = True + FUNCTION = "adv_sample" + CATEGORY = "🌏 tinyterra/pipe" + + def adv_sample(self, pipe, + lora_name, lora_strength, + add_noise, steps, cfg, sampler_name, scheduler, image_output, save_prefix, file_type, embed_workflow, noise, + noise_seed=None, optional_model=None, optional_positive=None, optional_negative=None, optional_latent=None, optional_vae=None, optional_clip=None, input_image_override=None, adv_xyPlot=None, upscale_method=None, upscale_model_name=None, factor=None, rescale=None, percent=None, width=None, height=None, longer_side=None, crop=None, prompt=None, extra_pnginfo=None, my_unique_id=None, start_at_step=None, end_at_step=None, return_with_leftover_noise=False): + + force_full_denoise = True + if return_with_leftover_noise == "enable": + force_full_denoise = False + + disable_noise = False + if add_noise == "disable": + disable_noise = True + + return ttN_pipeKSampler_v2.sample(self, pipe, lora_name, lora_strength, steps, cfg, sampler_name, scheduler, image_output, save_prefix, file_type, embed_workflow, noise, + optional_model, optional_positive, optional_negative, optional_latent, optional_vae, optional_clip, input_image_override, noise_seed, adv_xyPlot, upscale_model_name, upscale_method, factor, rescale, percent, width, height, longer_side, crop, prompt, extra_pnginfo, my_unique_id, start_at_step, end_at_step, force_full_denoise, disable_noise) + +class ttN_pipeLoaderSDXL_v2: + version = '2.1.0' + @classmethod + def INPUT_TYPES(cls): + aspect_ratios = ["width x height [custom]", + "1024 x 1024 [S] 1:1", + + "640 x 1536 [P] 9:21", + "704 x 1472 [P] 9:19", + "768 x 1344 [P] 9:16", + "768 x 1216 [P] 5:8", + "832 x 1216 [P] 
2:3", + "896 x 1152 [P] 3:4", + + "1536 x 640 [L] 21:9", + "1472 x 704 [L] 19:9", + "1344 x 768 [L] 16:9", + "1216 x 768 [L] 8:5", + "1216 x 832 [L] 3:2", + "1152 x 896 [L] 4:3", + ] + relative_ratios = ["width x height [custom]", + "1x Empty Latent Aspect", + "2x Empty Latent Aspect", + "3x Empty Latent Aspect", + "4x Empty Latent Aspect", + "5x Empty Latent Aspect", + "6x Empty Latent ASpect", + "7x Empty Latent Aspect", + "8x Empty Latent Aspect", + ] + + return {"required": { + "ckpt_name": (folder_paths.get_filename_list("checkpoints"), ), + "config_name": (["Default",] + folder_paths.get_filename_list("configs"), {"default": "Default"} ), + "vae_name": (["Baked VAE"] + folder_paths.get_filename_list("vae"),), + "clip_skip": ("INT", {"default": -2, "min": -24, "max": 0, "step": 1}), + + "loras": ("STRING", {"placeholder": "Loras - ", "multiline": True}), + + "refiner_ckpt_name": (["None"] + folder_paths.get_filename_list("checkpoints"), ), + "refiner_config_name": (["Default",] + folder_paths.get_filename_list("configs"), {"default": "Default"} ), + + "positive_g": ("STRING", {"placeholder": "Linguistic Positive (positive_g)","multiline": True, "dynamicPrompts": True}), + "positive_l": ("STRING", {"placeholder": "Supporting Terms (positive_l)", "multiline": True, "dynamicPrompts": True}), + "negative_g": ("STRING", {"placeholder": "negative_g", "multiline": True, "dynamicPrompts": True}), + "negative_l": ("STRING", {"placeholder": "negative_l", "multiline": True, "dynamicPrompts": True}), + + "conditioning_aspect": (relative_ratios, {"default": "1x Empty Latent Aspect"}), + "conditioning_width": ("INT", {"default": 2048.0, "min": 64, "max": MAX_RESOLUTION, "step": 8}), + "conditioning_height": ("INT", {"default": 2048.0, "min": 64, "max": MAX_RESOLUTION, "step": 8}), + + "crop_width": ("INT", {"default": 0, "min": 0, "max": MAX_RESOLUTION}), + "crop_height": ("INT", {"default": 0, "min": 0, "max": MAX_RESOLUTION}), + + "target_aspect": (relative_ratios, {"default": "1x Empty Latent Aspect"}), + "target_width": ("INT", {"default": 1024.0, "min": 0, "max": MAX_RESOLUTION}), + "target_height": ("INT", {"default": 1024.0, "min": 0, "max": MAX_RESOLUTION}), + + "positive_ascore": ("INT", {"default": 6.0, "min": 0, "step": 0.1}), + "negative_ascore": ("INT", {"default": 2.0, "min": 0, "step": 0.1}), + + "empty_latent_aspect": (aspect_ratios, {"default": "1024 x 1024 [S] 1:1"}), + "empty_latent_width": ("INT", {"default": 1024, "min": 64, "max": MAX_RESOLUTION, "step": 8}), + "empty_latent_height": ("INT", {"default": 1024, "min": 64, "max": MAX_RESOLUTION, "step": 8}), + "batch_size": ("INT", {"default": 1, "min": 1, "max": 64}), + "seed": ("INT", {"default": 0, "min": 0, "max": 0xffffffffffffffff}), + }, + "optional": { + "model_override": ("MODEL",), + "clip_override": ("CLIP",), + "optional_lora_stack": ("LORA_STACK",), + "optional_controlnet_stack": ("CONTROL_NET_STACK",), + "refiner_model_override": ("MODEL",), + "refiner_clip_override": ("CLIP",), + "prepend_positive_g": ("STRING", {"forceInput": True}), + "prepend_positive_l": ("STRING", {"forceInput": True}), + "prepend_negative_g": ("STRING", {"forceInput": True}), + "prepend_negative_l": ("STRING", {"forceInput": True}), + }, + "hidden": {"prompt": "PROMPT", "ttNnodeVersion": ttN_pipeLoaderSDXL_v2.version, "my_unique_id": "UNIQUE_ID",} + } + + RETURN_TYPES = ("PIPE_LINE_SDXL" ,"MODEL", "CONDITIONING", "CONDITIONING", "VAE", "CLIP", "MODEL", "CONDITIONING", "CONDITIONING", "CLIP", "LATENT", "INT", "INT", "INT", "STRING", "STRING") 
+ RETURN_NAMES = ("sdxl_pipe","model", "positive", "negative", "vae", "clip", "refiner_model", "refiner_positive", "refiner_negative", "refiner_clip", "latent", "seed", "width", "height", "pos_string", "neg_string") + + + FUNCTION = "sdxl_pipeloader" + CATEGORY = "🌏 tinyterra/pipe" + + def sdxl_pipeloader(self, ckpt_name, config_name, vae_name, clip_skip, loras, + refiner_ckpt_name, refiner_config_name, + conditioning_aspect, conditioning_width, conditioning_height, crop_width, crop_height, target_aspect, target_width, target_height, + positive_g, positive_l, negative_g, negative_l, + positive_ascore, negative_ascore, + empty_latent_aspect, empty_latent_width, empty_latent_height, batch_size, seed, + model_override=None, clip_override=None, optional_lora_stack=None, optional_controlnet_stack=None, + refiner_model_override=None, refiner_clip_override=None, + prepend_positive_g=None, prepend_positive_l=None, prepend_negative_g=None, prepend_negative_l=None, + prompt=None, my_unique_id=None): + + model: ModelPatcher | None = None + clip: CLIP | None = None + vae: VAE | None = None + + loader.clear_cache(prompt) + model, clip, vae = loader.load_main3(ckpt_name, config_name, vae_name, loras, clip_skip, model_override, clip_override, optional_lora_stack, my_unique_id) + + # Create Empty Latent + sd3 = True if sampler.get_model_type(model) in ['FLUX', 'FLOW'] else False + latent = sampler.emptyLatent(empty_latent_aspect, batch_size, empty_latent_width, empty_latent_height, sd3) + samples = {"samples":latent} + + if refiner_ckpt_name not in ["None", None]: + refiner_model, refiner_clip, refiner_vae = loader.load_main3(refiner_ckpt_name, refiner_config_name, vae_name, None, clip_skip, refiner_model_override, refiner_clip_override) + else: + refiner_model, refiner_clip, refiner_vae = None, None, None + + if empty_latent_aspect and empty_latent_aspect != "width x height [custom]": + empty_latent_width, empty_latent_height = empty_latent_aspect.replace(' ', '').split('[')[0].split('x') + + if conditioning_aspect and conditioning_aspect != "width x height [custom]": + conditioning_factor = conditioning_aspect.split('x')[0] + conditioning_width = int(conditioning_factor) * int(empty_latent_width) + conditioning_height = int(conditioning_factor) * int(empty_latent_height) + + if target_aspect and target_aspect != "width x height [custom]": + target_factor = target_aspect.split('x')[0] + target_width = int(target_factor) * int(empty_latent_width) + target_height = int(target_factor) * int(empty_latent_height) + + + positive_embedding, refiner_positive_embedding = loader.embedding_encodeXL(positive_g, clip, seed=seed, title='pipeLoaderSDXL Positive', my_unique_id=my_unique_id, prepend_text=prepend_positive_g, text2=positive_l, prepend_text2=prepend_positive_l, width=conditioning_width, height=conditioning_height, crop_width=crop_width, crop_height=crop_height, target_width=target_width, target_height=target_height, refiner_clip=refiner_clip, ascore=positive_ascore) + negative_embedding, refiner_negative_embedding = loader.embedding_encodeXL(negative_g, clip, seed=seed, title='pipeLoaderSDXL Negative', my_unique_id=my_unique_id, prepend_text=prepend_negative_g, text2=negative_l, prepend_text2=prepend_negative_l, width=conditioning_width, height=conditioning_height, crop_width=crop_width, crop_height=crop_height, target_width=target_width, target_height=target_height, refiner_clip=refiner_clip, ascore=negative_ascore) + + + if optional_controlnet_stack is not None: + for cnt in optional_controlnet_stack: + 
positive_embedding, negative_embedding = loader.load_controlNet(positive_embedding, negative_embedding, cnt[0], cnt[1], cnt[2], cnt[3], cnt[4])
+
+        image = None
+
+        sdxl_pipe = {"model": model,
+                     "positive": positive_embedding,
+                     "negative": negative_embedding,
+                     "vae": vae,
+                     "clip": clip,
+
+                     "refiner_model": refiner_model,
+                     "refiner_positive": refiner_positive_embedding,
+                     "refiner_negative": refiner_negative_embedding,
+                     "refiner_clip": refiner_clip,
+
+                     "samples": samples,
+                     "images": image,
+                     "seed": seed,
+
+                     "loader_settings": None
+        }
+
+        final_positive = (prepend_positive_g + ' ' if prepend_positive_g else '') + (positive_g + ' ' if positive_g else '') + (prepend_positive_l + ' ' if prepend_positive_l else '') + (positive_l + ' ' if positive_l else '')
+        final_negative = (prepend_negative_g + ' ' if prepend_negative_g else '') + (negative_g + ' ' if negative_g else '') + (prepend_negative_l + ' ' if prepend_negative_l else '') + (negative_l + ' ' if negative_l else '')
+
+        return (sdxl_pipe, model, positive_embedding, negative_embedding, vae, clip, refiner_model, refiner_positive_embedding, refiner_negative_embedding, refiner_clip, samples, seed, empty_latent_width, empty_latent_height, final_positive, final_negative)
+
+class ttN_pipeKSamplerSDXL_v2:
+    version = '2.3.1'
+    def __init__(self):
+        pass
+
+    @classmethod
+    def INPUT_TYPES(cls):
+        return {"required":
+                    {"sdxl_pipe": ("PIPE_LINE_SDXL",),
+
+                     "lora_name": (["None"] + folder_paths.get_filename_list("loras"),),
+                     "lora_strength": ("FLOAT", {"default": 1.0, "min": -10.0, "max": 10.0, "step": 0.01}),
+
+                     "upscale_method": (UPSCALE_METHODS, {"default": "None"}),
+                     "upscale_model_name": (UPSCALE_MODELS,),
+                     "factor": ("FLOAT", {"default": 2, "min": 0.0, "max": 10.0, "step": 0.25}),
+                     "rescale": (["by percentage", "to Width/Height", 'to longer side - maintain aspect', 'None'],),
+                     "percent": ("INT", {"default": 50, "min": 0, "max": 1000, "step": 1}),
+                     "width": ("INT", {"default": 1024, "min": 64, "max": MAX_RESOLUTION, "step": 8}),
+                     "height": ("INT", {"default": 1024, "min": 64, "max": MAX_RESOLUTION, "step": 8}),
+                     "longer_side": ("INT", {"default": 1024, "min": 64, "max": MAX_RESOLUTION, "step": 8}),
+                     "crop": (CROP_METHODS,),
+
+                     "base_steps": ("INT", {"default": 20, "min": 1, "max": 10000}),
+                     "cfg": ("FLOAT", {"default": 8.0, "min": 0.0, "max": 100.0}),
+                     "denoise": ("FLOAT", {"default": 1.0, "min": 0.0, "max": 1.0, "step": 0.01}),
+                     "refiner_steps": ("INT", {"default": 20, "min": 0, "max": 10000}),
+                     "refiner_cfg": ("FLOAT", {"default": 8.0, "min": 0.0, "max": 100.0}),
+                     "refiner_denoise": ("FLOAT", {"default": 1.0, "min": 0.0, "max": 1.0, "step": 0.01}),
+                     "sampler_name": (comfy.samplers.KSampler.SAMPLERS,),
+                     "scheduler": (comfy.samplers.KSampler.SCHEDULERS + CUSTOM_SCHEDULERS,),
+                     "image_output": (["Hide", "Preview", "Save", "Hide/Save", "Disabled"],),
+                     "save_prefix": ("STRING", {"default": "ComfyUI"}),
+                     "file_type": (OUTPUT_FILETYPES, {"default": "png"}),
+                     "embed_workflow": ("BOOLEAN", {"default": True}),
+                },
+                "optional":
+                    {"seed": ("INT", {"default": 0, "min": 0, "max": 0xffffffffffffffff}),
+                     "optional_model": ("MODEL",),
+                     "optional_positive": ("CONDITIONING",),
+                     "optional_negative": ("CONDITIONING",),
+                     "optional_latent": ("LATENT",),
+                     "optional_vae": ("VAE",),
+                     "optional_refiner_model": ("MODEL",),
+                     "optional_refiner_positive": ("CONDITIONING",),
+                     "optional_refiner_negative": ("CONDITIONING",),
+                     "optional_clip": ("CLIP",),
+                     "input_image_override": ("IMAGE",),
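+                     # each optional_* input overrides the matching entry pulled from sdxl_pipe in sample()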
"adv_xyPlot": ("ADV_XYPLOT",), + }, + "hidden": + {"prompt": "PROMPT", "extra_pnginfo": "EXTRA_PNGINFO", "my_unique_id": "UNIQUE_ID", + "ttNnodeVersion": ttN_pipeKSamplerSDXL_v2.version}, + } + + RETURN_TYPES = ("PIPE_LINE_SDXL", "PIPE_LINE", "MODEL", "CONDITIONING", "CONDITIONING", "MODEL", "CONDITIONING", "CONDITIONING", "LATENT", "VAE", "CLIP", "IMAGE", "INT", "IMAGE") + RETURN_NAMES = ("sdxl_pipe", "pipe","model", "positive", "negative" , "refiner_model", "refiner_positive", "refiner_negative", "latent", "vae", "clip", "images", "seed", "plot_image") + OUTPUT_NODE = True + FUNCTION = "sample" + CATEGORY = "🌏 tinyterra/pipe" + + def sample(self, sdxl_pipe, + lora_name, lora_strength, + base_steps, refiner_steps, cfg, denoise, refiner_cfg, refiner_denoise, sampler_name, scheduler, image_output, save_prefix, file_type, embed_workflow, + optional_model=None, optional_positive=None, optional_negative=None, optional_latent=None, optional_vae=None, optional_clip=None, input_image_override=None, adv_xyPlot=None, + seed=None, upscale_model_name=None, upscale_method=None, factor=None, rescale=None, percent=None, width=None, height=None, longer_side=None, crop=None, + prompt=None, extra_pnginfo=None, my_unique_id=None, force_full_denoise=False, disable_noise=False, + optional_refiner_model=None, optional_refiner_positive=None, optional_refiner_negative=None): + + my_unique_id = int(my_unique_id) + + ttN_save = ttNsave(my_unique_id, prompt, extra_pnginfo) + + sdxl_model = optional_model if optional_model is not None else sdxl_pipe["model"] + sdxl_positive = optional_positive if optional_positive is not None else sdxl_pipe["positive"] + sdxl_negative = optional_negative if optional_negative is not None else sdxl_pipe["negative"] + sdxl_samples = optional_latent if optional_latent is not None else sdxl_pipe["samples"] + sdxl_images = input_image_override if input_image_override is not None else sdxl_pipe["images"] + sdxl_vae = optional_vae if optional_vae is not None else sdxl_pipe["vae"] + sdxl_clip = optional_clip if optional_clip is not None else sdxl_pipe["clip"] + + sdxl_refiner_model = optional_refiner_model if optional_refiner_model is not None else sdxl_pipe["refiner_model"] + sdxl_refiner_positive = optional_refiner_positive if optional_refiner_positive is not None else sdxl_pipe["refiner_positive"] + #sdxl_refiner_positive = sdxl_positive if sdxl_refiner_positive is None else sdxl_refiner_positive + sdxl_refiner_negative = optional_refiner_negative if optional_refiner_negative is not None else sdxl_pipe["refiner_negative"] + #sdxl_refiner_negative = sdxl_negative if sdxl_refiner_negative is None else sdxl_refiner_negative + sdxl_refiner_clip = sdxl_pipe["refiner_clip"] + + if seed in (None, 'undefined'): + sdxl_seed = sdxl_pipe["seed"] + else: + sdxl_seed = seed + + del sdxl_pipe + + def process_sample_state(sdxl_model, sdxl_images, sdxl_clip, sdxl_samples, sdxl_vae, sdxl_seed, sdxl_positive, sdxl_negative, lora_name, lora_model_strength, lora_clip_strength, + sdxl_refiner_model, sdxl_refiner_positive, sdxl_refiner_negative, sdxl_refiner_clip, + upscale_model_name, upscale_method, factor, rescale, percent, width, height, longer_side, crop, + base_steps, refiner_steps, cfg, sampler_name, scheduler, denoise, refiner_denoise, + image_output, save_prefix, file_type, embed_workflow, prompt, my_unique_id, preview_latent, force_full_denoise=force_full_denoise, disable_noise=disable_noise): + + # Load Lora + if lora_name not in (None, "None"): + sdxl_model, sdxl_clip = loader.load_lora(lora_name, 
sdxl_model, sdxl_clip, lora_model_strength, lora_clip_strength) + + total_steps = base_steps + refiner_steps + + # Upscale samples if enabled + if upscale_method != "None": + sdxl_samples = sampler.handle_upscale(sdxl_samples, upscale_method, factor, crop, upscale_model_name, sdxl_vae, sdxl_images, rescale, percent, width, height, longer_side,) + + if (refiner_steps > 0) and (sdxl_refiner_model not in [None, "None"]): + # Base Sample + sdxl_samples = sampler.common_ksampler(sdxl_model, sdxl_seed, total_steps, cfg, sampler_name, scheduler, sdxl_positive, sdxl_negative, sdxl_samples, + denoise=denoise, preview_latent=preview_latent, start_step=0, last_step=base_steps, force_full_denoise=force_full_denoise, disable_noise=disable_noise) + + # Refiner Sample + sdxl_samples = sampler.common_ksampler(sdxl_refiner_model, sdxl_seed, total_steps, refiner_cfg, sampler_name, scheduler, sdxl_refiner_positive, sdxl_refiner_negative, sdxl_samples, + denoise=refiner_denoise, preview_latent=preview_latent, start_step=base_steps, last_step=10000, force_full_denoise=True, disable_noise=True) + else: + sdxl_samples = sampler.common_ksampler(sdxl_model, sdxl_seed, base_steps, cfg, sampler_name, scheduler, sdxl_positive, sdxl_negative, sdxl_samples, + denoise=denoise, preview_latent=preview_latent, start_step=0, last_step=base_steps, force_full_denoise=True, disable_noise=disable_noise) + + results = list() + if (image_output != "Disabled"): + latent = sdxl_samples["samples"] + sdxl_images = sdxl_vae.decode(latent) + + results = ttN_save.images(sdxl_images, save_prefix, image_output, embed_workflow, file_type) + + new_sdxl_pipe = { + "model": sdxl_model, + "positive": sdxl_positive, + "negative": sdxl_negative, + "vae": sdxl_vae, + "clip": sdxl_clip, + + "refiner_model": sdxl_refiner_model, + "refiner_positive": sdxl_refiner_positive, + "refiner_negative": sdxl_refiner_negative, + "refiner_clip": sdxl_refiner_clip, + + "samples": sdxl_samples, + "images": sdxl_images, + "seed": sdxl_seed, + + "loader_settings": None, + } + + pipe = {"model": sdxl_model, + "positive": sdxl_positive, + "negative": sdxl_negative, + "vae": sdxl_vae, + "clip": sdxl_clip, + + "samples": sdxl_samples, + "images": sdxl_images, + "seed": sdxl_seed, + + "loader_settings": None, + } + + if image_output in ("Hide", "Hide/Save", "Disabled"): + return (*sampler.get_output_sdxl(new_sdxl_pipe, pipe), None) + + return {"ui": {"images": results}, + "result": (*sampler.get_output_sdxl(new_sdxl_pipe, pipe), None)} + + def process_xyPlot(sdxl_model, sdxl_clip, sdxl_samples, sdxl_vae, sdxl_seed, sdxl_positive, sdxl_negative, lora_name, lora_model_strength, lora_clip_strength, + base_steps, refiner_steps, cfg, sampler_name, scheduler, denoise, + image_output, save_prefix, file_type, embed_workflow, prompt, extra_pnginfo, my_unique_id, preview_latent, adv_xyPlot): + + random.seed(seed) + + executor = xyExecutor() + plotter = ttNadv_xyPlot(adv_xyPlot, my_unique_id, prompt, extra_pnginfo, save_prefix, image_output, executor) + plot_image, images, samples = plotter.xy_plot_process() + plotter.reset() + del executor, plotter + + if samples is None and images is None: + return process_sample_state(sdxl_model, sdxl_images, sdxl_clip, sdxl_samples, sdxl_vae, sdxl_seed, sdxl_positive, sdxl_negative, lora_name, lora_model_strength, lora_clip_strength, + sdxl_refiner_model, sdxl_refiner_positive, sdxl_refiner_negative, sdxl_refiner_clip, + upscale_model_name, upscale_method, factor, rescale, percent, width, height, longer_side, crop, + base_steps, 
refiner_steps, cfg, sampler_name, scheduler, denoise, refiner_denoise,
+                                            image_output, save_prefix, file_type, embed_workflow, prompt, my_unique_id, preview_latent, force_full_denoise=force_full_denoise, disable_noise=disable_noise)
+
+            plot_result = ttN_save.images(plot_image, save_prefix, image_output, embed_workflow, file_type)
+            #plot_result.extend(ui_results)
+
+            new_sdxl_pipe = {
+                "model": sdxl_model,
+                "positive": sdxl_positive,
+                "negative": sdxl_negative,
+                "vae": sdxl_vae,
+                "clip": sdxl_clip,
+
+                "refiner_model": sdxl_refiner_model,
+                "refiner_positive": sdxl_refiner_positive,
+                "refiner_negative": sdxl_refiner_negative,
+                "refiner_clip": sdxl_refiner_clip,
+
+                "samples": samples,
+                "images": images,
+                "seed": sdxl_seed,
+
+                "loader_settings": None,
+            }
+
+            pipe = {"model": sdxl_model,
+                    "positive": sdxl_positive,
+                    "negative": sdxl_negative,
+                    "vae": sdxl_vae,
+                    "clip": sdxl_clip,
+
+                    "samples": samples,
+                    "images": images,
+                    "seed": sdxl_seed,
+
+                    "loader_settings": None,
+            }
+
+            if image_output in ("Hide", "Hide/Save", "Disabled"):
+                return (*sampler.get_output_sdxl(new_sdxl_pipe, pipe), plot_image)
+
+            return {"ui": {"images": plot_result},
+                    "result": (*sampler.get_output_sdxl(new_sdxl_pipe, pipe), plot_image)}
+
+        preview_latent = True
+        if image_output in ("Hide", "Hide/Save", "Disabled"):
+            preview_latent = False
+
+        if adv_xyPlot is None:
+            return process_sample_state(sdxl_model, sdxl_images, sdxl_clip, sdxl_samples, sdxl_vae, sdxl_seed, sdxl_positive, sdxl_negative,
+                                        lora_name, lora_strength, lora_strength,
+                                        sdxl_refiner_model, sdxl_refiner_positive, sdxl_refiner_negative, sdxl_refiner_clip,
+                                        upscale_model_name, upscale_method, factor, rescale, percent, width, height, longer_side, crop,
+                                        base_steps, refiner_steps, cfg, sampler_name, scheduler, denoise, refiner_denoise, image_output, save_prefix, file_type, embed_workflow, prompt, my_unique_id, preview_latent)
+        else:
+            return process_xyPlot(sdxl_model, sdxl_clip, sdxl_samples, sdxl_vae, sdxl_seed, sdxl_positive, sdxl_negative, lora_name, lora_strength, lora_strength,
+                                  base_steps, refiner_steps, cfg, sampler_name, scheduler, denoise,
+                                  image_output, save_prefix, file_type, embed_workflow, prompt, extra_pnginfo, my_unique_id, preview_latent, adv_xyPlot)
+
+class ttN_pipe_EDIT:
+    version = '1.1.1'
+    def __init__(self):
+        pass
+
+    @classmethod
+    def INPUT_TYPES(s):
+        return {"required": {},
+                "optional": {
+                    "pipe": ("PIPE_LINE",),
+                    "model": ("MODEL",),
+                    "pos": ("CONDITIONING",),
+                    "neg": ("CONDITIONING",),
+                    "latent": ("LATENT",),
+                    "vae": ("VAE",),
+                    "clip": ("CLIP",),
+                    "image": ("IMAGE",),
+                    "seed": ("INT", {"default": 0, "min": 0, "max": 0xffffffffffffffff, "forceInput": True}),
+                },
+                "hidden": {"ttNnodeVersion": ttN_pipe_EDIT.version, "my_unique_id": "UNIQUE_ID"},
+        }
+
+    RETURN_TYPES = ("PIPE_LINE", "MODEL", "CONDITIONING", "CONDITIONING", "LATENT", "VAE", "CLIP", "IMAGE", "INT")
+    RETURN_NAMES = ("pipe", "model", "pos", "neg", "latent", "vae", "clip", "image", "seed")
+    FUNCTION = "flush"
+
+    CATEGORY = "🌏 tinyterra/pipe"
+
+    def flush(self, pipe=None, model=None, pos=None, neg=None, latent=None, vae=None, clip=None, image=None, seed=None, my_unique_id=None):
+
+        model = model or pipe.get("model")
+        if model is None:
+            ttNl("Model missing from pipeLine").t(f'pipeEdit[{my_unique_id}]').warn().p()
+        pos = pos or pipe.get("positive")
+        if pos is None:
+            ttNl("Positive conditioning missing from pipeLine").t(f'pipeEdit[{my_unique_id}]').warn().p()
+        neg = neg or pipe.get("negative")
+        if neg is None:
+            ttNl("Negative conditioning missing from pipeLine").t(f'pipeEdit[{my_unique_id}]').warn().p()
+        samples = latent or pipe.get("samples")
+        if samples is None:
+            ttNl("Latent missing from pipeLine").t(f'pipeEdit[{my_unique_id}]').warn().p()
+        vae = vae or pipe.get("vae")
+        if vae is None:
+            ttNl("VAE missing from pipeLine").t(f'pipeEdit[{my_unique_id}]').warn().p()
+        clip = clip or pipe.get("clip")
+        if clip is None:
+            ttNl("Clip missing from pipeLine").t(f'pipeEdit[{my_unique_id}]').warn().p()
+        image = image or pipe.get("images")
+        if image is None:
+            ttNl("Image missing from pipeLine").t(f'pipeEdit[{my_unique_id}]').warn().p()
+        seed = seed or pipe.get("seed")
+        if seed is None:
+            ttNl("Seed missing from pipeLine").t(f'pipeEdit[{my_unique_id}]').warn().p()
+
+        new_pipe = {
+            "model": model,
+            "positive": pos,
+            "negative": neg,
+            "vae": vae,
+            "clip": clip,
+
+            "samples": samples,
+            "images": image,
+            "seed": seed,
+
+            "loader_settings": pipe["loader_settings"],
+        }
+        del pipe
+
+        return (new_pipe, model, pos, neg, samples, vae, clip, image, seed)
+
+class ttN_pipe_2BASIC:
+    version = '1.1.0'
+    def __init__(self):
+        pass
+
+    @classmethod
+    def INPUT_TYPES(s):
+        return {
+            "required": {
+                "pipe": ("PIPE_LINE",),
+            },
+            "hidden": {"ttNnodeVersion": ttN_pipe_2BASIC.version},
+        }
+
+    RETURN_TYPES = ("BASIC_PIPE", "PIPE_LINE",)
+    RETURN_NAMES = ("basic_pipe", "pipe",)
+    FUNCTION = "flush"
+
+    CATEGORY = "🌏 tinyterra/pipe"
+
+    def flush(self, pipe):
+        basic_pipe = (pipe.get('model'), pipe.get('clip'), pipe.get('vae'), pipe.get('positive'), pipe.get('negative'))
+        return (basic_pipe, pipe, )
+
+class ttN_pipe_2DETAILER:
+    version = '1.2.0'
+    @classmethod
+    def INPUT_TYPES(s):
+        return {"required": {"pipe": ("PIPE_LINE",),
+                             "bbox_detector": ("BBOX_DETECTOR", ),
+                             "wildcard": ("STRING", {"multiline": True, "placeholder": "wildcard spec: if kept empty, this option will be ignored"}),
+                             },
+                "optional": {"sam_model_opt": ("SAM_MODEL", ),
+                             "segm_detector_opt": ("SEGM_DETECTOR",),
+                             "detailer_hook": ("DETAILER_HOOK",),
+                             },
+                "hidden": {"ttNnodeVersion": ttN_pipe_2DETAILER.version},
+        }
+
+    RETURN_TYPES = ("DETAILER_PIPE", "PIPE_LINE" )
+    RETURN_NAMES = ("detailer_pipe", "pipe")
+    FUNCTION = "flush"
+
+    CATEGORY = "🌏 tinyterra/pipe"
+
+    def flush(self, pipe, bbox_detector, wildcard, sam_model_opt=None, segm_detector_opt=None, detailer_hook=None):
+        detailer_pipe = (pipe.get('model'), pipe.get('clip'), pipe.get('vae'), pipe.get('positive'), pipe.get('negative'), wildcard,
+                         bbox_detector, segm_detector_opt, sam_model_opt, detailer_hook, None, None, None, None)
+        return (detailer_pipe, pipe, )
+
+class ttN_pipeEncodeConcat:
+    version = '1.0.2'
+    @classmethod
+    def INPUT_TYPES(s):
+        return {"required": {
+                    "pipe": ("PIPE_LINE",),
+                    "toggle": ([True, False],),
+                },
+                "optional": {
+                    "positive": ("STRING", {"default": "Positive","multiline": True}),
+                    "positive_token_normalization": (["none", "mean", "length", "length+mean"],),
+                    "positive_weight_interpretation": (["comfy", "A1111", "compel", "comfy++", "down_weight"],),
+                    "negative": ("STRING", {"default": "Negative","multiline": True}),
+                    "negative_token_normalization": (["none", "mean", "length", "length+mean"],),
+                    "negative_weight_interpretation": (["comfy", "A1111", "compel", "comfy++", "down_weight"],),
+                    "optional_positive_from": ("CONDITIONING",),
+                    "optional_negative_from": ("CONDITIONING",),
+                    "optional_clip": ("CLIP",),
+                },
+                "hidden": {
+                    "ttNnodeVersion": ttN_pipeEncodeConcat.version, "my_unique_id": "UNIQUE_ID"
+                },
+        }
+
+    OUTPUT_NODE = True
+    RETURN_TYPES = ("PIPE_LINE", "CONDITIONING", "CONDITIONING", "CLIP")
("PIPE_LINE", "CONDITIONING", "CONDITIONING", "CLIP") + RETURN_NAMES = ("pipe", "positive", "negative", "clip") + FUNCTION = "concat" + + CATEGORY = "🌏 tinyterra/pipe" + + def concat(self, toggle, positive_token_normalization, positive_weight_interpretation, + negative_token_normalization, negative_weight_interpretation, + pipe=None, positive='', negative='', seed=None, my_unique_id=None, optional_positive_from=None, optional_negative_from=None, optional_clip=None): + + if toggle == False: + return (pipe, pipe["positive"], pipe["negative"], pipe["clip"]) + + positive_from = optional_positive_from if optional_positive_from is not None else pipe["positive"] + negative_from = optional_negative_from if optional_negative_from is not None else pipe["negative"] + samp_clip = optional_clip if optional_clip is not None else pipe["clip"] + + new_text = '' + + def enConcatConditioning(text, token_normalization, weight_interpretation, conditioning_from, new_text): + out = [] + if "__" in text: + text = loader.nsp_parse(text, pipe["seed"], title="encodeConcat", my_unique_id=my_unique_id) + new_text += text + + conditioning_to, pooled = advanced_encode(samp_clip, text, token_normalization, weight_interpretation, w_max=1.0, apply_to_pooled='enable') + conditioning_to = [[conditioning_to, {"pooled_output": pooled}]] + + if len(conditioning_from) > 1: + ttNl("encode and concat conditioning_from contains more than 1 cond, only the first one will actually be applied to conditioning_to").t(f'pipeEncodeConcat[{my_unique_id}]').warn().p() + + cond_from = conditioning_from[0][0] + + for i in range(len(conditioning_to)): + t1 = conditioning_to[i][0] + tw = torch.cat((t1, cond_from),1) + n = [tw, conditioning_to[i][1].copy()] + out.append(n) + + return out + + pos, neg = None, None + if positive not in ['', None, ' ']: + pos = enConcatConditioning(positive, positive_token_normalization, positive_weight_interpretation, positive_from, new_text) + if negative not in ['', None, ' ']: + neg = enConcatConditioning(negative, negative_token_normalization, negative_weight_interpretation, negative_from, new_text) + + pos = pos if pos is not None else pipe["positive"] + neg = neg if neg is not None else pipe["negative"] + + new_pipe = { + "model": pipe["model"], + "positive": pos, + "negative": neg, + "vae": pipe["vae"], + "clip": samp_clip, + + "samples": pipe["samples"], + "images": pipe["images"], + "seed": pipe["seed"], + + "loader_settings": pipe["loader_settings"], + } + del pipe + + return (new_pipe, new_pipe["positive"], new_pipe["negative"], samp_clip, { "ui": { "string": new_text } } ) + +class ttN_pipeLoraStack: + version = '1.1.1' + def __init__(self): + pass + + @classmethod + def INPUT_TYPES(s): + inputs = { + "required": { + "toggle": ([True, False],), + "mode": (["simple", "advanced"],), + "num_loras": ("INT", {"default": 1, "min": 0, "max": 20}), + }, + "optional": { + "optional_pipe": ("PIPE_LINE", {"default": None}), + "model_override": ("MODEL",), + "clip_override": ("CLIP",), + "optional_lora_stack": ("LORA_STACK",), + }, + "hidden": { + "ttNnodeVersion": (ttN_pipeLoraStack.version), + }, + } + + for i in range(1, 21): + inputs["optional"][f"lora_{i}_name"] = (["None"] + folder_paths.get_filename_list("loras"),{"default": "None"}) + inputs["optional"][f"lora_{i}_strength"] = ("FLOAT", {"default": 1.0, "min": -10.0, "max": 10.0, "step": 0.01}) + inputs["optional"][f"lora_{i}_model_strength"] = ("FLOAT", {"default": 1.0, "min": -10.0, "max": 10.0, "step": 0.01}) + 
inputs["optional"][f"lora_{i}_clip_strength"] = ("FLOAT", {"default": 1.0, "min": -10.0, "max": 10.0, "step": 0.01}) + + return inputs + + + RETURN_TYPES = ("PIPE_LINE", "LORA_STACK",) + RETURN_NAMES = ("optional_pipe","lora_stack",) + FUNCTION = "stack" + + CATEGORY = "🌏 tinyterra/pipe" + + def stack(self, toggle, mode, num_loras, optional_pipe=None, lora_stack=None, model_override=None, clip_override=None, **kwargs): + if (toggle in [False, None, "False"]) or not kwargs: + return optional_pipe, None + + loras = [] + + # Import Stack values + if lora_stack is not None: + loras.extend([l for l in lora_stack if l[0] != "None"]) + + # Import Lora values + for i in range(1, num_loras + 1): + lora_name = kwargs.get(f"lora_{i}_name") + + if not lora_name or lora_name == "None": + continue + + if mode == "simple": + lora_strength = float(kwargs.get(f"lora_{i}_strength")) + loras.append((lora_name, lora_strength, lora_strength)) + elif mode == "advanced": + model_strength = float(kwargs.get(f"lora_{i}_model_strength")) + clip_strength = float(kwargs.get(f"lora_{i}_clip_strength")) + loras.append((lora_name, model_strength, clip_strength)) + + if not loras: + return optional_pipe, None + + if loras and not optional_pipe: + return optional_pipe, loras + + # Load Loras + model = model_override or optional_pipe.get("model") + clip = clip_override or optional_pipe.get("clip") + + if not model or not clip: + return optional_pipe, loras + + for lora in loras: + model, clip = loader.load_lora(lora[0], model, clip, lora[1], lora[2]) + + new_pipe = { + "model": model, + "positive": optional_pipe["positive"], + "negative": optional_pipe["negative"], + "vae": optional_pipe["vae"], + "clip": clip, + + "samples": optional_pipe["samples"], + "images": optional_pipe["images"], + "seed": optional_pipe["seed"], + + "loader_settings": optional_pipe["loader_settings"], + } + + del optional_pipe + + return new_pipe, loras + +#---------------------------------------------------------------ttN/pipe END------------------------------------------------------------------------# + + +#--------------------------------------------------------------ttN/base START-----------------------------------------------------------------------# +class ttN_tinyLoader: + version = '1.1.0' + @classmethod + def INPUT_TYPES(cls): + aspect_ratios = ["width x height [custom]", + "512 x 512 [S] 1:1", + "768 x 768 [S] 1:1", + "910 x 910 [S] 1:1", + + "512 x 682 [P] 3:4", + "512 x 768 [P] 2:3", + "512 x 910 [P] 9:16", + + "682 x 512 [L] 4:3", + "768 x 512 [L] 3:2", + "910 x 512 [L] 16:9", + + "1024 x 1024 [S] 1:1", + "512 x 1024 [P] 1:2", + "1024 x 512 [L] 2:1", + + "640 x 1536 [P] 9:21", + "704 x 1472 [P] 9:19", + "768 x 1344 [P] 9:16", + "768 x 1216 [P] 5:8", + "832 x 1216 [P] 2:3", + "896 x 1152 [P] 3:4", + + "1536 x 640 [L] 21:9", + "1472 x 704 [L] 19:9", + "1344 x 768 [L] 16:9", + "1216 x 768 [L] 8:5", + "1216 x 832 [L] 3:2", + "1152 x 896 [L] 4:3", + ] + + return {"required": { + "ckpt_name": (folder_paths.get_filename_list("checkpoints"), ), + "config_name": (["Default",] + folder_paths.get_filename_list("configs"), {"default": "Default"} ), + "sampling": (["Default", "eps", "v_prediction", "lcm", "x0"], {"default": "Default"}), + "zsnr": ("BOOLEAN", {"default": False}), + "cfg_rescale_mult": ("FLOAT", {"default": 0.0, "min": 0.0, "max": 1.0, "step": 0.01}), + + "vae_name": (["Baked VAE"] + folder_paths.get_filename_list("vae"),), + "clip_skip": ("INT", {"default": -1, "min": -24, "max": 0, "step": 1}), + + "empty_latent_aspect": 
(aspect_ratios, {"default":"512 x 512 [S] 1:1"}), + "empty_latent_width": ("INT", {"default": 512, "min": 64, "max": MAX_RESOLUTION, "step": 8}), + "empty_latent_height": ("INT", {"default": 512, "min": 64, "max": MAX_RESOLUTION, "step": 8}), + "batch_size": ("INT", {"default": 1, "min": 1, "max": 64}), + }, + "hidden": {"prompt": "PROMPT", "ttNnodeVersion": ttN_tinyLoader.version, "my_unique_id": "UNIQUE_ID",} + } + + RETURN_TYPES = ("MODEL", "LATENT", "VAE", "CLIP", "INT", "INT",) + RETURN_NAMES = ("model", "latent", "vae", "clip", "width", "height",) + + FUNCTION = "miniloader" + CATEGORY = "🌏 tinyterra/base" + + def miniloader(self, ckpt_name, config_name, sampling, zsnr, cfg_rescale_mult, vae_name, clip_skip, + empty_latent_aspect, empty_latent_width, empty_latent_height, batch_size, + prompt=None, my_unique_id=None): + + model: ModelPatcher | None = None + clip: CLIP | None = None + vae: VAE | None = None + + model, clip, vae = loader.load_checkpoint(ckpt_name, config_name, clip_skip) + + # Create Empty Latent + sd3 = True if sampler.get_model_type(model) in ['FLUX', 'FLOW'] else False + latent = sampler.emptyLatent(empty_latent_aspect, batch_size, empty_latent_width, empty_latent_height, sd3) + samples = {"samples": latent} + + if vae_name != "Baked VAE": + vae = loader.load_vae(vae_name) + + if sampling != "Default": + MSD = comfy_extras.nodes_model_advanced.ModelSamplingDiscrete() + model = MSD.patch(model, sampling, zsnr)[0] + + if cfg_rescale_mult > 0: + CFGR = comfy_extras.nodes_model_advanced.RescaleCFG() + model = CFGR.patch(model, cfg_rescale_mult)[0] + + return (model, samples, vae, clip, empty_latent_width, empty_latent_height) + +class ttN_conditioning: + version = '1.0.2' + @classmethod + def INPUT_TYPES(cls): + return {"required": { + "model": ("MODEL",), + "clip": ("CLIP",), + + "loras": ("STRING", {"placeholder": "", "multiline": True}), + + "positive": ("STRING", {"default": "Positive","multiline": True, "dynamicPrompts": True}), + "positive_token_normalization": (["none", "mean", "length", "length+mean"],), + "positive_weight_interpretation": (["comfy", "A1111", "compel", "comfy++", "down_weight"],), + + "negative": ("STRING", {"default": "Negative", "multiline": True, "dynamicPrompts": True}), + "negative_token_normalization": (["none", "mean", "length", "length+mean"],), + "negative_weight_interpretation": (["comfy", "A1111", "compel", "comfy++", "down_weight"],), + "zero_out_empty": ("BOOLEAN", {"default": False}), + }, + "optional": { + "optional_lora_stack": ("LORA_STACK",), + "prepend_positive": ("STRING", {"forceInput": True}), + "prepend_negative": ("STRING", {"forceInput": True}), + }, + "hidden": {"ttNnodeVersion": ttN_conditioning.version, "my_unique_id": "UNIQUE_ID"},} + + RETURN_TYPES = ("MODEL", "CONDITIONING", "CONDITIONING", "CLIP", "STRING", "STRING") + RETURN_NAMES = ("model", "positive", "negative", "clip", "pos_string", "neg_string") + + FUNCTION = "condition" + CATEGORY = "🌏 tinyterra/base" + + def condition(self, model, clip, loras, + positive, positive_token_normalization, positive_weight_interpretation, + negative, negative_token_normalization, negative_weight_interpretation, zero_out_empty, + optional_lora_stack=None, prepend_positive=None, prepend_negative=None, + my_unique_id=None): + + if optional_lora_stack is not None: + for lora in optional_lora_stack: + model, clip = loader.load_lora(lora[0], model, clip, lora[1], lora[2]) + + if loras not in [None, "None"]: + model, clip = loader.load_lora_text(loras, model, clip) + + 
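# encode the prompts (plus any prepended text) into CLIP conditioning, honoring the selected token normalization and weight interpretation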
positive_embedding = loader.embedding_encode(positive, positive_token_normalization, positive_weight_interpretation, clip, title='ttN Conditioning Positive', + my_unique_id=my_unique_id, prepend_text=prepend_positive, zero_out=zero_out_empty) + negative_embedding = loader.embedding_encode(negative, negative_token_normalization, negative_weight_interpretation, clip, title='ttN Conditioning Negative', + my_unique_id=my_unique_id, prepend_text=prepend_negative, zero_out=zero_out_empty) + + final_positive = (prepend_positive + ' ' if prepend_positive else '') + (positive if positive else '') + final_negative = (prepend_negative + ' ' if prepend_negative else '') + (negative if negative else '') + + return (model, positive_embedding, negative_embedding, clip, final_positive, final_negative) + +class ttN_KSampler_v2: + version = '2.3.1' + def __init__(self): + pass + + @classmethod + def INPUT_TYPES(cls): + return {"required": { + "model": ("MODEL",), + "positive": ("CONDITIONING",), + "negative": ("CONDITIONING",), + "latent": ("LATENT",), + "vae": ("VAE",), + + "lora_name": (["None"] + folder_paths.get_filename_list("loras"),), + "lora_strength": ("FLOAT", {"default": 1.0, "min": -10.0, "max": 10.0, "step": 0.01}), + + "upscale_method": (UPSCALE_METHODS, {"default": "None"}), + "upscale_model_name": (UPSCALE_MODELS,), + "factor": ("FLOAT", {"default": 2, "min": 0.0, "max": 10.0, "step": 0.25}), + "rescale": (["by percentage", "to Width/Height", 'to longer side - maintain aspect', 'None'],), + "percent": ("INT", {"default": 50, "min": 0, "max": 1000, "step": 1}), + "width": ("INT", {"default": 1024, "min": 64, "max": MAX_RESOLUTION, "step": 8}), + "height": ("INT", {"default": 1024, "min": 64, "max": MAX_RESOLUTION, "step": 8}), + "longer_side": ("INT", {"default": 1024, "min": 64, "max": MAX_RESOLUTION, "step": 8}), + "crop": (CROP_METHODS,), + + "steps": ("INT", {"default": 20, "min": 1, "max": 10000}), + "cfg": ("FLOAT", {"default": 8.0, "min": 0.0, "max": 100.0}), + "sampler_name": (comfy.samplers.KSampler.SAMPLERS,), + "scheduler": (comfy.samplers.KSampler.SCHEDULERS + CUSTOM_SCHEDULERS,), + "denoise": ("FLOAT", {"default": 1.0, "min": 0.0, "max": 1.0, "step": 0.01}), + "image_output": (["Hide", "Preview", "Save", "Hide/Save", "Disabled"],), + "save_prefix": ("STRING", {"default": "ComfyUI"}), + "file_type": (OUTPUT_FILETYPES,{"default": "png"}), + "embed_workflow": ("BOOLEAN", {"default": True}), + }, + "optional": { + "clip": ("CLIP",), + "seed": ("INT", {"default": 0, "min": 0, "max": 0xffffffffffffffff}), + "input_image_override": ("IMAGE",), + "adv_xyPlot": ("ADV_XYPLOT",), + }, + "hidden": { + "prompt": "PROMPT", "extra_pnginfo": "EXTRA_PNGINFO", "my_unique_id": "UNIQUE_ID", + "ttNnodeVersion": ttN_KSampler_v2.version + }, + } + + RETURN_TYPES = ("MODEL", "CONDITIONING", "CONDITIONING", "LATENT", "VAE", "CLIP", "IMAGE", "INT", "IMAGE") + RETURN_NAMES = ("model", "positive", "negative", "latent","vae", "clip", "images", "seed", "plot_image") + OUTPUT_NODE = True + FUNCTION = "sample" + CATEGORY = "🌏 tinyterra/base" + + def sample(self, model, positive, negative, latent, vae, + lora_name, lora_strength, + steps, cfg, sampler_name, scheduler, image_output, save_prefix, file_type, embed_workflow, denoise=1.0, + input_image_override=None, + clip=None, seed=None, adv_xyPlot=None, upscale_model_name=None, upscale_method=None, factor=None, rescale=None, percent=None, width=None, height=None, longer_side=None, crop=None, + prompt=None, extra_pnginfo=None, my_unique_id=None, start_step=None, 
last_step=None, force_full_denoise=False, disable_noise=False): + + my_unique_id = int(my_unique_id) + + ttN_save = ttNsave(my_unique_id, prompt, extra_pnginfo) + + def process_sample_state(model, images, clip, samples, vae, seed, positive, negative, lora_name, lora_model_strength, lora_clip_strength, + upscale_model_name, upscale_method, factor, rescale, percent, width, height, longer_side, crop, + steps, cfg, sampler_name, scheduler, denoise, + image_output, save_prefix, file_type, embed_workflow, prompt, extra_pnginfo, my_unique_id, preview_latent, start_step=start_step, last_step=last_step, force_full_denoise=force_full_denoise, disable_noise=disable_noise): + # Load Lora + if lora_name not in (None, "None"): + if clip == None: + raise ValueError(f"tinyKSampler [{my_unique_id}] - Lora requires CLIP model") + model, clip = loader.load_lora(lora_name, model, clip, lora_model_strength, lora_clip_strength) + + # Upscale samples if enabled + if upscale_method != "None": + samples = sampler.handle_upscale(samples, upscale_method, factor, crop, upscale_model_name, vae, images, rescale, percent, width, height, longer_side) + + samples = sampler.common_ksampler(model, seed, steps, cfg, sampler_name, scheduler, positive, negative, samples, denoise=denoise, preview_latent=preview_latent, start_step=start_step, last_step=last_step, force_full_denoise=force_full_denoise, disable_noise=disable_noise) + + results = list() + if (image_output != "Disabled"): + # Save images + latent = samples["samples"] + images = vae.decode(latent) + + results = ttN_save.images(images, save_prefix, image_output, embed_workflow, file_type) + + if image_output in ("Hide", "Hide/Save", "Disabled"): + return (model, positive, negative, samples, vae, clip, images, seed, None) + + return {"ui": {"images": results}, + "result": (model, positive, negative, samples, vae, clip, images, seed, None)} + + def process_xyPlot(model, clip, samp_samples, vae, seed, positive, negative, lora_name, lora_model_strength, lora_clip_strength, + steps, cfg, sampler_name, scheduler, denoise, + image_output, save_prefix, file_type, embed_workflow, prompt, extra_pnginfo, my_unique_id, preview_latent, adv_xyPlot): + + random.seed(seed) + + executor = xyExecutor() + plotter = ttNadv_xyPlot(adv_xyPlot, my_unique_id, prompt, extra_pnginfo, save_prefix, image_output, executor) + plot_image, images, samples = plotter.xy_plot_process() + plotter.reset() + del executor, plotter + + if samples is None and images is None: + return process_sample_state(model, images, clip, samp_samples, vae, seed, positive, negative, lora_name, lora_model_strength, lora_clip_strength, + upscale_model_name, upscale_method, factor, rescale, percent, width, height, longer_side, crop, + steps, cfg, sampler_name, scheduler, denoise, + image_output, save_prefix, file_type, embed_workflow, prompt, extra_pnginfo, my_unique_id, preview_latent, start_step=start_step, last_step=last_step, force_full_denoise=force_full_denoise, disable_noise=disable_noise) + + + plot_result = ttN_save.images(plot_image, save_prefix, image_output, embed_workflow, file_type) + #plot_result.extend(ui_results) + + if image_output in ("Hide", "Hide/Save"): + return (model, positive, negative, samples, vae, clip, images, seed, plot_image) + + return {"ui": {"images": plot_result}, "result": (model, positive, negative, samples, vae, clip, images, seed, plot_image)} + + preview_latent = True + if image_output in ("Hide", "Hide/Save", "Disabled"): + preview_latent = False + + if adv_xyPlot is None: + return 
process_sample_state(model, input_image_override, clip, latent, vae, seed, positive, negative, lora_name, lora_strength, lora_strength, + upscale_model_name, upscale_method, factor, rescale, percent, width, height, longer_side, crop, + steps, cfg, sampler_name, scheduler, denoise, image_output, save_prefix, file_type, embed_workflow, prompt, extra_pnginfo, my_unique_id, preview_latent) + else: + return process_xyPlot(model, clip, latent, vae, seed, positive, negative, lora_name, lora_strength, lora_strength, steps, cfg, sampler_name, + scheduler, denoise, image_output, save_prefix, file_type, embed_workflow, prompt, extra_pnginfo, my_unique_id, preview_latent, adv_xyPlot) + +#---------------------------------------------------------------ttN/base END------------------------------------------------------------------------# + + +#-------------------------------------------------------------ttN/xyPlot START----------------------------------------------------------------------# +class ttN_advanced_XYPlot: + version = '1.2.1' + plotPlaceholder = "_PLOT\nExample:\n\n\n[node_ID:widget_Name='value']\n\n\n[node_ID:widget_Name='value2']\n[node_ID:widget2_Name='value']\n[node_ID2:widget_Name='value']\n\netc..." + + def get_plot_points(plot_data, unique_id, plot_Line): + if plot_data is None or plot_data.strip() == '': + return None + else: + try: + axis_dict = {} + lines = plot_data.split('<') + new_lines = [] + temp_line = '' + + for line in lines: + if line.startswith('lora'): + temp_line += '<' + line + new_lines[-1] = temp_line + else: + new_lines.append(line) + temp_line = line + + for line in new_lines: + if line: + values_label = [] + line = line.split('>', 1) + num, label = line[0].split(':', 1) + axis_dict[num] = {"label": label} + for point in line[1].split("']"): + if point.strip() == '': + continue + + node_id = point.split(':', 1)[0].split('[')[1] + axis_dict[num].setdefault(node_id, {}) + input_name = point.split(':', 1)[1].split('=', 1)[0] + value = point.split("'", 1 )[1] + values_label.append((value, input_name, node_id)) + + axis_dict[num][node_id][input_name] = value + + if label in ['v_label', 'tv_label', 'idtv_label']: + new_label = [] + for value, input_name, node_id in values_label: + if label == 'v_label': + new_label.append(value) + elif label == 'tv_label': + new_label.append(f'{input_name}: {value}') + elif label == 'idtv_label': + new_label.append(f'[{node_id}] {input_name}: {value}') + axis_dict[num]['label'] = ', '.join(new_label) + + except ValueError: + ttNl('Invalid Plot - defaulting to None...').t(f'advanced_XYPlot[{unique_id}] {plot_Line} Axis').warn().p() + return None + return axis_dict + + def __init__(self): + pass + + @classmethod + def INPUT_TYPES(s): + return { + "required": { + "grid_spacing": ("INT",{"min": 0, "max": 500, "step": 5, "default": 0,}), + "save_individuals": ("BOOLEAN", {"default": False}), + "flip_xy": ("BOOLEAN", {"default": False}), + + "x_plot": ("STRING",{"default": '', "multiline": True, "placeholder": 'X' + ttN_advanced_XYPlot.plotPlaceholder, "pysssss.autocomplete": False}), + "y_plot": ("STRING",{"default": '', "multiline": True, "placeholder": 'Y' + ttN_advanced_XYPlot.plotPlaceholder, "pysssss.autocomplete": False}), + "z_plot": ("STRING",{"default": '', "multiline": True, "placeholder": 'Z' + ttN_advanced_XYPlot.plotPlaceholder, "pysssss.autocomplete": False}), + "invert_background": ("BOOLEAN", {"default": False}), + }, + "hidden": { + "my_unique_id": "UNIQUE_ID", + "ttNnodeVersion": ttN_advanced_XYPlot.version, + }, + } + + 
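# axis text format parsed by get_plot_points above:
+    #   <axis_number:label_mode>
+    #   [node_ID:widget_name='value'] ...
+    # where label_mode is one of: v_label, tv_label, idtv_label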
RETURN_TYPES = ("ADV_XYPLOT", ) + RETURN_NAMES = ("adv_xyPlot", ) + FUNCTION = "plot" + + CATEGORY = "🌏 tinyterra/xyPlot" + + def plot(self, grid_spacing, save_individuals, flip_xy, x_plot=None, y_plot=None, z_plot=None, my_unique_id=None, invert_background=False): + x_plot = ttN_advanced_XYPlot.get_plot_points(x_plot, my_unique_id, 'X') + y_plot = ttN_advanced_XYPlot.get_plot_points(y_plot, my_unique_id, 'Y') + z_plot = ttN_advanced_XYPlot.get_plot_points(z_plot, my_unique_id, 'Z') + + if x_plot == {}: + x_plot = None + if y_plot == {}: + y_plot = None + + if flip_xy == True: + x_plot, y_plot = y_plot, x_plot + + xy_plot = {"x_plot": x_plot, + "y_plot": y_plot, + "z_plot": z_plot, + "grid_spacing": grid_spacing, + "save_individuals": save_individuals, + "invert_bg": invert_background} + + return (xy_plot, ) + +class ttN_Plotting(ttN_advanced_XYPlot): + def plot(self, **args): + xy_plot = None + return (xy_plot, ) + +class ttN_advPlot_images: + version = '1.0.0' + def __init__(self): + pass + + @classmethod + def INPUT_TYPES(s): + return { + "required": { + "enabled": ('BOOLEAN',{'default': True}), + "image": ('IMAGE',{}), + "image_output": (["Hide", "Preview", "Save", "Hide/Save", "Disabled"],), + "save_prefix": ("STRING", {"default": "ComfyUI"}), + "file_type": (OUTPUT_FILETYPES,{"default": "png"}), + "embed_workflow": ("BOOLEAN", {"default": True}), + }, + "optional": { + "adv_xyPlot": ("ADV_XYPLOT",), + }, + "hidden": { + "prompt": "PROMPT", "extra_pnginfo": "EXTRA_PNGINFO", "my_unique_id": "UNIQUE_ID", + "ttNnodeVersion": ttN_advPlot_images.version, + } + } + + RETURN_TYPES = ("IMAGE", "IMAGE") + RETURN_NAMES = ("images", "plot_image") + FUNCTION = "plot" + OUTPUT_NODE = True + + CATEGORY = "🌏 tinyterra/xyPlot" + + def plot(self, enabled, image, adv_xyPlot, image_output, save_prefix, file_type, embed_workflow, prompt=None, extra_pnginfo=None, my_unique_id=None): + if enabled == False or adv_xyPlot is None: + return (image, None) + + my_unique_id = int(my_unique_id) + ttN_save = ttNsave(my_unique_id, prompt, extra_pnginfo) + + #random.seed(seed) + + executor = xyExecutor() + plotter = ttNadv_xyPlot(adv_xyPlot, my_unique_id, prompt, extra_pnginfo, save_prefix, image_output, executor) + plot_image, images, samples = plotter.xy_plot_process() + plotter.reset() + del executor, plotter + + plot_result = ttN_save.images(plot_image, save_prefix, image_output, embed_workflow, file_type) + #plot_result.extend(ui_results) + + if image_output in ("Hide", "Hide/Save"): + return (images, plot_image) + + return {"ui": {"images": plot_result}, "result": (images, plot_image)} + +class ttN_advPlot_range: + version = '1.1.0' + def __init__(self): + pass + + @classmethod + def INPUT_TYPES(s): + return { + "required": { + "node": ([AnyType("Connect to xyPlot for options"),],{}), + "widget": ([AnyType("Select node for options"),],{}), + + "range_mode": (['step_int','num_steps_int','step_float','num_steps_float'],{}), + "start": ("FLOAT", {"min": -0xffffffffffffffff, "max": 0xffffffffffffffff, "step": 0.01, "default": 1,}), + "step": ("FLOAT", {"min": -0xffffffffffffffff, "max": 0xffffffffffffffff, "step": 0.01, "default": 1,}), + "stop": ("FLOAT", {"min": -0xffffffffffffffff, "max": 0xffffffffffffffff, "step": 0.01, "default": 5,}), + "include_stop": ("BOOLEAN",{"default": True}), + "num_steps": ("INT", {"min": 1, "max": 1000, "step": 1, "default": 5,}), + + "label_type": (['Values', 'Title and Values', 'ID, Title and Values'],{"default": "Values"}), + + }, + "hidden": { + "ttNnodeVersion": 
ttN_advPlot_range.version, + } + } + + RETURN_TYPES = ("STRING",) + RETURN_NAMES = ("plot_text",) + FUNCTION = "plot" + OUTPUT_NODE = True + + CATEGORY = "🌏 tinyterra/xyPlot" + + def plot(self, node, widget, range_mode, start, step, stop, include_stop, num_steps, label_type): + if '[' in node and ']' in node: + nodeid = node.split('[', 1)[1].split(']', 1)[0] + else: + return {"ui": {"text": ''}, "result": ('',)} + + label_map = { + 'Values': 'v_label', + 'Title and Values': 'tv_label', + 'ID, Title and Values': 'idtv_label', + } + label = label_map[label_type] + + plot_text = [] + vals = [] + + if range_mode.startswith('step_'): + for num in range(1, num_steps + 1): + vals.append(start + step * (num - 1)) + if range_mode.startswith('num_steps'): + vals = np.linspace(start, stop, num_steps, endpoint=include_stop).tolist() + + for i, val in enumerate(vals): + if range_mode.endswith('int'): + val = int(round(val, 0)) + else: + val = round(val, 2) + line = f"[{nodeid}:{widget}='{val}']" + plot_text.append(f"<{i+1}:{label}>") + plot_text.append(line) + + out = '\n'.join(plot_text) + + return {"ui": {"text": out}, "result": (out,)} + +class ttN_advPlot_string: + version = '1.1.0' + def __init__(self): + pass + + @classmethod + def INPUT_TYPES(s): + return { + "required": { + "node": ([AnyType("Connect to xyPlot for options"),],{}), + "widget": ([AnyType("Select node for options"),],{}), + + "replace_mode": ("BOOLEAN",{"default": False}), + "search_string": ("STRING",{"default":""}), + "text": ("STRING", {"default":"","multiline": True}), + "delimiter": ("STRING", {"default":"\\n","multiline": False}), + "label_type": (['Values', 'Title and Values', 'ID, Title and Values'],{"default": "Values"}), + }, + "hidden": { + "ttNnodeVersion": ttN_advPlot_range.version, + } + } + + RETURN_TYPES = ("STRING",) + RETURN_NAMES = ("plot_text",) + FUNCTION = "plot" + OUTPUT_NODE = True + + CATEGORY = "🌏 tinyterra/xyPlot" + + def plot(self, node, widget, replace_mode, search_string, text, delimiter, label_type): + if '[' in node and ']' in node: + nodeid = node.split('[', 1)[1].split(']', 1)[0] + else: + return {"ui": {"text": ''}, "result": ('',)} + + label_map = { + 'Values': 'v_label', + 'Title and Values': 'tv_label', + 'ID, Title and Values': 'idtv_label', + } + label = label_map[label_type] + + plot_text = [] + + delimiter = delimiter.replace('\\n', '\n') + vals = text.split(delimiter) + + for i, val in enumerate(vals): + if val.strip() == '': + continue + if replace_mode: + line = f"[{nodeid}:{widget}='%{search_string};{val}%']" + else: + line = f"[{nodeid}:{widget}='{val}']" + plot_text.append(f"<{i+1}:{label}>") + plot_text.append(line) + + out = '\n'.join(plot_text) + + return {"ui": {"text": out}, "result": (out,)} + +class ttN_advPlot_combo: + version = '1.0.0' + def __init__(self): + pass + + @classmethod + def INPUT_TYPES(s): + return { + "required": { + "node": ([AnyType("Connect to xyPlot for options"),],{}), + "widget": ([AnyType("Select node for options"),],{}), + + "mode": (['all', 'range', 'select'],), + "start_from": ([AnyType("Select widget for options"),],), + "end_with": ([AnyType("Select widget for options"),],), + + "select": ([AnyType("Select widget for options"),],), + "selection": ("STRING", {"default":"","multiline": True}), + + "label_type": (['Values', 'Title and Values', 'ID, Title and Values'],{"default": "Values"}), + }, + "hidden": { + "ttNnodeVersion": ttN_advPlot_range.version, "prompt": "PROMPT", + } + } + + RETURN_TYPES = ("STRING",) + RETURN_NAMES = ("plot_text",) + 
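# emits axis text in the <i:label> / [node:widget='value'] format consumed by ttN_advanced_XYPlot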
FUNCTION = "plot" + OUTPUT_NODE = True + + CATEGORY = "🌏 tinyterra/xyPlot" + + def plot(self, node, widget, mode, start_from, end_with, select, selection, label_type, prompt=None): + if '[' in node and ']' in node: + nodeid = node.split('[', 1)[1].split(']', 1)[0] + else: + return {"ui": {"text": ''}, "result": ('',)} + + label_map = { + 'Values': 'v_label', + 'Title and Values': 'tv_label', + 'ID, Title and Values': 'idtv_label', + } + label = label_map[label_type] + + plot_text = [] + + class_type = prompt[nodeid]['class_type'] + class_def = nodes.NODE_CLASS_MAPPINGS[class_type] + valid_inputs = class_def.INPUT_TYPES() + options = valid_inputs["required"][widget][0] or valid_inputs["optional"][widget][0] + + vals = [] + if mode == 'all': + vals = options + elif mode == 'range': + start_index = options.index(start_from) + stop_index = options.index(end_with) + 1 + if start_index > stop_index: + start_index, stop_index = stop_index, start_index + vals = options[start_index:stop_index] + elif mode == 'select': + selection = selection.split('\n') + for s in selection: + s.strip() + if s in options: + vals.append(s) + + for i, val in enumerate(vals): + line = f"[{nodeid}:{widget}='{val}']" + plot_text.append(f"<{i+1}:{label}>") + plot_text.append(line) + + out = '\n'.join(plot_text) + + return {"ui": {"text": out}, "result": (out,)} + +class ttN_advPlot_merge: + version = '1.0.0' + def __init__(self): + pass + + @classmethod + def INPUT_TYPES(s): + return { + "required": { + "label_type": (['Values', 'Title and Values', 'ID, Title and Values'],{"default": "Values"}), + }, + "optional": { + "plot_text1": ("STRING", {"forceInput": True,}), + "plot_text2": ("STRING",{"forceInput": True,}), + }, + } + + RETURN_TYPES = ("STRING",) + RETURN_NAMES = ("plot_text",) + FUNCTION = "plot" + + CATEGORY = "🌏 tinyterra/xyPlot" + + def plot(self, label_type, plot_text1='', plot_text2='', ): + label_map = { + 'Values': 'v_label', + 'Title and Values': 'tv_label', + 'ID, Title and Values': 'idtv_label', + } + label = label_map.get(label_type, 'v_label') + + text1 = plot_text1.split("<") if plot_text1 else [] + text2 = plot_text2.split("<") if plot_text2 else [] + + number_of_lines = max(len(text1) - 1, len(text2) - 1, 0) + if number_of_lines == 0: + return '' + + lines = [] + for num in range(1, number_of_lines + 1): + lines.append(f'<{num}:{label}>\n') + + for text in (text1, text2): + if num < len(text): + parts = text[num].split('>\n', 1) + if len(parts) == 2: + lines.append(parts[1]) + if not parts[1].endswith('\n'): + lines.append('\n') + + out = ''.join(lines) + return {"ui": {"text": out}, "result": (out,)} +#--------------------------------------------------------------ttN/xyPlot END-----------------------------------------------------------------------# + + +#----------------------------------------------------------------misc START------------------------------------------------------------------------# +WEIGHTED_SUM = "Weighted sum = ( A*(1-M) + B*M )" +ADD_DIFFERENCE = "Add difference = ( A + (B-C)*M )" +A_ONLY = "A Only" +MODEL_INTERPOLATIONS = [WEIGHTED_SUM, ADD_DIFFERENCE, A_ONLY] +FOLLOW = "Follow model interp" +B_ONLY = "B Only" +C_ONLY = "C Only" +CLIP_INTERPOLATIONS = [FOLLOW, WEIGHTED_SUM, ADD_DIFFERENCE, A_ONLY, B_ONLY, C_ONLY] +ABC = "ABC" + +class ttN_multiModelMerge: + version = '1.1.0' + def __init__(self): + pass + + @classmethod + def INPUT_TYPES(s): + return {"required": { + "ckpt_A_name": (folder_paths.get_filename_list("checkpoints"), ), + "config_A_name": (["Default",] + 
folder_paths.get_filename_list("configs"), {"default": "Default"} ), + "ckpt_B_name": (["None",] + folder_paths.get_filename_list("checkpoints"), ), + "config_B_name": (["Default",] + folder_paths.get_filename_list("configs"), {"default": "Default"} ), + "ckpt_C_name": (["None",] + folder_paths.get_filename_list("checkpoints"), ), + "config_C_name": (["Default",] + folder_paths.get_filename_list("configs"), {"default": "Default"} ), + + "model_interpolation": (MODEL_INTERPOLATIONS,), + "model_multiplier": ("FLOAT", {"default": 1.0, "min": 0.0, "max": 1.0, "step": 0.01}), + + "clip_interpolation": (CLIP_INTERPOLATIONS,), + "clip_multiplier": ("FLOAT", {"default": 1.0, "min": 0.0, "max": 1.0, "step": 0.01}), + }, + "optional": { + "model_A_override": ("MODEL",), + "model_B_override": ("MODEL",), + "model_C_override": ("MODEL",), + "clip_A_override": ("CLIP",), + "clip_B_override": ("CLIP",), + "clip_C_override": ("CLIP",), + }, + "hidden": {"prompt": "PROMPT", "extra_pnginfo": "EXTRA_PNGINFO", "ttNnodeVersion": ttN_multiModelMerge.version, "my_unique_id": "UNIQUE_ID"}, + } + + RETURN_TYPES = ("MODEL", "CLIP",) + RETURN_NAMES = ("model", "clip",) + FUNCTION = "mergificate" + + CATEGORY = "🌏 tinyterra" + + def mergificate(self, ckpt_A_name, config_A_name, ckpt_B_name, config_B_name, ckpt_C_name, config_C_name, + model_interpolation, model_multiplier, clip_interpolation, clip_multiplier, + model_A_override=None, model_B_override=None, model_C_override=None, + clip_A_override=None, clip_B_override=None, clip_C_override=None, + prompt=None, extra_pnginfo=None, my_unique_id=None): + + def required_assets(model_interpolation, clip_interpolation): + required = set(["model_A"]) + + if clip_interpolation in [A_ONLY, B_ONLY, C_ONLY]: + required.add(f"clip_{clip_interpolation[0]}") + elif clip_interpolation in [WEIGHTED_SUM, ADD_DIFFERENCE]: + required.update([f"clip_{letter}" for letter in ABC if letter in clip_interpolation]) + elif clip_interpolation == FOLLOW: + required.add("clip_A") + + if model_interpolation in [WEIGHTED_SUM, ADD_DIFFERENCE]: + letters = [letter for letter in ABC if letter in model_interpolation] + required.update([f"model_{letter}" for letter in letters]) + if clip_interpolation == FOLLOW: + required.update([f"clip_{letter}" for letter in letters]) + + return sorted(list(required)) + + def _collect_letter(letter, required_list, model_override, clip_override, ckpt_name, config_name = None): + model, clip, loaded_clip = None, None, None + config_name = config_name + + if f'model_{letter}' in required_list: + if model_override not in [None, "None"]: + model = model_override + else: + if ckpt_name not in [None, "None"]: + model, loaded_clip, _ = loader.load_checkpoint(ckpt_name, config_name) + else: + e = f"Checkpoint name or model override not provided for model_{letter}.\nUnable to merge models using the following interpolation: {model_interpolation}" + ttNl(e).t(f'multiModelMerge [{my_unique_id}]').error().p().interrupt(e) + + if f'clip_{letter}' in required_list: + if clip_override is not None: + clip = clip_override + elif loaded_clip is not None: + clip = loaded_clip + elif ckpt_name not in [None, "None"]: + _, clip, _ = loader.load_checkpoint(ckpt_name, config_name) + else: + e = f"Checkpoint name or clip override not provided for clip_{letter}.\nUnable to merge clips using the following interpolation: {clip_interpolation}" + ttNl(e).t(f'multiModelMerge [{my_unique_id}]').error().p().interrupt(e) + + return model, clip + + def merge(base_model, base_strength, patch_model, 
patch_strength): + m = base_model.clone() + kp = patch_model.get_key_patches("diffusion_model.") + for k in kp: + m.add_patches({k: kp[k]}, patch_strength, base_strength) + return m + + def clip_merge(base_clip, base_strength, patch_clip, patch_strength): + m = base_clip.clone() + kp = patch_clip.get_key_patches() + for k in kp: + if k.endswith(".position_ids") or k.endswith(".logit_scale"): + continue + m.add_patches({k: kp[k]}, patch_strength, base_strength) + return m + + def _add_assets(a1, a2, is_clip=False, multiplier=1.0, weighted=False): + if is_clip: + if weighted: + return clip_merge(a1, (1.0 - multiplier), a2, multiplier) + else: + return clip_merge(a1, 1.0, a2, multiplier) + else: + if weighted: + return merge(a1, (1.0 - multiplier), a2, multiplier) + else: + return merge(a1, 1.0, a2, multiplier) + + def _subtract_assets(a1, a2, is_clip=False, multiplier=1.0): + if is_clip: + return clip_merge(a1, 1.0, a2, -multiplier) + else: + return merge(a1, 1.0, a2, -multiplier) + + required_list = required_assets(model_interpolation, clip_interpolation) + model_A, clip_A = _collect_letter("A", required_list, model_A_override, clip_A_override, ckpt_A_name, config_A_name) + model_B, clip_B = _collect_letter("B", required_list, model_B_override, clip_B_override, ckpt_B_name, config_B_name) + model_C, clip_C = _collect_letter("C", required_list, model_C_override, clip_C_override, ckpt_C_name, config_C_name) + + if (model_interpolation == A_ONLY): + model = model_A + if (model_interpolation == WEIGHTED_SUM): + model = _add_assets(model_A, model_B, False, model_multiplier, True) + if (model_interpolation == ADD_DIFFERENCE): + model = _add_assets(model_A, _subtract_assets(model_B, model_C), False, model_multiplier) + + if (clip_interpolation == FOLLOW): + clip_interpolation = model_interpolation + if (clip_interpolation == A_ONLY): + clip = clip_A + if (clip_interpolation == B_ONLY): + clip = clip_B + if (clip_interpolation == C_ONLY): + clip = clip_C + if (clip_interpolation == WEIGHTED_SUM): + clip = _add_assets(clip_A, clip_B, True, clip_multiplier, True) + if (clip_interpolation == ADD_DIFFERENCE): + clip = _add_assets(clip_A, _subtract_assets(clip_B, clip_C, True), True, clip_multiplier) + + return (model, clip) + +#-----------------------------------------------------------------misc END-------------------------------------------------------------------------# + +#---------------------------------------------------------------ttN/text START----------------------------------------------------------------------# +class ttN_text: + version = '1.0.0' + def __init__(self): + pass + + @classmethod + def INPUT_TYPES(s): + return {"required": { + "text": ("STRING", {"default": "", "multiline": True, "dynamicPrompts": True}), + }, + "hidden": {"ttNnodeVersion": ttN_text.version}, + } + + RETURN_TYPES = ("STRING",) + RETURN_NAMES = ("text",) + FUNCTION = "conmeow" + + CATEGORY = "🌏 tinyterra/text" + + @staticmethod + def conmeow(text): + return text, + +class ttN_textDebug: + version = '1.0.' 
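+    # Aside: the two multiModelMerge interpolations above reduce to per-key
+    # tensor arithmetic; a dependency-light sketch with plain dicts standing in
+    # for the real ModelPatcher key patches (illustrative only):
+    #
+    #   import torch
+    #
+    #   def weighted_sum(a, b, m):        # A*(1-M) + B*M
+    #       return {k: a[k] * (1 - m) + b[k] * m for k in a}
+    #
+    #   def add_difference(a, b, c, m):   # A + (B-C)*M
+    #       return {k: a[k] + (b[k] - c[k]) * m for k in a}
+    #
+    #   A = {'w': torch.ones(2)}; B = {'w': torch.full((2,), 3.0)}; C = {'w': torch.zeros(2)}
+    #   weighted_sum(A, B, 0.5)['w']       # tensor([2., 2.])
+    #   add_difference(A, B, C, 0.25)['w'] # tensor([1.7500, 1.7500])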
+ def __init__(self): + self.num = 0 + + @classmethod + def INPUT_TYPES(s): + return {"required": { + "print_to_console": ([False, True],), + "console_title": ("STRING", {"default": ""}), + "execute": (["Always", "On Change"],), + "text": ("STRING", {"default": '', "multiline": True, "forceInput": True, "dynamicPrompts": True}), + }, + "hidden": {"prompt": "PROMPT", "extra_pnginfo": "EXTRA_PNGINFO", "my_unique_id": "UNIQUE_ID", + "ttNnodeVersion": ttN_textDebug.version}, + } + + RETURN_TYPES = ("STRING",) + RETURN_NAMES = ("text",) + FUNCTION = "write" + OUTPUT_NODE = True + + CATEGORY = "🌏 tinyterra/text" + + def write(self, print_to_console, console_title, execute, text, prompt, extra_pnginfo, my_unique_id): + if execute == "Always": + def IS_CHANGED(self): + self.num += 1 if self.num == 0 else -1 + return self.num + setattr(self.__class__, 'IS_CHANGED', IS_CHANGED) + + if execute == "On Change": + if hasattr(self.__class__, 'IS_CHANGED'): + delattr(self.__class__, 'IS_CHANGED') + + if print_to_console == True: + if console_title != "": + ttNl(text).t(f'textDebug[{my_unique_id}] - {CC.VIOLET}{console_title}').p() + else: + input_node = prompt[my_unique_id]["inputs"]["text"] + + input_from = None + for node in extra_pnginfo["workflow"]["nodes"]: + if node['id'] == int(input_node[0]): + input_from = node['outputs'][input_node[1]].get('label') + + if input_from == None: + input_from = node['outputs'][input_node[1]].get('name') + + ttNl(text).t(f'textDebug[{my_unique_id}] - {CC.VIOLET}{input_from}').p() + + return {"ui": {"text": text}, + "result": (text,)} + +class ttN_concat: + version = '1.0.0' + def __init__(self): + pass + """ + Concatenate 2 strings + """ + @classmethod + def INPUT_TYPES(s): + return {"required": { + "text1": ("STRING", {"multiline": True, "default": '', "dynamicPrompts": True}), + "text2": ("STRING", {"multiline": True, "default": '', "dynamicPrompts": True}), + "text3": ("STRING", {"multiline": True, "default": '', "dynamicPrompts": True}), + "delimiter": ("STRING", {"default":",","multiline": False}), + }, + "hidden": {"ttNnodeVersion": ttN_concat.version}, + } + + RETURN_TYPES = ("STRING",) + RETURN_NAMES = ("concat",) + FUNCTION = "conmeow" + + CATEGORY = "🌏 tinyterra/text" + + def conmeow(self, text1='', text2='', text3='', delimiter=''): + text1 = '' if text1 == 'undefined' else text1 + text2 = '' if text2 == 'undefined' else text2 + text3 = '' if text3 == 'undefined' else text3 + + if delimiter == '\\n': + delimiter = '\n' + + concat = delimiter.join([text1, text2, text3]) + + return (concat,) + +class ttN_text3BOX_3WAYconcat: + version = '1.0.0' + def __init__(self): + pass + """ + Concatenate 3 strings, in various ways. 
+ """ + @classmethod + def INPUT_TYPES(s): + return {"required": { + "text1": ("STRING", {"multiline": True, "default": '', "dynamicPrompts": True}), + "text2": ("STRING", {"multiline": True, "default": '', "dynamicPrompts": True}), + "text3": ("STRING", {"multiline": True, "default": '', "dynamicPrompts": True}), + "delimiter": ("STRING", {"default":",","multiline": False}), + }, + "hidden": {"ttNnodeVersion": ttN_text3BOX_3WAYconcat.version}, + } + + RETURN_TYPES = ("STRING", "STRING", "STRING", "STRING", "STRING", "STRING", "STRING",) + RETURN_NAMES = ("text1", "text2", "text3", "1 & 2", "1 & 3", "2 & 3", "concat",) + FUNCTION = "conmeow" + + CATEGORY = "🌏 tinyterra/text" + + def conmeow(self, text1='', text2='', text3='', delimiter=''): + text1 = '' if text1 == 'undefined' else text1 + text2 = '' if text2 == 'undefined' else text2 + text3 = '' if text3 == 'undefined' else text3 + + if delimiter == '\\n': + delimiter = '\n' + + t_1n2 = delimiter.join([text1, text2]) + t_1n3 = delimiter.join([text1, text3]) + t_2n3 = delimiter.join([text2, text3]) + concat = delimiter.join([text1, text2, text3]) + + return text1, text2, text3, t_1n2, t_1n3, t_2n3, concat + +class ttN_text7BOX_concat: + version = '1.0.0' + def __init__(self): + pass + """ + Concatenate many strings + """ + @classmethod + def INPUT_TYPES(s): + return {"required": { + "text1": ("STRING", {"multiline": True, "default": '', "dynamicPrompts": True}), + "text2": ("STRING", {"multiline": True, "default": '', "dynamicPrompts": True}), + "text3": ("STRING", {"multiline": True, "default": '', "dynamicPrompts": True}), + "text4": ("STRING", {"multiline": True, "default": '', "dynamicPrompts": True}), + "text5": ("STRING", {"multiline": True, "default": '', "dynamicPrompts": True}), + "text6": ("STRING", {"multiline": True, "default": '', "dynamicPrompts": True}), + "text7": ("STRING", {"multiline": True, "default": '', "dynamicPrompts": True}), + "delimiter": ("STRING", {"default":",","multiline": False}), + }, + "hidden": {"ttNnodeVersion": ttN_text7BOX_concat.version}, + } + + RETURN_TYPES = ("STRING", "STRING", "STRING", "STRING", "STRING", "STRING", "STRING", "STRING",) + RETURN_NAMES = ("text1", "text2", "text3", "text4", "text5", "text6", "text7", "concat",) + FUNCTION = "conmeow" + + CATEGORY = "🌏 tinyterra/text" + + def conmeow(self, text1, text2, text3, text4, text5, text6, text7, delimiter): + text1 = '' if text1 == 'undefined' else text1 + text2 = '' if text2 == 'undefined' else text2 + text3 = '' if text3 == 'undefined' else text3 + text4 = '' if text4 == 'undefined' else text4 + text5 = '' if text5 == 'undefined' else text5 + text6 = '' if text6 == 'undefined' else text6 + text7 = '' if text7 == 'undefined' else text7 + + if delimiter == '\\n': + delimiter = '\n' + + texts = [text1, text2, text3, text4, text5, text6, text7] + concat = delimiter.join(text for text in texts if text) + return text1, text2, text3, text4, text5, text6, text7, concat + +class ttN_textCycleLine: + version = '1.0.0' + def __init__(self): + pass + + @classmethod + def INPUT_TYPES(s): + return {"required": { + "text": ("STRING", {"multiline": True, "default": '', "dynamicPrompts": True}), + "index": ("INT", {"default": 0, "min": 0, "max": 0xffffffffffffffff}), + "index_control": (['increment', 'decrement', 'randomize','fixed'],), + }, + "hidden": {"ttNnodeVersion": ttN_textCycleLine.version}, + } + + RETURN_TYPES = ("STRING",) + FUNCTION = "cycle" + + CATEGORY = "🌏 tinyterra/text" + + def cycle(self, text, index, index_control='randomized'): + 
lines = text.split('\n') + + if index >= len(lines): + index = len(lines) - 1 + return (lines[index],) + +class ttN_textOUPUT: + version = '1.0.1' + def __init__(self): + pass + + @classmethod + def INPUT_TYPES(s): + return {"required": { + "text_output": (["Preview", "Save"],{"default": "Preview"}), + "text": ("STRING", {"multiline": True}), + "output_path": ("STRING", {"default": folder_paths.get_output_directory(), "multiline": False}), + "save_prefix": ("STRING", {"default": "ComfyUI"}), + "number_padding": (["None", 2, 3, 4, 5, 6, 7, 8, 9],{"default": 5}), + "file_type": (["txt", "md", "rtf", "log", "ini", "csv"], {"default": "txt"}), + "overwrite_existing": ("BOOLEAN", {"default": False}), + }, + "hidden": {"prompt": "PROMPT", "extra_pnginfo": "EXTRA_PNGINFO", "my_unique_id": "UNIQUE_ID", + "ttNnodeVersion": ttN_imageOUPUT.version}, + } + + RETURN_TYPES = ("STRING",) + RETURN_NAMES = ("text",) + FUNCTION = "output" + CATEGORY = "🌏 tinyterra/text" + OUTPUT_NODE = True + + def output(self, text_output, text, output_path, save_prefix, number_padding, file_type, overwrite_existing, prompt, extra_pnginfo, my_unique_id): + if text_output == 'Save': + ttN_save = ttNsave(my_unique_id, prompt, extra_pnginfo, number_padding, overwrite_existing, output_path) + ttN_save.textfile(text, save_prefix, file_type) + + # Output text results to ui and node outputs + return {"ui": {"text": text}, + "result": (text,)} +#---------------------------------------------------------------ttN/text END------------------------------------------------------------------------# + + +#---------------------------------------------------------------ttN/util START----------------------------------------------------------------------# +class ttN_INT: + version = '1.0.0' + def __init__(self): + pass + + @classmethod + def INPUT_TYPES(s): + return {"required": { + "int": ("INT", {"default": 0, "min": 0, "max": 0xffffffffffffffff}), + }, + "hidden": {"ttNnodeVersion": ttN_INT.version}, + } + + RETURN_TYPES = ("INT", "FLOAT", "STRING",) + RETURN_NAMES = ("int", "float", "text",) + FUNCTION = "convert" + + CATEGORY = "🌏 tinyterra/util" + + @staticmethod + def convert(int): + return int, float(int), str(int) + +class ttN_FLOAT: + version = '1.0.0' + def __init__(self): + pass + + @classmethod + def INPUT_TYPES(s): + return {"required": { + "float": ("FLOAT", {"default": 0.00, "min": 0.00, "max": 0xffffffffffffffff, 'step': 0.01}), + }, + "hidden": {"ttNnodeVersion": ttN_FLOAT.version}, + } + + RETURN_TYPES = ("FLOAT", "INT", "STRING",) + RETURN_NAMES = ("float", "int", "text",) + FUNCTION = "convert" + + CATEGORY = "🌏 tinyterra/util" + + @staticmethod + def convert(float): + return float, int(float), str(float) + +class ttN_SEED: + version = '1.0.0' + def __init__(self): + pass + + @classmethod + def INPUT_TYPES(s): + return {"required": { + "seed": ("INT", {"default": 0, "min": 0, "max": 0xffffffffffffffff}), + }, + "hidden": {"ttNnodeVersion": ttN_SEED.version}, + } + + RETURN_TYPES = ("INT",) + RETURN_NAMES = ("seed",) + FUNCTION = "plant" + OUTPUT_NODE = True + + CATEGORY = "🌏 tinyterra/util" + + @staticmethod + def plant(seed): + return seed, + +class ttN_debugInput: + version = '1.0.0' + @classmethod + def INPUT_TYPES(s): + return {"required": { + "print_to_console": ("BOOLEAN",), + "console_title": ("STRING", {"default": "ttN debug:"}), + "console_color": (["Black", "Red", "Green", "Yellow", "Blue", "Violet", "Cyan", "White", "Grey", "LightRed", "LightGreen", "LightYellow", "LightBlue", "LightViolet", "LightCyan", 
"LightWhite"], {"default": "Red"}), + }, + "optional": { + "debug": (AnyType("*"), {"default": None}), + } + } + + RETURN_TYPES = tuple() + RETURN_NAMES = tuple() + FUNCTION = "debug" + CATEGORY = "🌏 tinyterra/util" + OUTPUT_NODE = True + + def debug(_, print_to_console, console_title, console_color, debug=None): + + text = str(debug) + if print_to_console: + print(f"{getattr(CC, console_color.upper())}{console_title}\n{text}{CC.CLEAN}") + + return {"ui": {"text": text}, "return": tuple()} + +#---------------------------------------------------------------ttN/util End------------------------------------------------------------------------# + + +#---------------------------------------------------------------ttN/image START---------------------------------------------------------------------# +class ttN_imageREMBG: + version = '1.0.0' + def __init__(self): + pass + + @classmethod + def INPUT_TYPES(s): + return {"required": { + "image": ("IMAGE",), + "image_output": (["Hide", "Preview", "Save", "Hide/Save"],{"default": "Preview"}), + "save_prefix": ("STRING", {"default": "ComfyUI"}), + }, + "hidden": {"prompt": "PROMPT", "extra_pnginfo": "EXTRA_PNGINFO", "my_unique_id": "UNIQUE_ID", + "ttNnodeVersion": ttN_imageREMBG.version}, + } + + + RETURN_TYPES = ("IMAGE", "MASK") + RETURN_NAMES = ("image", "mask") + FUNCTION = "remove_background" + CATEGORY = "🌏 tinyterra/image" + OUTPUT_NODE = True + + def remove_background(self, image, image_output, save_prefix, prompt, extra_pnginfo, my_unique_id): + try: + from rembg import remove + except ImportError: + raise ImportError("REMBG is not installed.\nPlease install it with `pip install rembg` or from https://github.com/danielgatis/rembg.") + + image = remove(ttNsampler.tensor2pil(image)) + tensor = ttNsampler.pil2tensor(image) + + #Get alpha mask + if image.getbands() != ("R", "G", "B", "A"): + image = image.convert("RGBA") + mask = None + if "A" in image.getbands(): + mask = np.array(image.getchannel("A")).astype(np.float32) / 255.0 + mask = torch.from_numpy(mask) + mask = 1. 
- mask + else: + mask = torch.zeros((64,64), dtype=torch.float32, device=sampler.device) + + if image_output == "Disabled": + results = [] + else: + ttN_save = ttNsave(my_unique_id, prompt, extra_pnginfo) + results = ttN_save.images(tensor, save_prefix, image_output) + + if image_output in ("Hide", "Hide/Save"): + return (tensor, mask) + + # Output image results to ui and node outputs + return {"ui": {"images": results}, + "result": (tensor, mask)} + +class ttN_imageOUPUT: + version = '1.2.0' + def __init__(self): + pass + + @classmethod + def INPUT_TYPES(s): + return {"required": { + "image": ("IMAGE",), + "image_output": (["Hide", "Preview", "Save", "Hide/Save"],{"default": "Preview"}), + "output_path": ("STRING", {"default": folder_paths.get_output_directory(), "multiline": False}), + "save_prefix": ("STRING", {"default": "ComfyUI"}), + "number_padding": (["None", 2, 3, 4, 5, 6, 7, 8, 9],{"default": 5}), + "file_type": (OUTPUT_FILETYPES, {"default": "png"}), + "overwrite_existing": ("BOOLEAN", {"default": False}), + "embed_workflow": ("BOOLEAN", {"default": True}), + }, + "hidden": {"prompt": "PROMPT", "extra_pnginfo": "EXTRA_PNGINFO", "my_unique_id": "UNIQUE_ID", + "ttNnodeVersion": ttN_imageOUPUT.version}, + } + + RETURN_TYPES = ("IMAGE",) + RETURN_NAMES = ("image",) + FUNCTION = "output" + CATEGORY = "🌏 tinyterra/image" + OUTPUT_NODE = True + + def output(self, image, image_output, output_path, save_prefix, number_padding, file_type, overwrite_existing, embed_workflow, prompt, extra_pnginfo, my_unique_id): + ttN_save = ttNsave(my_unique_id, prompt, extra_pnginfo, number_padding, overwrite_existing, output_path) + results = ttN_save.images(image, save_prefix, image_output, embed_workflow, file_type) + + if image_output in ("Hide", "Hide/Save"): + return (image,) + + # Output image results to ui and node outputs + return {"ui": {"images": results}, + "result": (image,)} + +class ttN_modelScale: + version = '1.1.0' + upscale_methods = ["nearest-exact", "bilinear", "area", "bicubic", "lanczos", "bislerp"] + crop_methods = ["disabled", "center"] + + @classmethod + def INPUT_TYPES(s): + return {"required": { "model_name": (folder_paths.get_filename_list("upscale_models"),), + "vae": ("VAE",), + "image": ("IMAGE",), + "rescale_after_model": ([False, True],{"default": True}), + "rescale_method": (s.upscale_methods,), + "rescale": (["by percentage", "to Width/Height", 'to longer side - maintain aspect'],), + "percent": ("INT", {"default": 50, "min": 0, "max": 1000, "step": 1}), + "width": ("INT", {"default": 1024, "min": 64, "max": MAX_RESOLUTION, "step": 8}), + "height": ("INT", {"default": 1024, "min": 64, "max": MAX_RESOLUTION, "step": 8}), + "longer_side": ("INT", {"default": 1024, "min": 64, "max": MAX_RESOLUTION, "step": 8}), + "crop": (s.crop_methods,), + "image_output": (["Hide", "Preview", "Save", "Hide/Save"],), + "save_prefix": ("STRING", {"default": "ComfyUI"}), + "output_latent": ([False, True],{"default": True}),}, + "hidden": { "prompt": "PROMPT", "extra_pnginfo": "EXTRA_PNGINFO", "my_unique_id": "UNIQUE_ID", + "ttNnodeVersion": ttN_modelScale.version}, + } + + RETURN_TYPES = ("LATENT", "IMAGE",) + RETURN_NAMES = ("latent", 'image',) + + FUNCTION = "upscale" + CATEGORY = "🌏 tinyterra/image" + OUTPUT_NODE = True + + def vae_encode_crop_pixels(self, pixels): + x = (pixels.shape[1] // 8) * 8 + y = (pixels.shape[2] // 8) * 8 + if pixels.shape[1] != x or pixels.shape[2] != y: + x_offset = (pixels.shape[1] % 8) // 2 + y_offset = (pixels.shape[2] % 8) // 2 + pixels = pixels[:, 
x_offset:x + x_offset, y_offset:y + y_offset, :] + return pixels + + def upscale(self, model_name, vae, image, rescale_after_model, rescale_method, rescale, percent, width, height, longer_side, crop, image_output, save_prefix, output_latent, prompt=None, extra_pnginfo=None, my_unique_id=None): + # Load Model + upscale_model = comfy_extras.nodes_upscale_model.UpscaleModelLoader().load_model(model_name)[0] + + # Model upscale + s = comfy_extras.nodes_upscale_model.ImageUpscaleWithModel().upscale(upscale_model, image)[0] + + # Post Model Rescale + if rescale_after_model == True: + samples = s.movedim(-1, 1) + orig_height = samples.shape[2] + orig_width = samples.shape[3] + if rescale == "by percentage" and percent != 0: + height = percent / 100 * orig_height + width = percent / 100 * orig_width + if (width > MAX_RESOLUTION): + width = MAX_RESOLUTION + if (height > MAX_RESOLUTION): + height = MAX_RESOLUTION + + width = ttNsampler.enforce_mul_of_64(width) + height = ttNsampler.enforce_mul_of_64(height) + elif rescale == "to longer side - maintain aspect": + longer_side = ttNsampler.enforce_mul_of_64(longer_side) + if orig_width > orig_height: + width, height = longer_side, ttNsampler.enforce_mul_of_64(longer_side * orig_height / orig_width) + else: + width, height = ttNsampler.enforce_mul_of_64(longer_side * orig_width / orig_height), longer_side + + + s = comfy.utils.common_upscale(samples, width, height, rescale_method, crop) + s = s.movedim(1,-1) + + # vae encode + if output_latent == True: + pixels = self.vae_encode_crop_pixels(s) + t = vae.encode(pixels[:,:,:,:3]) + if image_output == "return latent": + return ({"samples":t}) + else: + t = None + + ttN_save = ttNsave(my_unique_id, prompt, extra_pnginfo) + results = ttN_save.images(s, save_prefix, image_output) + + if image_output in ("Hide", "Hide/Save"): + return ({"samples":t}, s,) + + return {"ui": {"images": results}, + "result": ({"samples":t}, s,)} + +#---------------------------------------------------------------ttN/image END-----------------------------------------------------------------------# + +TTN_VERSIONS = { + "tinyterraNodes": ttN_version, + "pipeLoader_v2": ttN_pipeLoader_v2.version, + "tinyKSampler": ttN_KSampler_v2.version, + "tinyLoader": ttN_tinyLoader.version, + "tinyConditioning": ttN_conditioning.version, + "pipeKSampler_v2": ttN_pipeKSampler_v2.version, + "pipeKSamplerAdvanced_v2": ttN_pipeKSamplerAdvanced_v2.version, + "pipeLoaderSDXL_v2": ttN_pipeLoaderSDXL_v2.version, + "pipeKSamplerSDXL_v2": ttN_pipeKSamplerSDXL_v2.version, + "pipeEDIT": ttN_pipe_EDIT.version, + "pipe2BASIC": ttN_pipe_2BASIC.version, + "pipe2DETAILER": ttN_pipe_2DETAILER.version, + "advanced xyPlot": ttN_advanced_XYPlot.version, + 'advPlot images': ttN_advPlot_images.version, + "advPlot range": ttN_advPlot_range.version, + "advPlot string": ttN_advPlot_string.version, + "advPlot combo": ttN_advPlot_combo.version, + "advPlot merge": ttN_advPlot_merge.version, + "pipeEncodeConcat": ttN_pipeEncodeConcat.version, + "multiLoraStack": ttN_pipeLoraStack.version, + "multiModelMerge": ttN_multiModelMerge.version, + "debugInput": ttN_debugInput.version, + "text": ttN_text.version, + "textDebug": ttN_textDebug.version, + "concat": ttN_concat.version, + "text3BOX_3WAYconcat": ttN_text3BOX_3WAYconcat.version, + "text7BOX_concat": ttN_text7BOX_concat.version, + "textCycleLine": ttN_textCycleLine.version, + "textOutput": ttN_textOUPUT.version, + "imageOutput": ttN_imageOUPUT.version, + "imageREMBG": ttN_imageREMBG.version, + "hiresfixScale": 
ttN_modelScale.version, + "int": ttN_INT.version, + "float": ttN_FLOAT.version, + "seed": ttN_SEED.version +} +NODE_CLASS_MAPPINGS = { + #ttN/base + "ttN tinyLoader": ttN_tinyLoader, + "ttN conditioning": ttN_conditioning, + "ttN KSampler_v2": ttN_KSampler_v2, + + #ttN/pipe + "ttN pipeLoader_v2": ttN_pipeLoader_v2, + "ttN pipeKSampler_v2": ttN_pipeKSampler_v2, + "ttN pipeKSamplerAdvanced_v2": ttN_pipeKSamplerAdvanced_v2, + "ttN pipeLoaderSDXL_v2": ttN_pipeLoaderSDXL_v2, + "ttN pipeKSamplerSDXL_v2": ttN_pipeKSamplerSDXL_v2, + "ttN advanced xyPlot": ttN_advanced_XYPlot, + "ttN advPlot images": ttN_advPlot_images, + "ttN advPlot range": ttN_advPlot_range, + "ttN advPlot string": ttN_advPlot_string, + "ttN advPlot combo": ttN_advPlot_combo, + "ttN advPlot merge": ttN_advPlot_merge, + "ttN pipeEDIT": ttN_pipe_EDIT, + "ttN pipe2BASIC": ttN_pipe_2BASIC, + "ttN pipe2DETAILER": ttN_pipe_2DETAILER, + "ttN pipeEncodeConcat": ttN_pipeEncodeConcat, + "ttN pipeLoraStack": ttN_pipeLoraStack, + + #ttN/misc + "ttN multiModelMerge": ttN_multiModelMerge, + "ttN debugInput": ttN_debugInput, + + #ttN/text + "ttN text": ttN_text, + "ttN textDebug": ttN_textDebug, + "ttN concat": ttN_concat, + "ttN text3BOX_3WAYconcat": ttN_text3BOX_3WAYconcat, + "ttN text7BOX_concat": ttN_text7BOX_concat, + "ttN textCycleLine": ttN_textCycleLine, + "ttN textOutput": ttN_textOUPUT, + + #ttN/image + "ttN imageOutput": ttN_imageOUPUT, + "ttN imageREMBG": ttN_imageREMBG, + "ttN hiresfixScale": ttN_modelScale, + + #ttN/util + "ttN int": ttN_INT, + "ttN float": ttN_FLOAT, + "ttN seed": ttN_SEED, +} +NODE_DISPLAY_NAME_MAPPINGS = { + #ttN/base + "ttN tinyLoader": "tinyLoader", + "ttN conditioning": "tinyConditioning", + "ttN KSampler_v2": "tinyKSampler", + + #ttN/pipe + "ttN pipeLoader_v2": "pipeLoader", + "ttN pipeKSampler_v2": "pipeKSampler", + "ttN pipeKSamplerAdvanced_v2": "pipeKSamplerAdvanced", + "ttN pipeLoaderSDXL_v2": "pipeLoaderSDXL", + "ttN pipeKSamplerSDXL_v2": "pipeKSamplerSDXL", + "ttN pipeEDIT": "pipeEDIT", + "ttN pipe2BASIC": "pipe > basic_pipe", + "ttN pipe2DETAILER": "pipe > detailer_pipe", + "ttN pipeEncodeConcat": "pipeEncodeConcat", + "ttN pipeLoraStack": "pipeLoraStack", + + #ttN/xyPlot + "ttN advanced xyPlot": "advanced xyPlot", + "ttN advPlot images": "advPlot images", + "ttN advPlot range": "advPlot range", + "ttN advPlot string": "advPlot string", + "ttN advPlot combo": "advPlot combo", + "ttN advPlot merge": "advPlot merge", + + #ttN/misc + "ttN multiModelMerge": "multiModelMerge", + "ttN debugInput": "debugInput", + + #ttN/text + "ttN text": "text", + "ttN textDebug": "textDebug", + "ttN concat": "textConcat", + "ttN text7BOX_concat": "7x TXT Loader Concat", + "ttN text3BOX_3WAYconcat": "3x TXT Loader MultiConcat", + "ttN textCycleLine": "textCycleLine", + "ttN textOutput": "textOutput", + + #ttN/image + "ttN imageREMBG": "imageRemBG", + "ttN imageOutput": "imageOutput", + "ttN hiresfixScale": "hiresfixScale", + + #ttN/util + "ttN int": "int", + "ttN float": "float", + "ttN seed": "seed", +} + +ttNl('Loaded').full().p() + +#---------------------------------------------------------------------------------------------------------------------------------------------------# +# (upscale from QualityOfLifeSuite_Omar92) - https://github.com/omar92/ComfyUI-QualityOfLifeSuit_Omar92 # +# (Node weights from BlenderNeko/ComfyUI_ADV_CLIP_emb) - https://github.com/BlenderNeko/ComfyUI_ADV_CLIP_emb # 
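+# Aside: the two mapping dicts above follow ComfyUI's standard extension
+# contract - the loader imports NODE_CLASS_MAPPINGS / NODE_DISPLAY_NAME_MAPPINGS
+# from each custom-node package. A minimal self-contained example of the same
+# pattern (hypothetical node, not part of this pack):
+#
+#   class ExampleNode:
+#       @classmethod
+#       def INPUT_TYPES(cls):
+#           return {"required": {"text": ("STRING", {"default": ""})}}
+#
+#       RETURN_TYPES = ("STRING",)
+#       FUNCTION = "run"
+#       CATEGORY = "examples"
+#
+#       def run(self, text):
+#           return (text.upper(),)
+#
+#   NODE_CLASS_MAPPINGS = {"Example node": ExampleNode}
+#   NODE_DISPLAY_NAME_MAPPINGS = {"Example node": "example"}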
+#---------------------------------------------------------------------------------------------------------------------------------------------------# diff --git a/zavodik/nodes/ComfyUI_tinyterraNodes-main/ttNpy/ttNexecutor.py b/zavodik/nodes/ComfyUI_tinyterraNodes-main/ttNpy/ttNexecutor.py new file mode 100644 index 0000000000000000000000000000000000000000..5bb927fa5cc3d47460811e07c76ff50a12490443 --- /dev/null +++ b/zavodik/nodes/ComfyUI_tinyterraNodes-main/ttNpy/ttNexecutor.py @@ -0,0 +1,510 @@ +import nodes +import torch +import comfy.model_management +import copy +import logging +import sys +import traceback +from execution import full_type_name + +def get_input_data(inputs, class_def, unique_id, outputs={}, prompt={}, extra_data={}): + valid_inputs = class_def.INPUT_TYPES() + input_data_all = {} + for x in inputs: + input_data = inputs[x] + if isinstance(input_data, list): + input_unique_id = input_data[0] + output_index = input_data[1] + if input_unique_id not in outputs: + input_data_all[x] = (None,) + continue + obj = outputs[input_unique_id][output_index] + input_data_all[x] = obj + else: + if ("required" in valid_inputs and x in valid_inputs["required"]) or ("optional" in valid_inputs and x in valid_inputs["optional"]): + input_data_all[x] = [input_data] + + if "hidden" in valid_inputs: + h = valid_inputs["hidden"] + for x in h: + if h[x] == "PROMPT": + input_data_all[x] = [prompt] + if h[x] == "EXTRA_PNGINFO": + input_data_all[x] = [extra_data.get('extra_pnginfo', None)] + if h[x] == "UNIQUE_ID": + input_data_all[x] = [unique_id] + return input_data_all + +def get_output_data(obj, input_data_all): + results = [] + uis = [] + return_values = map_node_over_list(obj, input_data_all, obj.FUNCTION, allow_interrupt=True) + for r in return_values: + if isinstance(r, dict): + if 'ui' in r: + uis.append(r['ui']) + if 'result' in r: + results.append(r['result']) + else: + results.append(r) + output = [] + if len(results) > 0: + # check which outputs need concatenating + # Handle both old tuples and new NodeOutput objects + first_result = results[0] + try: + # Try to get length directly (works for tuples/lists) + result_len = len(first_result) + except TypeError: + # If len() fails, it's likely a NodeOutput - convert it + try: + first_result = tuple(first_result) + results[0] = first_result + result_len = len(first_result) + except: + # Single value output + result_len = 1 + results[0] = (first_result,) + + output_is_list = [False] * result_len + + if hasattr(obj, "OUTPUT_IS_LIST"): + output_is_list = obj.OUTPUT_IS_LIST + # merge node execution results + for i, is_list in zip(range(len(results[0])), output_is_list): + if is_list: + output.append([x for o in results for x in o[i]]) + else: + output.append([o[i] for o in results]) + ui = dict() + if len(uis) > 0: + ui = {k: [y for x in uis for y in x[k]] for k in uis[0].keys()} + return output, ui + +class ttN_advanced_XYPlot: + version = '1.1.0' + plotPlaceholder = "_PLOT\nExample:\n\n\n[node_ID:widget_Name='value']\n\n\n[node_ID:widget_Name='value2']\n[node_ID:widget2_Name='value']\n[node_ID2:widget_Name='value']\n\netc..." 
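+    # Aside: get_plot_points below turns plot text of the placeholder's shape
+    # into {axis_num: {"label": ..., node_id: {widget: value}}}. A compact
+    # regex-based approximation (standalone, for illustration only):
+    #
+    #   import re
+    #
+    #   def parse_plot_text(plot_text):
+    #       axes = {}
+    #       for num, label, body in re.findall(r"<(\d+):(\w+)>((?:(?!<).)*)", plot_text, re.S):
+    #           entry = {"label": label}
+    #           for node_id, widget, value in re.findall(r"\[(\w+):(\w+)='([^']*)'\]", body):
+    #               entry.setdefault(node_id, {})[widget] = value
+    #           axes[num] = entry
+    #       return axes
+    #
+    #   parse_plot_text("<1:v_label>\n[3:cfg='4.0']")
+    #   # -> {'1': {'label': 'v_label', '3': {'cfg': '4.0'}}}
+    #
+    # (the real parser additionally rewrites 'v_label'-style labels into the
+    # joined values and re-attaches '<lora:...>' strings split by the '<')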
+
+    def get_plot_points(plot_data, unique_id):
+        if plot_data is None or plot_data.strip() == '':
+            return None
+        else:
+            try:
+                axis_dict = {}
+                lines = plot_data.split('<')
+                new_lines = []
+                temp_line = ''
+
+                for line in lines:
+                    if line.startswith('lora'):
+                        temp_line += '<' + line
+                        new_lines[-1] = temp_line
+                    else:
+                        new_lines.append(line)
+                        temp_line = line
+
+                for line in new_lines:
+                    if line:
+                        values_label = []
+                        line = line.split('>', 1)
+                        num, label = line[0].split(':', 1)
+                        axis_dict[num] = {"label": label}
+                        for point in line[1].split('['):
+                            if point.strip() != '':
+                                node_id = point.split(':', 1)[0]
+                                # don't clobber widgets already collected for this node
+                                axis_dict[num].setdefault(node_id, {})
+                                input_name = point.split(':', 1)[1].split('=')[0]
+                                value = point.split("'")[1]
+                                values_label.append((value, input_name, node_id))
+
+                                axis_dict[num][node_id][input_name] = value
+
+                        if label in ['v_label', 'tv_label', 'idtv_label']:
+                            new_label = []
+                            for value, input_name, node_id in values_label:
+                                if label == 'v_label':
+                                    new_label.append(value)
+                                elif label == 'tv_label':
+                                    new_label.append(f'{input_name}: {value}')
+                                elif label == 'idtv_label':
+                                    new_label.append(f'[{node_id}] {input_name}: {value}')
+                            axis_dict[num]['label'] = ', '.join(new_label)
+
+            except ValueError:
+                ttNl('Invalid Plot - defaulting to None...').t(f'advanced_XYPlot[{unique_id}]').warn().p()
+                return None
+            return axis_dict
+
+    def __init__(self):
+        pass
+
+    @classmethod
+    def INPUT_TYPES(s):
+        return {
+            "required": {
+                "grid_spacing": ("INT",{"min": 0, "max": 500, "step": 5, "default": 0,}),
+                "save_individuals": ("BOOLEAN", {"default": False}),
+                "flip_xy": ("BOOLEAN", {"default": False}),
+
+                "x_plot": ("STRING",{"default": '', "multiline": True, "placeholder": 'X' + ttN_advanced_XYPlot.plotPlaceholder, "pysssss.autocomplete": False}),
+                "y_plot": ("STRING",{"default": '', "multiline": True, "placeholder": 'Y' + ttN_advanced_XYPlot.plotPlaceholder, "pysssss.autocomplete": False}),
+            },
+            "hidden": {
+                # plain strings, to match the checks in get_input_data above
+                "prompt": "PROMPT",
+                "extra_pnginfo": "EXTRA_PNGINFO",
+                "my_unique_id": "UNIQUE_ID",
+                "ttNnodeVersion": ttN_advanced_XYPlot.version,
+            },
+        }
+
+    RETURN_TYPES = ("ADV_XYPLOT", )
+    RETURN_NAMES = ("adv_xyPlot", )
+    FUNCTION = "plot"
+
+    CATEGORY = "🌏 tinyterra/xyPlot"
+
+    def plot(self, grid_spacing, save_individuals, flip_xy, x_plot=None, y_plot=None, prompt=None, extra_pnginfo=None, my_unique_id=None):
+        x_plot = ttN_advanced_XYPlot.get_plot_points(x_plot, my_unique_id)
+        y_plot = ttN_advanced_XYPlot.get_plot_points(y_plot, my_unique_id)
+
+        if x_plot == {}:
+            x_plot = None
+        if y_plot == {}:
+            y_plot = None
+
+        if flip_xy:  # flip_xy is a BOOLEAN input, not the string "True"
+            x_plot, y_plot = y_plot, x_plot
+
+        xy_plot = {"x_plot": x_plot,
+                   "y_plot": y_plot,
+                   "grid_spacing": grid_spacing,
+                   "save_individuals": save_individuals,}
+
+        return (xy_plot, )
+
+class ttN_Plotting(ttN_advanced_XYPlot):
+    def plot(self, **args):
+        xy_plot = None
+        return (xy_plot, )
+
+
+def map_node_over_list(obj, input_data_all, func, allow_interrupt=False):
+    # check if node wants the lists
+    input_is_list = False
+    if hasattr(obj, "INPUT_IS_LIST"):
+        input_is_list = obj.INPUT_IS_LIST
+
+    if len(input_data_all) == 0:
+        max_len_input = 0
+    else:
+        max_len_input = max([len(x) for x in input_data_all.values()])
+
+    # get a slice of inputs, repeat last input when list isn't long enough
+    def slice_dict(d, i):
+        d_new = dict()
+        for k,v in d.items():
+            d_new[k] = v[i if len(v) > i else -1]
+        return d_new
+
+    results = []
+    if input_is_list:
+        if allow_interrupt:
+            nodes.before_node_execution()
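+        # whole-list mode: the node receives each input's full list in one call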
results.append(getattr(obj, func)(**input_data_all)) + elif max_len_input == 0: + if allow_interrupt: + nodes.before_node_execution() + results.append(getattr(obj, func)()) + else: + for i in range(max_len_input): + if allow_interrupt: + nodes.before_node_execution() + results.append(getattr(obj, func)(**slice_dict(input_data_all, i))) + return results + +def format_value(x): + if x is None: + return None + elif isinstance(x, (int, float, bool, str)): + return x + else: + return str(x) + +def recursive_execute(prompt, outputs, current_item, extra_data, executed, prompt_id, outputs_ui, object_storage): + unique_id = current_item + inputs = prompt[unique_id]['inputs'] + class_type = prompt[unique_id]['class_type'] + if class_type == "ttN advanced xyPlot": + class_def = ttN_Plotting #Fake class to avoid recursive execute of xy_plot node + else: + class_def = nodes.NODE_CLASS_MAPPINGS[class_type] + + if unique_id in outputs: + print('returning already executed', unique_id) + return (True, None, None) + + for x in inputs: + input_data = inputs[x] + + if isinstance(input_data, list): + input_unique_id = input_data[0] + output_index = input_data[1] + if input_unique_id not in outputs: + result = recursive_execute(prompt, outputs, input_unique_id, extra_data, executed, prompt_id, outputs_ui, object_storage) + if result[0] is not True: + # Another node failed further upstream + return result + + input_data_all = None + try: + input_data_all = get_input_data(inputs, class_def, unique_id, outputs, prompt, extra_data) + + obj = object_storage.get((unique_id, class_type), None) + if obj is None: + obj = class_def() + object_storage[(unique_id, class_type)] = obj + + output_data, output_ui = get_output_data(obj, input_data_all) + outputs[unique_id] = output_data + if len(output_ui) > 0: + outputs_ui[unique_id] = output_ui + + except comfy.model_management.InterruptProcessingException as iex: + logging.info("Processing interrupted") + + # skip formatting inputs/outputs + error_details = { + "node_id": unique_id, + } + + return (False, error_details, iex) + except Exception as ex: + typ, _, tb = sys.exc_info() + exception_type = full_type_name(typ) + input_data_formatted = {} + if input_data_all is not None: + input_data_formatted = {} + for name, inputs in input_data_all.items(): + input_data_formatted[name] = [format_value(x) for x in inputs] + + output_data_formatted = {} + for node_id, node_outputs in outputs.items(): + output_data_formatted[node_id] = [[format_value(x) for x in l] for l in node_outputs] + + logging.error(f"!!! Exception during xyPlot processing!!! 
{ex}") + logging.error(traceback.format_exc()) + + error_details = { + "node_id": unique_id, + "exception_message": str(ex), + "exception_type": exception_type, + "traceback": traceback.format_tb(tb), + "current_inputs": input_data_formatted, + "current_outputs": output_data_formatted + } + return (False, error_details, ex) + + executed.add(unique_id) + + return (True, None, None) + +def recursive_will_execute(prompt, outputs, current_item, memo={}): + unique_id = current_item + + if unique_id in memo: + return memo[unique_id] + + inputs = prompt[unique_id]['inputs'] + will_execute = [] + if unique_id in outputs: + return [] + + for x in inputs: + input_data = inputs[x] + if isinstance(input_data, list): + input_unique_id = input_data[0] + output_index = input_data[1] + if input_unique_id not in outputs: + will_execute += recursive_will_execute(prompt, outputs, input_unique_id, memo) + + memo[unique_id] = will_execute + [unique_id] + return memo[unique_id] + +def recursive_output_delete_if_changed(prompt, old_prompt, outputs, current_item): + unique_id = current_item + inputs = prompt[unique_id]['inputs'] + class_type = prompt[unique_id]['class_type'] + class_def = nodes.NODE_CLASS_MAPPINGS[class_type] + + is_changed_old = '' + is_changed = '' + to_delete = False + if hasattr(class_def, 'IS_CHANGED'): + if unique_id in old_prompt and 'is_changed' in old_prompt[unique_id]: + is_changed_old = old_prompt[unique_id]['is_changed'] + if 'is_changed' not in prompt[unique_id]: + input_data_all = get_input_data(inputs, class_def, unique_id, outputs) + if input_data_all is not None: + try: + #is_changed = class_def.IS_CHANGED(**input_data_all) + is_changed = map_node_over_list(class_def, input_data_all, "IS_CHANGED") + prompt[unique_id]['is_changed'] = is_changed + except: + to_delete = True + else: + is_changed = prompt[unique_id]['is_changed'] + + if unique_id not in outputs: + return True + + if not to_delete: + if is_changed != is_changed_old: + to_delete = True + elif unique_id not in old_prompt: + to_delete = True + elif inputs == old_prompt[unique_id]['inputs']: + for x in inputs: + input_data = inputs[x] + + if isinstance(input_data, list): + input_unique_id = input_data[0] + output_index = input_data[1] + if input_unique_id in outputs: + to_delete = recursive_output_delete_if_changed(prompt, old_prompt, outputs, input_unique_id) + else: + to_delete = True + if to_delete: + break + else: + to_delete = True + + if to_delete: + d = outputs.pop(unique_id) + del d + return to_delete + + +class xyExecutor: + def __init__(self): + self.reset() + + def reset(self): + self.outputs = {} + self.object_storage = {} + self.outputs_ui = {} + self.status_messages = [] + self.success = True + self.old_prompt = {} + + def add_message(self, event, data, broadcast: bool): + self.status_messages.append((event, data)) + + def handle_execution_error(self, prompt_id, prompt, current_outputs, executed, error, ex): + node_id = error["node_id"] + class_type = prompt[node_id]["class_type"] + + # First, send back the status to the frontend depending + # on the exception type + if isinstance(ex, comfy.model_management.InterruptProcessingException): + mes = { + "prompt_id": prompt_id, + "node_id": node_id, + "node_type": class_type, + "executed": list(executed), + } + self.add_message("execution_interrupted", mes, broadcast=True) + else: + mes = { + "prompt_id": prompt_id, + "node_id": node_id, + "node_type": class_type, + "executed": list(executed), + + "exception_message": error["exception_message"], + 
"exception_type": error["exception_type"], + "traceback": error["traceback"], + "current_inputs": error["current_inputs"], + "current_outputs": error["current_outputs"], + } + self.add_message("execution_error", mes, broadcast=False) + + # Next, remove the subsequent outputs since they will not be executed + to_delete = [] + for o in self.outputs: + if (o not in current_outputs) and (o not in executed): + to_delete += [o] + if o in self.old_prompt: + d = self.old_prompt.pop(o) + del d + for o in to_delete: + d = self.outputs.pop(o) + del d + + raise Exception(ex) + + def execute(self, prompt, prompt_id, extra_data={}, execute_outputs=[]): + nodes.interrupt_processing(False) + + self.status_messages = [] + self.add_message("execution_start", { "prompt_id": prompt_id}, broadcast=False) + + with torch.inference_mode(): + #delete cached outputs if nodes don't exist for them + to_delete = [] + for o in self.outputs: + if o not in prompt: + to_delete += [o] + for o in to_delete: + d = self.outputs.pop(o) + del d + to_delete = [] + for o in self.object_storage: + if o[0] not in prompt: + to_delete += [o] + else: + p = prompt[o[0]] + if o[1] != p['class_type']: + to_delete += [o] + for o in to_delete: + d = self.object_storage.pop(o) + del d + + for x in prompt: + recursive_output_delete_if_changed(prompt, self.old_prompt, self.outputs, x) + + current_outputs = set(self.outputs.keys()) + for x in list(self.outputs_ui.keys()): + if x not in current_outputs: + d = self.outputs_ui.pop(x) + del d + + comfy.model_management.cleanup_models() + self.add_message("execution_cached", + { "nodes": list(current_outputs) , "prompt_id": prompt_id}, + broadcast=False) + executed = set() + output_node_id = None + to_execute = [] + + for node_id in list(execute_outputs): + to_execute += [(0, node_id)] + + while len(to_execute) > 0: + #always execute the output that depends on the least amount of unexecuted nodes first + memo = {} + to_execute = sorted(list(map(lambda a: (len(recursive_will_execute(prompt, self.outputs, a[-1], memo)), a[-1]), to_execute))) + output_node_id = to_execute.pop(0)[-1] + + # This call shouldn't raise anything if there's an error deep in + # the actual SD code, instead it will report the node where the + # error was raised + self.success, error, ex = recursive_execute(prompt, self.outputs, output_node_id, extra_data, executed, prompt_id, self.outputs_ui, self.object_storage) + if self.success is not True: + self.handle_execution_error(prompt_id, prompt, current_outputs, executed, error, ex) + break + + for x in executed: + self.old_prompt[x] = copy.deepcopy(prompt[x]) + + if comfy.model_management.DISABLE_SMART_MEMORY: + comfy.model_management.unload_all_models() diff --git a/zavodik/nodes/ComfyUI_tinyterraNodes-main/ttNpy/ttNlegacyNodes.py b/zavodik/nodes/ComfyUI_tinyterraNodes-main/ttNpy/ttNlegacyNodes.py new file mode 100644 index 0000000000000000000000000000000000000000..972cf508690338fef00311de22024a2dbdc3a72d --- /dev/null +++ b/zavodik/nodes/ComfyUI_tinyterraNodes-main/ttNpy/ttNlegacyNodes.py @@ -0,0 +1,2078 @@ +import folder_paths +import os +import re +import json +import torch +import random +import datetime +from pathlib import Path +from urllib.request import urlopen +from typing import Dict, List, Optional, Tuple, Union, Any + +from PIL.PngImagePlugin import PngInfo +from PIL import Image, ImageDraw, ImageFont +import numpy as np +import hashlib + +import comfy.samplers +import latent_preview +from comfy.sd import CLIP, VAE +from .adv_encode import advanced_encode +from 
.utils import CC, ttNl, ttNpaths +from comfy.model_patcher import ModelPatcher +from nodes import MAX_RESOLUTION, ControlNetApplyAdvanced + + +class ttNloader: + def __init__(self): + self.loraDict = {lora.split('\\')[-1]: lora for lora in folder_paths.get_filename_list("loras")} + + @staticmethod + def nsp_parse(text, seed=0, noodle_key='__', nspterminology=None, pantry_path=None, title=None, my_unique_id=None): + if "__" not in text: + return text + + if nspterminology is None: + # Fetch the NSP Pantry + if pantry_path is None: + pantry_path = os.path.join(ttNpaths.tinyterraNodes, 'nsp_pantry.json') + if not os.path.exists(pantry_path): + response = urlopen('https://raw.githubusercontent.com/WASasquatch/noodle-soup-prompts/main/nsp_pantry.json') + tmp_pantry = json.loads(response.read()) + # Dump JSON locally + pantry_serialized = json.dumps(tmp_pantry, indent=4) + with open(pantry_path, "w") as f: + f.write(pantry_serialized) + del response, tmp_pantry + + # Load local pantry + with open(pantry_path, 'r') as f: + nspterminology = json.load(f) + + if seed > 0 or seed < 0: + random.seed(seed) + + # Parse Text + new_text = text + for term in nspterminology: + # Target Noodle + tkey = f'{noodle_key}{term}{noodle_key}' + # How many occurrences? + tcount = new_text.count(tkey) + + if tcount > 0: + nsp_parsed = True + + # Apply random results for each noodle counted + for _ in range(tcount): + new_text = new_text.replace( + tkey, random.choice(nspterminology[term]), 1) + seed += 1 + random.seed(seed) + + ttNl(new_text).t(f'{title}[{my_unique_id}]').p() + + + return new_text + + @staticmethod + def clean_values(values: str): + original_values = values.split("; ") + cleaned_values = [] + + for value in original_values: + cleaned_value = value.strip(';').strip() + if cleaned_value: + try: + cleaned_value = int(cleaned_value) + except ValueError: + try: + cleaned_value = float(cleaned_value) + except ValueError: + pass + + cleaned_values.append(cleaned_value) + return cleaned_values + + @staticmethod + def string_to_seed(s): + h = hashlib.sha256(s.encode()).digest() + return (int.from_bytes(h, byteorder='big') & 0xffffffffffffffff) + + def load_checkpoint(self, ckpt_name, config_name=None, clip_skip=0): + ckpt_path = folder_paths.get_full_path("checkpoints", ckpt_name) + if config_name not in [None, "Default"]: + config_path = folder_paths.get_full_path("configs", config_name) + loaded_ckpt = comfy.sd.load_checkpoint(config_path, ckpt_path, output_vae=True, output_clip=True, embedding_directory=folder_paths.get_folder_paths("embeddings")) + else: + loaded_ckpt = comfy.sd.load_checkpoint_guess_config(ckpt_path, output_vae=True, output_clip=True, embedding_directory=folder_paths.get_folder_paths("embeddings")) + + clip = loaded_ckpt[1].clone() + if clip_skip != 0: + clip.clip_layer(clip_skip) + + # model, clip, vae + return loaded_ckpt[0], clip, loaded_ckpt[2] + + def load_unclip(self, ckpt_name, output_vae=True, output_clip=True): + ckpt_path = folder_paths.get_full_path("checkpoints", ckpt_name) + out = comfy.sd.load_checkpoint_guess_config(ckpt_path, output_vae=True, output_clip=True, output_clipvision=True, embedding_directory=folder_paths.get_folder_paths("embeddings")) + return out + + def load_vae(self, vae_name): + vae_path = folder_paths.get_full_path("vae", vae_name) + sd = comfy.utils.load_torch_file(vae_path) + loaded_vae = comfy.sd.VAE(sd=sd) + + return loaded_vae + + def load_controlNet(self, positive, negative, controlnet_name, image, strength, start_percent, end_percent): + if 
type(controlnet_name) == str:
+            controlnet_path = folder_paths.get_full_path("controlnet", controlnet_name)
+            controlnet = comfy.controlnet.load_controlnet(controlnet_path)
+        else:
+            controlnet = controlnet_name
+
+        controlnet_conditioning = ControlNetApplyAdvanced().apply_controlnet(positive, negative, controlnet, image, strength, start_percent, end_percent)
+        base_positive, base_negative = controlnet_conditioning[0], controlnet_conditioning[1]
+        return base_positive, base_negative
+
+    def load_lora(self, lora_name, model, clip, strength_model, strength_clip):
+        if strength_model == 0 and strength_clip == 0:
+            return (model, clip)
+
+        #print('LORA NAME', lora_name)
+
+        lora_path = folder_paths.get_full_path("loras", lora_name)
+        if lora_path is None or not os.path.exists(lora_path):
+            ttNl(f'{lora_path}').t("Skipping missing lora").error().p()
+            return (model, clip)
+
+        lora = comfy.utils.load_torch_file(lora_path, safe_load=True)
+        model_lora, clip_lora = comfy.sd.load_lora_for_models(model, clip, lora, strength_model, strength_clip)
+
+        return model_lora, clip_lora
+
+    def validate_lora_format(self, lora_string):
+        # Expected form: <lora:name:model_weight> or <lora:name:model_weight:clip_weight>
+        if not re.match(r"^<lora:[^<>:]+(?::-?\d+(?:\.\d+)?){1,2}>$", lora_string):
+            ttNl(f'{lora_string}').t("Skipping invalid lora format").error().p()
+            return None
+        return lora_string
+
+    def parse_lora_string(self, lora_string):
+        # Remove '<lora:' from the start and '>' from the end, then split by ':'
+        parts = lora_string[6:-1].split(':') # 6 is the length of '<lora:'
+
+        lora_name = parts[0] if len(parts) > 0 else None
+        lora_name = self.loraDict.get(lora_name, lora_name)
+        weight1 = float(parts[1]) if len(parts) > 1 else None
+        weight2 = float(parts[2]) if len(parts) > 2 else weight1
+        return lora_name, weight1, weight2
+
+    def load_lora_text(self, loras, model, clip):
+        # Extract potential <lora:...> patterns
+        pattern = r'<lora:[^<>]+>'
+        matches = re.findall(pattern, loras)
+
+        # Validate each extracted pattern
+        for match in matches:
+            match = self.validate_lora_format(match)
+            if match is not None:
+                lora_name, weight1, weight2 = self.parse_lora_string(match)
+                model, clip = self.load_lora(lora_name, model, clip, weight1, weight2)
+
+        return model, clip
+
+    def embedding_encode(self, text, token_normalization, weight_interpretation, clip, seed=None, title=None, my_unique_id=None, prepend_text=None):
+        text = f'{prepend_text} {text}' if prepend_text is not None else text
+        if seed is None:
+            seed = self.string_to_seed(text)
+
+        text = self.nsp_parse(text, seed, title=title, my_unique_id=my_unique_id)
+
+        embedding, pooled = advanced_encode(clip, text, token_normalization, weight_interpretation, w_max=1.0, apply_to_pooled='enable')
+        return [[embedding, {"pooled_output": pooled}]]
+
+    def embedding_encodeXL(self, text, clip, seed=0, title=None, my_unique_id=None, prepend_text=None, text2=None, prepend_text2=None, width=None, height=None, crop_width=0, crop_height=0, target_width=None, target_height=None, refiner_clip=None, ascore=None):
+        text = f'{prepend_text} {text}' if prepend_text is not None else text
+        text = self.nsp_parse(text, seed, title=title, my_unique_id=my_unique_id)
+
+        target_width = target_width if target_width is not None else width
+        target_height = target_height if target_height is not None else height
+
+        if text2 is not None and refiner_clip is not None:
+            text2 = f'{prepend_text2} {text2}' if prepend_text2 is not None else text2
+            text2 = self.nsp_parse(text2, seed, title=title, my_unique_id=my_unique_id)
+
+            tokens_refiner = refiner_clip.tokenize(text2)
+            cond_refiner, pooled_refiner = refiner_clip.encode_from_tokens(tokens_refiner, return_pooled=True)
+            refiner_conditioning =
[[cond_refiner, {"pooled_output": pooled_refiner, "aesthetic_score": ascore, "width": width,"height": height}]] + else: + refiner_conditioning = None + + if text2 is None or text2.strip() == '': + text2 = text + + tokens = clip.tokenize(text) + tokens["l"] = clip.tokenize(text2)["l"] + if len(tokens["l"]) != len(tokens["g"]): + empty = clip.tokenize("") + while len(tokens["l"]) < len(tokens["g"]): + tokens["l"] += empty["l"] + while len(tokens["l"]) > len(tokens["g"]): + tokens["g"] += empty["g"] + cond, pooled = clip.encode_from_tokens(tokens, return_pooled=True) + conditioning = [[cond, {"pooled_output": pooled, "width": width, "height": height, "crop_w": crop_width, "crop_h": crop_height, "target_width": target_width, "target_height": target_height}]] + + + + return conditioning, refiner_conditioning + + def load_main3(self, ckpt_name, config_name, vae_name, loras, clip_skip, model_override=None, clip_override=None, optional_lora_stack=None): + # Load models + if (model_override is not None) and (clip_override is not None) and (vae_name != "Baked VAE"): + model, clip, vae = None, None, None + else: + model, clip, vae = self.load_checkpoint(ckpt_name, config_name, clip_skip) + + if model_override is not None: + model = model_override + del model_override + + if clip_override is not None: + clip = clip_override.clone() + + if clip_skip != 0: + clip.clip_layer(clip_skip) + del clip_override + + if vae_name != "Baked VAE": + vae = self.load_vae(vae_name) + + if optional_lora_stack is not None: + for lora in optional_lora_stack: + model, clip = self.load_lora(lora[0], model, clip, lora[1], lora[2]) + + if loras not in [None, "None"]: + model, clip = self.load_lora_text(loras, model, clip) + + if not clip: + raise Exception("No CLIP found") + + return model, clip, vae + +class ttNsampler: + def __init__(self): + self.last_helds: dict[str, list] = { + "results": [], + "pipe_line": [], + } + self.device = comfy.model_management.intermediate_device() + + @staticmethod + def tensor2pil(image: torch.Tensor) -> Image.Image: + """Convert a torch tensor to a PIL image.""" + return Image.fromarray(np.clip(255. 
* image.cpu().numpy().squeeze(), 0, 255).astype(np.uint8))
+
+    @staticmethod
+    def pil2tensor(image: Image.Image) -> torch.Tensor:
+        """Convert a PIL image to a torch tensor."""
+        return torch.from_numpy(np.array(image).astype(np.float32) / 255.0).unsqueeze(0)
+
+    @staticmethod
+    def enforce_mul_of_64(d):
+        # Despite the name, this snaps a dimension to a multiple of 8 (one latent pixel = 8 image pixels)
+        d = int(d)
+        if d <= 7:
+            d = 8
+        leftover = d % 8 # latent dimensions must be multiples of 8
+        if leftover != 0: # snap to the nearest multiple of 8
+            if (leftover < 4): # closer to the lower multiple: round down
+                d -= leftover
+            else: # closer to the upper multiple: round up
+                d += 8 - leftover
+
+        return int(d)
+
+    @staticmethod
+    def safe_split(to_split: str, delimiter: str) -> List[str]:
+        """Split the input string and return a list of non-empty parts."""
+        parts = to_split.split(delimiter)
+        parts = [part for part in parts if part not in ('', ' ', '  ')]
+
+        while len(parts) < 2:
+            parts.append('None')
+        return parts
+
+    def emptyLatent(self, empty_latent_aspect: str, batch_size:int, width:int = None, height:int = None) -> torch.Tensor:
+        if empty_latent_aspect and empty_latent_aspect != "width x height [custom]":
+            width, height = empty_latent_aspect.replace(' ', '').split('[')[0].split('x')
+
+        latent = torch.zeros([batch_size, 4, int(height) // 8, int(width) // 8], device=self.device)
+        return latent
+
+    def common_ksampler(self, model, seed, steps, cfg, sampler_name, scheduler, positive, negative, latent, denoise=1.0, disable_noise=False, start_step=None, last_step=None, force_full_denoise=False, preview_latent=True, disable_pbar=False):
+        device = comfy.model_management.get_torch_device()
+        latent_image = latent["samples"]
+
+        if disable_noise:
+            noise = torch.zeros(latent_image.size(), dtype=latent_image.dtype, layout=latent_image.layout, device="cpu")
+        else:
+            batch_inds = latent["batch_index"] if "batch_index" in latent else None
+            noise = comfy.sample.prepare_noise(latent_image, seed, batch_inds)
+
+        noise_mask = None
+        if "noise_mask" in latent:
+            noise_mask = latent["noise_mask"]
+
+        preview_format = "JPEG"
+        if preview_format not in ["JPEG", "PNG"]:
+            preview_format = "JPEG"
+
+        previewer = False
+
+        if preview_latent:
+            previewer = latent_preview.get_previewer(device, model.model.latent_format)
+
+        pbar = comfy.utils.ProgressBar(steps)
+        def callback(step, x0, x, total_steps):
+            preview_bytes = None
+            if previewer:
+                preview_bytes = previewer.decode_latent_to_preview_image(preview_format, x0)
+            pbar.update_absolute(step + 1, total_steps, preview_bytes)
+
+        samples = comfy.sample.sample(model, noise, steps, cfg, sampler_name, scheduler, positive, negative, latent_image,
+                                      denoise=denoise, disable_noise=disable_noise, start_step=start_step, last_step=last_step,
+                                      force_full_denoise=force_full_denoise, noise_mask=noise_mask, callback=callback, disable_pbar=disable_pbar, seed=seed)
+
+        out = latent.copy()
+        out["samples"] = samples
+        return out
+
+    def process_hold_state(self, pipe, image_output, my_unique_id, sdxl=False):
+        title = f'pipeKSampler[{my_unique_id}]' if not sdxl else f'pipeKSamplerSDXL[{my_unique_id}]'
+        ttNl('Held').t(title).p()
+
+        last_pipe = self.init_state(my_unique_id, "pipe_line", pipe) if not sdxl else self.init_state(my_unique_id, "pipe_line_sdxl", pipe)
+
+        last_results = self.init_state(my_unique_id, "results", list())
+
+        output = self.get_output(last_pipe) if not sdxl else self.get_output_sdxl_v2(last_pipe)
+
+        if image_output in ("Hide", "Hide/Save", "Disabled"):
+            return output
+
+        return {"ui": {"images": last_results}, "result": output}
+
+    def get_value_by_id(self, key: str, my_unique_id: Any) -> Optional[Any]:
+        """Retrieve value by its associated ID."""
+        try:
+            for value, id_ in self.last_helds[key]:
+                if id_ == my_unique_id:
+                    return value
+        except KeyError:
+            return None
+
+    def update_value_by_id(self, key: str, my_unique_id: Any, new_value: Any) -> Union[bool, None]:
+        """Update the value associated with a given ID. Return True if updated, False if appended, None if key doesn't exist."""
+        try:
+            for i, (value, id_) in enumerate(self.last_helds[key]):
+                if id_ == my_unique_id:
+                    self.last_helds[key][i] = (new_value, id_)
+                    return True
+            self.last_helds[key].append((new_value, my_unique_id))
+            return False
+        except KeyError:
+            return False
+
+    def upscale(self, samples, upscale_method, scale_by, crop):
+        s = samples.copy()
+        width = self.enforce_mul_of_64(round(samples["samples"].shape[3] * scale_by))
+        height = self.enforce_mul_of_64(round(samples["samples"].shape[2] * scale_by))
+
+        if (width > MAX_RESOLUTION):
+            width = MAX_RESOLUTION
+        if (height > MAX_RESOLUTION):
+            height = MAX_RESOLUTION
+
+        s["samples"] = comfy.utils.common_upscale(samples["samples"], width, height, upscale_method, crop)
+        return (s,)
+
+    def handle_upscale(self, samples: dict, upscale_method: str, factor: float, crop: bool) -> dict:
+        """Upscale the samples if the upscale_method is not set to 'None'."""
+        if upscale_method != "None":
+            samples = self.upscale(samples, upscale_method, factor, crop)[0]
+        return samples
+
+    def init_state(self, my_unique_id: Any, key: str, default: Any) -> Any:
+        """Initialize the state by either fetching the stored value or setting a default."""
+        value = self.get_value_by_id(key, my_unique_id)
+        if value is not None:
+            return value
+        return default
+
+    def get_output(self, pipe: dict) -> Tuple:
+        """Return a tuple of various elements fetched from the input pipe dictionary."""
+        return (
+            pipe,
+            pipe.get("model"),
+            pipe.get("positive"),
+            pipe.get("negative"),
+            pipe.get("samples"),
+            pipe.get("vae"),
+            pipe.get("clip"),
+            pipe.get("images"),
+            pipe.get("seed")
+        )
+
+    def get_output_sdxl(self, sdxl_pipe: dict) -> Tuple:
+        """Return a tuple of various elements fetched from the input sdxl_pipe dictionary."""
+        return (
+            sdxl_pipe,
+            sdxl_pipe.get("model"),
+            sdxl_pipe.get("positive"),
+            sdxl_pipe.get("negative"),
+            sdxl_pipe.get("vae"),
+            sdxl_pipe.get("refiner_model"),
+            sdxl_pipe.get("refiner_positive"),
+            sdxl_pipe.get("refiner_negative"),
+            sdxl_pipe.get("refiner_vae"),
+            sdxl_pipe.get("samples"),
+            sdxl_pipe.get("clip"),
+            sdxl_pipe.get("images"),
+            sdxl_pipe.get("seed")
+        )
+
+    def get_output_sdxl_v2(self, sdxl_pipe: dict, pipe: dict = None) -> Tuple:
+        """Return a tuple of various elements fetched from the input sdxl_pipe dictionary."""
+        # pipe defaults to None so callers that only hold an sdxl_pipe (e.g. process_hold_state) don't crash
+        return (
+            sdxl_pipe,
+            pipe,
+            sdxl_pipe.get("model"),
+            sdxl_pipe.get("positive"),
+            sdxl_pipe.get("negative"),
+            sdxl_pipe.get("refiner_model"),
+            sdxl_pipe.get("refiner_positive"),
+            sdxl_pipe.get("refiner_negative"),
+            sdxl_pipe.get("samples"),
+            sdxl_pipe.get("vae"),
+            sdxl_pipe.get("clip"),
+            sdxl_pipe.get("images"),
+            sdxl_pipe.get("seed")
+        )
+
+class ttNsave:
+    def __init__(self, my_unique_id=0, prompt=None, extra_pnginfo=None, number_padding=5, overwrite_existing=False, output_dir=folder_paths.get_temp_directory()):
+        self.number_padding = int(number_padding) if number_padding not in [None, "None", 0] else None
+        self.overwrite_existing = overwrite_existing
+        self.my_unique_id = my_unique_id
+
self.prompt = prompt
+        self.extra_pnginfo = extra_pnginfo
+        self.type = 'temp'
+        self.output_dir = output_dir
+        if self.output_dir != folder_paths.get_temp_directory():
+            self.output_dir = self.folder_parser(self.output_dir, self.prompt, self.my_unique_id)
+            if not os.path.exists(self.output_dir):
+                self._create_directory(self.output_dir)
+
+    @staticmethod
+    def _create_directory(folder: str):
+        """Try to create the directory and log the status."""
+        if not os.path.exists(folder):
+            ttNl(f"Folder {folder} does not exist. Attempting to create...").warn().p()
+            try:
+                os.makedirs(folder)
+                ttNl(f"{folder} Created Successfully").success().p()
+            except OSError:
+                ttNl(f"Failed to create folder {folder}").error().p()
+
+    @staticmethod
+    def _map_filename(filename: str, filename_prefix: str) -> Tuple[int, str, Optional[int]]:
+        """Utility function to map filename to its parts."""
+
+        # Get the prefix length and extract the prefix
+        prefix_len = len(os.path.basename(filename_prefix))
+        prefix = filename[:prefix_len]
+
+        # Search for the primary digits
+        digits = re.search(r'(\d+)', filename[prefix_len:])
+
+        # Search for the number in brackets after the primary digits
+        group_id = re.search(r'\((\d+)\)', filename[prefix_len:])
+
+        return (int(digits.group()) if digits else 0, prefix, int(group_id.group(1)) if group_id else 0)
+
+    @staticmethod
+    def _format_date(text: str, date: datetime.datetime) -> str:
+        """Format the date according to specific patterns."""
+        date_formats = {
+            'd': lambda d: d.day,
+            'dd': lambda d: '{:02d}'.format(d.day),
+            'M': lambda d: d.month,
+            'MM': lambda d: '{:02d}'.format(d.month),
+            'h': lambda d: d.hour,
+            'hh': lambda d: '{:02d}'.format(d.hour),
+            'm': lambda d: d.minute,
+            'mm': lambda d: '{:02d}'.format(d.minute),
+            's': lambda d: d.second,
+            'ss': lambda d: '{:02d}'.format(d.second),
+            'y': lambda d: d.year,
+            'yy': lambda d: str(d.year)[2:],
+            'yyy': lambda d: str(d.year)[1:],
+            'yyyy': lambda d: d.year,
+        }
+
+        # We need to sort the keys in reverse order to ensure we match the longest formats first
+        for format_str in sorted(date_formats.keys(), key=len, reverse=True):
+            if format_str in text:
+                text = text.replace(format_str, str(date_formats[format_str](date)))
+        return text
+
+    @staticmethod
+    def _gather_all_inputs(prompt: Dict[str, dict], unique_id: str, linkInput: str = '', collected_inputs: Optional[Dict[str, Union[str, List[str]]]] = None) -> Dict[str, Union[str, List[str]]]:
+        """Recursively gather all inputs from the prompt dictionary."""
+        if prompt is None:
+            return None
+
+        collected_inputs = collected_inputs or {}
+        prompt_inputs = prompt[str(unique_id)]["inputs"]
+
+        for p_input, p_input_value in prompt_inputs.items():
+            a_input = f"{linkInput}>{p_input}" if linkInput else p_input
+
+            if isinstance(p_input_value, list):
+                ttNsave._gather_all_inputs(prompt, p_input_value[0], a_input, collected_inputs)
+            else:
+                existing_value = collected_inputs.get(a_input)
+                if existing_value is None:
+                    collected_inputs[a_input] = p_input_value
+                elif p_input_value not in existing_value:
+                    collected_inputs[a_input] = existing_value + "; " + p_input_value
+
+        return collected_inputs
+
+    @staticmethod
+    def _get_filename_with_padding(output_dir, filename, number_padding, group_id, ext):
+        """Return filename with proper padding."""
+        try:
+            filtered = list(filter(lambda a: a[1] == filename, map(lambda x: ttNsave._map_filename(x, filename), os.listdir(output_dir))))
+            last = max(filtered)[0]
+
+            for f in filtered:
+                if f[0] == last:
+                    if f[2] == 0 or f[2] == group_id:
+                        last += 1
+            counter = last
+        except (ValueError, FileNotFoundError):
+            os.makedirs(output_dir, exist_ok=True)
+            counter = 1
+
+        if group_id == 0:
+            return f"{filename}.{ext}" if number_padding is None else f"{filename}_{counter:0{number_padding}}.{ext}"
+        else:
+            return f"{filename}_({group_id}).{ext}" if number_padding is None else f"{filename}_{counter:0{number_padding}}_({group_id}).{ext}"
+
+    @staticmethod
+    def filename_parser(output_dir: str, filename_prefix: str, prompt: Dict[str, dict], my_unique_id: str, number_padding: int, group_id: int, ext: str) -> Tuple[str, str]:
+        """Parse the filename using provided patterns and replace them with actual values."""
+        filename = re.sub(r'%date:(.*?)%', lambda m: ttNsave._format_date(m.group(1), datetime.datetime.now()), filename_prefix)
+        all_inputs = ttNsave._gather_all_inputs(prompt, my_unique_id)
+
+        filename = re.sub(r'%(.*?)%', lambda m: str(all_inputs.get(m.group(1), '')), filename)
+
+        subfolder = os.path.dirname(os.path.normpath(filename))
+        filename = os.path.basename(os.path.normpath(filename))
+
+        output_dir = os.path.join(output_dir, subfolder)
+
+        filename = ttNsave._get_filename_with_padding(output_dir, filename, number_padding, group_id, ext)
+
+        return filename, subfolder
+
+    @staticmethod
+    def folder_parser(output_dir: str, prompt: Dict[str, dict], my_unique_id: str):
+        output_dir = re.sub(r'%date:(.*?)%', lambda m: ttNsave._format_date(m.group(1), datetime.datetime.now()), output_dir)
+        all_inputs = ttNsave._gather_all_inputs(prompt, my_unique_id)
+
+        return re.sub(r'%(.*?)%', lambda m: str(all_inputs.get(m.group(1), '')), output_dir)
+
+    def images(self, images, filename_prefix, output_type, embed_workflow=True, ext="png", group_id=0):
+        FORMAT_MAP = {
+            "png": "PNG",
+            "jpg": "JPEG",
+            "jpeg": "JPEG",
+            "bmp": "BMP",
+            "tif": "TIFF",
+            "tiff": "TIFF",
+            "webp": "WEBP",
+        }
+
+        if ext not in FORMAT_MAP:
+            raise ValueError(f"Unsupported file extension {ext}")
+
+        if output_type in ("Hide", "Disabled"):
+            return list()
+        if output_type in ("Save", "Hide/Save"):
+            output_dir = self.output_dir if self.output_dir != folder_paths.get_temp_directory() else folder_paths.get_output_directory()
+            self.type = "output"
+        if output_type == "Preview":
+            output_dir = folder_paths.get_temp_directory()
+            filename_prefix = 'ttNpreview'
+            ext = "png"
+
+        results = list()
+        for image in images:
+            img = Image.fromarray(np.clip(255. * image.cpu().numpy(), 0, 255).astype(np.uint8))
+
+            filename = filename_prefix.replace("%width%", str(img.size[0])).replace("%height%", str(img.size[1]))
+
+            filename, subfolder = ttNsave.filename_parser(output_dir, filename, self.prompt, self.my_unique_id, self.number_padding, group_id, ext)
+
+            file_path = os.path.join(output_dir, subfolder, filename)
+
+            if (embed_workflow in (True, "True")) and (ext in ("png", "webp")):
+                if ext == "png":
+                    metadata = PngInfo()
+                    if self.prompt is not None:
+                        metadata.add_text("prompt", json.dumps(self.prompt))
+
+                    if self.extra_pnginfo is not None:
+                        for x in self.extra_pnginfo:
+                            metadata.add_text(x, json.dumps(self.extra_pnginfo[x]))
+
+                    if self.overwrite_existing or not os.path.isfile(file_path):
+                        img.save(file_path, pnginfo=metadata, format=FORMAT_MAP[ext])
+                    else:
+                        ttNl(f"File {file_path} already exists...
Skipping").error().p() + + if ext == "webp": + img_exif = img.getexif() + workflow_metadata = '' + prompt_str = '' + if self.prompt is not None: + prompt_str = json.dumps(self.prompt) + img_exif[0x010f] = "Prompt:" + prompt_str + + if self.extra_pnginfo is not None: + for x in self.extra_pnginfo: + workflow_metadata += json.dumps(self.extra_pnginfo[x]) + + img_exif[0x010e] = "Workflow:" + workflow_metadata + exif_data = img_exif.tobytes() + + if self.overwrite_existing or not os.path.isfile(file_path): + img.save(file_path, exif=exif_data, format=FORMAT_MAP[ext]) + else: + ttNl(f"File {file_path} already exists... Skipping").error().p() + else: + if self.overwrite_existing or not os.path.isfile(file_path): + img.save(file_path, format=FORMAT_MAP[ext]) + else: + ttNl(f"File {file_path} already exists... Skipping").error().p() + + results.append({ + "filename": file_path, + "subfolder": subfolder, + "type": self.type + }) + + return results + +loader = ttNloader() +sampler = ttNsampler() + +#---------------------------------------------------------------DEPRECATED START-----------------------------------------------------------------------# +class ttNxyPlot: + def __init__(self, xyPlotData, save_prefix, image_output, prompt, extra_pnginfo, my_unique_id): + self.x_node_type, self.x_type = ttNsampler.safe_split(xyPlotData.get("x_axis"), ': ') + self.y_node_type, self.y_type = ttNsampler.safe_split(xyPlotData.get("y_axis"), ': ') + + self.x_values = xyPlotData.get("x_vals") if self.x_type != "None" else [] + self.y_values = xyPlotData.get("y_vals") if self.y_type != "None" else [] + + self.grid_spacing = xyPlotData.get("grid_spacing") + self.latent_id = xyPlotData.get("latent_id") + self.output_individuals = xyPlotData.get("output_individuals") + + self.x_label, self.y_label = [], [] + self.max_width, self.max_height = 0, 0 + self.latents_plot = [] + self.image_list = [] + + self.num_cols = len(self.x_values) if len(self.x_values) > 0 else 1 + self.num_rows = len(self.y_values) if len(self.y_values) > 0 else 1 + + self.total = self.num_cols * self.num_rows + self.num = 0 + + self.save_prefix = save_prefix + self.image_output = image_output + self.prompt = prompt + self.extra_pnginfo = extra_pnginfo + self.my_unique_id = my_unique_id + + # Helper Functions + @staticmethod + def define_variable(plot_image_vars, value_type, value, index): + value_label = f"{value}" + if value_type == "seed": + seed = int(plot_image_vars["seed"]) + if index != 0: + index = 1 + if value == 'increment': + plot_image_vars["seed"] = seed + index + value_label = f"{plot_image_vars['seed']}" + + elif value == 'decrement': + plot_image_vars["seed"] = seed - index + value_label = f"{plot_image_vars['seed']}" + + elif value == 'randomize': + plot_image_vars["seed"] = random.randint(0, 0xffffffffffffffff) + value_label = f"{plot_image_vars['seed']}" + else: + plot_image_vars[value_type] = value + + if value_type in ["steps", "cfg", "denoise", "clip_skip", + "lora1_model_strength", "lora1_clip_strength", + "lora2_model_strength", "lora2_clip_strength", + "lora3_model_strength", "lora3_clip_strength"]: + value_label = f"{value_type}: {value}" + + if value_type in ["lora_model&clip_strength", "lora1_model&clip_strength", "lora2_model&clip_strength", "lora3_model&clip_strength"]: + loraNum = value_type.split("_")[0] + plot_image_vars[loraNum + "_model_strength"] = value + plot_image_vars[loraNum + "_clip_strength"] = value + + type_label = value_type.replace("_model&clip", "") + value_label = f"{type_label}: {value}" + + elif 
value_type == "positive_token_normalization": + value_label = f'(+) token norm.: {value}' + elif value_type == "positive_weight_interpretation": + value_label = f'(+) weight interp.: {value}' + elif value_type == "negative_token_normalization": + value_label = f'(-) token norm.: {value}' + elif value_type == "negative_weight_interpretation": + value_label = f'(-) weight interp.: {value}' + + elif value_type == "positive": + value_label = f"pos prompt {index + 1}" + elif value_type == "negative": + value_label = f"neg prompt {index + 1}" + + return plot_image_vars, value_label + + @staticmethod + def get_font(font_size): + return ImageFont.truetype(str(Path(ttNpaths.font_path)), font_size) + + @staticmethod + def update_label(label, value, num_items): + if len(label) < num_items: + return [*label, value] + return label + + @staticmethod + def rearrange_tensors(latent, num_cols, num_rows): + new_latent = [] + for i in range(num_rows): + for j in range(num_cols): + index = j * num_rows + i + new_latent.append(latent[index]) + return new_latent + + def calculate_background_dimensions(self): + border_size = int((self.max_width//8)*1.5) if self.y_type != "None" or self.x_type != "None" else 0 + bg_width = self.num_cols * (self.max_width + self.grid_spacing) - self.grid_spacing + border_size * (self.y_type != "None") + bg_height = self.num_rows * (self.max_height + self.grid_spacing) - self.grid_spacing + border_size * (self.x_type != "None") + + x_offset_initial = border_size if self.y_type != "None" else 0 + y_offset = border_size if self.x_type != "None" else 0 + + return bg_width, bg_height, x_offset_initial, y_offset + + def adjust_font_size(self, text, initial_font_size, label_width): + font = self.get_font(initial_font_size) + + left, _, right, _ = font.getbbox(text) + text_width = right - left + + scaling_factor = 0.9 + if text_width > (label_width * scaling_factor): + return int(initial_font_size * (label_width / text_width) * scaling_factor) + else: + return initial_font_size + + def create_label(self, img, text, initial_font_size, is_x_label=True, max_font_size=70, min_font_size=10): + label_width = img.width if is_x_label else img.height + + # Adjust font size + font_size = self.adjust_font_size(text, initial_font_size, label_width) + font_size = min(max_font_size, font_size) # Ensure font isn't too large + font_size = max(min_font_size, font_size) # Ensure font isn't too small + + label_height = int(font_size * 1.5) if is_x_label else font_size + + label_bg = Image.new('RGBA', (label_width, label_height), color=(255, 255, 255, 0)) + d = ImageDraw.Draw(label_bg) + + font = self.get_font(font_size) + + # Check if text will fit, if not insert ellipsis and reduce text + try: + if d.textsize(text, font=font)[0] > label_width: + while d.textsize(text+'...', font=font)[0] > label_width and len(text) > 0: + text = text[:-1] + text = text + '...' + except: + if d.textlength(text, font=font) > label_width: + while d.textlength(text+'...', font=font) > label_width and len(text) > 0: + text = text[:-1] + text = text + '...' 
+ + # Compute text width and height for multi-line text + text_lines = text.split('\n') + try: + text_widths, text_heights = zip(*[d.textsize(line, font=font) for line in text_lines]) + except: + text_widths, text_heights = zip(*[(d.textlength(line, font=font), font_size) for line in text_lines]) + max_text_width = max(text_widths) + total_text_height = sum(text_heights) + + # Compute position for each line of text + lines_positions = [] + current_y = 0 + for line, line_width, line_height in zip(text_lines, text_widths, text_heights): + text_x = (label_width - line_width) // 2 + text_y = current_y + (label_height - total_text_height) // 2 + current_y += line_height + lines_positions.append((line, (text_x, text_y))) + + # Draw each line of text + for line, (text_x, text_y) in lines_positions: + d.text((text_x, text_y), line, fill='black', font=font) + + return label_bg + + def sample_plot_image(self, plot_image_vars, samples, preview_latent, latents_plot, image_list, disable_noise, start_step, last_step, force_full_denoise): + model, clip, vae, positive, negative = None, None, None, None, None + + if plot_image_vars["x_node_type"] == "loader" or plot_image_vars["y_node_type"] == "loader": + model, clip, vae = loader.load_checkpoint(plot_image_vars['ckpt_name']) + + if plot_image_vars['lora1_name'] != "None": + model, clip = loader.load_lora(plot_image_vars['lora1_name'], model, clip, plot_image_vars['lora1_model_strength'], plot_image_vars['lora1_clip_strength']) + + if plot_image_vars['lora2_name'] != "None": + model, clip = loader.load_lora(plot_image_vars['lora2_name'], model, clip, plot_image_vars['lora2_model_strength'], plot_image_vars['lora2_clip_strength']) + + if plot_image_vars['lora3_name'] != "None": + model, clip = loader.load_lora(plot_image_vars['lora3_name'], model, clip, plot_image_vars['lora3_model_strength'], plot_image_vars['lora3_clip_strength']) + + # Check for custom VAE + if plot_image_vars['vae_name'] not in ["Baked-VAE", "Baked VAE"]: + vae = loader.load_vae(plot_image_vars['vae_name']) + + # CLIP skip + if not clip: + raise Exception("No CLIP found") + clip = clip.clone() + clip.clip_layer(plot_image_vars['clip_skip']) + + positive, positive_pooled = advanced_encode(clip, plot_image_vars['positive'], plot_image_vars['positive_token_normalization'], plot_image_vars['positive_weight_interpretation'], w_max=1.0, apply_to_pooled="enable") + positive = [[positive, {"pooled_output": positive_pooled}]] + + negative, negative_pooled = advanced_encode(clip, plot_image_vars['negative'], plot_image_vars['negative_token_normalization'], plot_image_vars['negative_weight_interpretation'], w_max=1.0, apply_to_pooled="enable") + negative = [[negative, {"pooled_output": negative_pooled}]] + + model = model if model is not None else plot_image_vars["model"] + clip = clip if clip is not None else plot_image_vars["clip"] + vae = vae if vae is not None else plot_image_vars["vae"] + positive = positive if positive is not None else plot_image_vars["positive_cond"] + negative = negative if negative is not None else plot_image_vars["negative_cond"] + + seed = plot_image_vars["seed"] + steps = plot_image_vars["steps"] + cfg = plot_image_vars["cfg"] + sampler_name = plot_image_vars["sampler_name"] + scheduler = plot_image_vars["scheduler"] + denoise = plot_image_vars["denoise"] + + if plot_image_vars["lora_name"] not in ('None', None): + model, clip = loader.load_lora(plot_image_vars["lora_name"], model, clip, plot_image_vars["lora_model_strength"], plot_image_vars["lora_clip_strength"]) + 
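+        # Each grid cell re-enters this method: axis values written into
+        # plot_image_vars by define_variable override the loader/sampler settings
+        # for this cell, and anything not overridden falls back to the live
+        # objects carried in the pipe (model/clip/vae/conditioning).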
+ # Sample + samples = sampler.common_ksampler(model, seed, steps, cfg, sampler_name, scheduler, positive, negative, samples, denoise=denoise, disable_noise=disable_noise, preview_latent=preview_latent, start_step=start_step, last_step=last_step, force_full_denoise=force_full_denoise) + + # Decode images and store + latent = samples["samples"] + + # Add the latent tensor to the tensors list + latents_plot.append(latent) + + # Decode the image + image = vae.decode(latent) + + if self.output_individuals in [True, "True"]: + ttN_save = ttNsave(self.my_unique_id, self.prompt, self.extra_pnginfo) + ttN_save.images(image, self.save_prefix, self.image_output, group_id=self.num) + + # Convert the image from tensor to PIL Image and add it to the list + pil_image = ttNsampler.tensor2pil(image) + image_list.append(pil_image) + + # Update max dimensions + self.max_width = max(self.max_width, pil_image.width) + self.max_height = max(self.max_height, pil_image.height) + + # Return the touched variables + return image_list, self.max_width, self.max_height, latents_plot + + def validate_xy_plot(self): + if self.x_type == 'None' and self.y_type == 'None': + ttNl('No Valid Plot Types - Reverting to default sampling...').t(f'pipeKSampler[{self.my_unique_id}]').warn().p() + return False + else: + return True + + def plot_images_and_labels(self): + # Calculate the background dimensions + bg_width, bg_height, x_offset_initial, y_offset = self.calculate_background_dimensions() + + # Create the white background image + background = Image.new('RGBA', (int(bg_width), int(bg_height)), color=(255, 255, 255, 255)) + + for row_index in range(self.num_rows): + x_offset = x_offset_initial + + for col_index in range(self.num_cols): + index = col_index * self.num_rows + row_index + img = self.image_list[index] + background.paste(img, (x_offset, y_offset)) + + # Handle X label + if row_index == 0 and self.x_type != "None": + label_bg = self.create_label(img, self.x_label[col_index], int(48 * img.width / 512)) + label_y = (y_offset - label_bg.height) // 2 + background.alpha_composite(label_bg, (x_offset, label_y)) + + # Handle Y label + if col_index == 0 and self.y_type != "None": + label_bg = self.create_label(img, self.y_label[row_index], int(48 * img.height / 512), False) + label_bg = label_bg.rotate(90, expand=True) + + label_x = (x_offset - label_bg.width) // 2 + label_y = y_offset + (img.height - label_bg.height) // 2 + background.alpha_composite(label_bg, (label_x, label_y)) + + x_offset += img.width + self.grid_spacing + + y_offset += img.height + self.grid_spacing + + return sampler.pil2tensor(background) + + def get_latent(self, samples, latent_id): + # Extract the 'samples' tensor from the dictionary + latent_image_tensor = samples["samples"] + + # Split the tensor into individual image tensors + image_tensors = torch.split(latent_image_tensor, 1, dim=0) + + # Create a list of dictionaries containing the individual image tensors + latent_list = [{'samples': image} for image in image_tensors] + + # Set latent only to the first latent of batch + if latent_id >= len(latent_list): + ttNl(f'The selected latent_id ({latent_id}) is out of range.').t(f'pipeKSampler[{self.my_unique_id}]').warn().p() + ttNl(f'Automatically setting the latent_id to the last image in the list (index: {len(latent_list) - 1}).').t(f'pipeKSampler[{self.my_unique_id}]').warn().p() + + latent_id = len(latent_list) - 1 + + return latent_list[latent_id] + + def get_labels_and_sample(self, plot_image_vars, latent_image, preview_latent, start_step, 
last_step, force_full_denoise, disable_noise): + for x_index, x_value in enumerate(self.x_values): + plot_image_vars, x_value_label = self.define_variable(plot_image_vars, self.x_type, x_value, x_index) + self.x_label = self.update_label(self.x_label, x_value_label, len(self.x_values)) + if self.y_type != 'None': + for y_index, y_value in enumerate(self.y_values): + self.num += 1 + plot_image_vars, y_value_label = self.define_variable(plot_image_vars, self.y_type, y_value, y_index) + self.y_label = self.update_label(self.y_label, y_value_label, len(self.y_values)) + + ttNl(f'{CC.GREY}X: {x_value_label}, Y: {y_value_label}').t(f'Plot Values {self.num}/{self.total} ->').p() + self.image_list, self.max_width, self.max_height, self.latents_plot = self.sample_plot_image(plot_image_vars, latent_image, preview_latent, self.latents_plot, self.image_list, disable_noise, start_step, last_step, force_full_denoise) + else: + self.num += 1 + ttNl(f'{CC.GREY}X: {x_value_label}').t(f'Plot Values {self.num}/{self.total} ->').p() + self.image_list, self.max_width, self.max_height, self.latents_plot = self.sample_plot_image(plot_image_vars, latent_image, preview_latent, self.latents_plot, self.image_list, disable_noise, start_step, last_step, force_full_denoise) + + # Rearrange latent array to match preview image grid + self.latents_plot = self.rearrange_tensors(self.latents_plot, self.num_cols, self.num_rows) + + # Concatenate the tensors along the first dimension (dim=0) + self.latents_plot = torch.cat(self.latents_plot, dim=0) + + return self.latents_plot + +class ttN_XYPlot: + version = '1.2.0' + lora_list = ["None"] + folder_paths.get_filename_list("loras") + lora_strengths = {"min": -4.0, "max": 4.0, "step": 0.01} + token_normalization = ["none", "mean", "length", "length+mean"] + weight_interpretation = ["comfy", "A1111", "compel", "comfy++"] + + loader_dict = { + "ckpt_name": folder_paths.get_filename_list("checkpoints"), + "vae_name": ["Baked-VAE"] + folder_paths.get_filename_list("vae"), + "clip_skip": {"min": -24, "max": -1, "step": 1}, + "lora1_name": lora_list, + "lora1_model_strength": lora_strengths, + "lora1_clip_strength": lora_strengths, + "lora1_model&clip_strength": lora_strengths, + "lora2_name": lora_list, + "lora2_model_strength": lora_strengths, + "lora2_clip_strength": lora_strengths, + "lora2_model&clip_strength": lora_strengths, + "lora3_name": lora_list, + "lora3_model_strength": lora_strengths, + "lora3_clip_strength": lora_strengths, + "lora3_model&clip_strength": lora_strengths, + "positive": [], + "positive_token_normalization": token_normalization, + "positive_weight_interpretation": weight_interpretation, + "negative": [], + "negative_token_normalization": token_normalization, + "negative_weight_interpretation": weight_interpretation, + } + + sampler_dict = { + "lora_name": lora_list, + "lora_model_strength": lora_strengths, + "lora_clip_strength": lora_strengths, + "lora_model&clip_strength": lora_strengths, + "steps": {"min": 1, "max": 100, "step": 1}, + "cfg": {"min": 0.0, "max": 100.0, "step": 1.0}, + "sampler_name": comfy.samplers.KSampler.SAMPLERS, + "scheduler": comfy.samplers.KSampler.SCHEDULERS, + "denoise": {"min": 0.0, "max": 1.0, "step": 0.01}, + "seed": ['increment', 'decrement', 'randomize'], + } + + plot_dict = {**sampler_dict, **loader_dict} + + plot_values = ["None",] + plot_values.append("---------------------") + for k in sampler_dict: + plot_values.append(f'sampler: {k}') + plot_values.append("---------------------") + for k in loader_dict: + 
plot_values.append(f'loader: {k}')
+
+    def __init__(self):
+        pass
+
+    rejected = ["None", "---------------------"]
+
+    @classmethod
+    def INPUT_TYPES(s):
+        return {
+            "required": {
+                #"info": ("INFO", {"default": "Any values not set by xyplot will be taken from the KSampler or connected pipeLoader", "multiline": True}),
+                "grid_spacing": ("INT",{"min": 0, "max": 500, "step": 5, "default": 0,}),
+                "latent_id": ("INT",{"min": 0, "max": 100, "step": 1, "default": 0, }),
+                "output_individuals": (["False", "True"],{"default": "False"}),
+                "flip_xy": (["False", "True"],{"default": "False"}),
+                "x_axis": (ttN_XYPlot.plot_values, {"default": 'None'}),
+                "x_values": ("STRING",{"default": '', "multiline": True, "placeholder": 'insert values separated by "; "'}),
+                "y_axis": (ttN_XYPlot.plot_values, {"default": 'None'}),
+                "y_values": ("STRING",{"default": '', "multiline": True, "placeholder": 'insert values separated by "; "'}),
+            },
+            "hidden": {
+                "plot_dict": (ttN_XYPlot.plot_dict,),
+                "ttNnodeVersion": ttN_XYPlot.version,
+            },
+        }
+
+    RETURN_TYPES = ("XYPLOT", )
+    RETURN_NAMES = ("xyPlot", )
+    FUNCTION = "plot"
+
+    CATEGORY = "🌏 tinyterra/legacy"
+
+    def plot(self, grid_spacing, latent_id, output_individuals, flip_xy, x_axis, x_values, y_axis, y_values):
+        def clean_values(values):
+            original_values = values.split("; ")
+            cleaned_values = []
+
+            for value in original_values:
+                # Strip the semi-colon
+                cleaned_value = value.strip(';').strip()
+
+                if cleaned_value == "":
+                    continue
+
+                # Try to convert the cleaned_value back to int or float if possible
+                try:
+                    cleaned_value = int(cleaned_value)
+                except ValueError:
+                    try:
+                        cleaned_value = float(cleaned_value)
+                    except ValueError:
+                        pass
+
+                # Append the cleaned_value to the list
+                cleaned_values.append(cleaned_value)
+
+            return cleaned_values
+
+        if x_axis in self.rejected:
+            x_axis = "None"
+            x_values = []
+        else:
+            x_values = clean_values(x_values)
+
+        if y_axis in self.rejected:
+            y_axis = "None"
+            y_values = []
+        else:
+            y_values = clean_values(y_values)
+
+        if flip_xy == "True":
+            x_axis, y_axis = y_axis, x_axis
+            x_values, y_values = y_values, x_values
+
+        xy_plot = {"x_axis": x_axis,
+                   "x_vals": x_values,
+                   "y_axis": y_axis,
+                   "y_vals": y_values,
+                   "grid_spacing": grid_spacing,
+                   "latent_id": latent_id,
+                   "output_individuals": output_individuals}
+
+        return (xy_plot, )
+
+class ttN_pipe_IN:
+    version = '1.1.0'
+    def __init__(self):
+        pass
+
+    @classmethod
+    def INPUT_TYPES(s):
+        return {
+            "required": {
+                "model": ("MODEL",),
+                "pos": ("CONDITIONING",),
+                "neg": ("CONDITIONING",),
+                "latent": ("LATENT",),
+                "vae": ("VAE",),
+                "clip": ("CLIP",),
+                "seed": ("INT", {"default": 0, "min": 0, "max": 0xffffffffffffffff}),
+            },"optional": {
+                "image": ("IMAGE",),
+            },
+            "hidden": {"ttNnodeVersion": ttN_pipe_IN.version},
+        }
+
+    RETURN_TYPES = ("PIPE_LINE", )
+    RETURN_NAMES = ("pipe", )
+    FUNCTION = "flush"
+
+    CATEGORY = "🌏 tinyterra/legacy"
+
+    def flush(self, model, pos=0, neg=0, latent=0, vae=0, clip=0, image=0, seed=0):
+        pipe = {"model": model,
+                "positive": pos,
+                "negative": neg,
+                "vae": vae,
+                "clip": clip,
+
+                "refiner_model": None,
+                "refiner_positive": None,
+                "refiner_negative": None,
+                "refiner_vae": None,
+                "refiner_clip": None,
+
+                "samples": latent,
+                "images": image,
+                "seed": seed,
+
+                "loader_settings": {}
+        }
+        return (pipe, )
+
+class ttN_pipe_OUT:
+    version = '1.1.0'
+    def __init__(self):
+        pass
+
+    @classmethod
+    def INPUT_TYPES(s):
+        return {
+            "required": {
+                "pipe": ("PIPE_LINE",),
+            },
+            "hidden": {"ttNnodeVersion":
ttN_pipe_OUT.version}, + } + + RETURN_TYPES = ("MODEL", "CONDITIONING", "CONDITIONING", "LATENT", "VAE", "CLIP", "IMAGE", "INT", "PIPE_LINE",) + RETURN_NAMES = ("model", "pos", "neg", "latent", "vae", "clip", "image", "seed", "pipe") + FUNCTION = "flush" + + CATEGORY = "🌏 tinyterra/legacy" + + def flush(self, pipe): + model = pipe.get("model") + pos = pipe.get("positive") + neg = pipe.get("negative") + latent = pipe.get("samples") + vae = pipe.get("vae") + clip = pipe.get("clip") + image = pipe.get("images") + seed = pipe.get("seed") + + return model, pos, neg, latent, vae, clip, image, seed, pipe + +class ttN_TSC_pipeLoader: + version = '1.1.2' + @classmethod + def INPUT_TYPES(cls): + return {"required": { + "ckpt_name": (folder_paths.get_filename_list("checkpoints"), ), + "config_name": (["Default",] + folder_paths.get_filename_list("configs"), {"default": "Default"} ), + "vae_name": (["Baked VAE"] + folder_paths.get_filename_list("vae"),), + "clip_skip": ("INT", {"default": -1, "min": -24, "max": 0, "step": 1}), + + "lora1_name": (["None"] + folder_paths.get_filename_list("loras"),), + "lora1_model_strength": ("FLOAT", {"default": 1.0, "min": -10.0, "max": 10.0, "step": 0.01}), + "lora1_clip_strength": ("FLOAT", {"default": 1.0, "min": -10.0, "max": 10.0, "step": 0.01}), + + "lora2_name": (["None"] + folder_paths.get_filename_list("loras"),), + "lora2_model_strength": ("FLOAT", {"default": 1.0, "min": -10.0, "max": 10.0, "step": 0.01}), + "lora2_clip_strength": ("FLOAT", {"default": 1.0, "min": -10.0, "max": 10.0, "step": 0.01}), + + "lora3_name": (["None"] + folder_paths.get_filename_list("loras"),), + "lora3_model_strength": ("FLOAT", {"default": 1.0, "min": -10.0, "max": 10.0, "step": 0.01}), + "lora3_clip_strength": ("FLOAT", {"default": 1.0, "min": -10.0, "max": 10.0, "step": 0.01}), + + "positive": ("STRING", {"default": "Positive","multiline": True}), + "positive_token_normalization": (["none", "mean", "length", "length+mean"],), + "positive_weight_interpretation": (["comfy", "A1111", "compel", "comfy++", "down_weight"],), + + "negative": ("STRING", {"default": "Negative", "multiline": True}), + "negative_token_normalization": (["none", "mean", "length", "length+mean"],), + "negative_weight_interpretation": (["comfy", "A1111", "compel", "comfy++", "down_weight"],), + + "empty_latent_width": ("INT", {"default": 512, "min": 64, "max": MAX_RESOLUTION, "step": 8}), + "empty_latent_height": ("INT", {"default": 512, "min": 64, "max": MAX_RESOLUTION, "step": 8}), + "batch_size": ("INT", {"default": 1, "min": 1, "max": 64}), + "seed": ("INT", {"default": 0, "min": 0, "max": 0xffffffffffffffff}), + }, + "optional": {"model_override": ("MODEL",), "clip_override": ("CLIP",), "optional_lora_stack": ("LORA_STACK",),}, + "hidden": {"prompt": "PROMPT", "ttNnodeVersion": ttN_TSC_pipeLoader.version, "my_unique_id": "UNIQUE_ID",}} + + RETURN_TYPES = ("PIPE_LINE" ,"MODEL", "CONDITIONING", "CONDITIONING", "LATENT", "VAE", "CLIP", "INT",) + RETURN_NAMES = ("pipe","model", "positive", "negative", "latent", "vae", "clip", "seed",) + + FUNCTION = "adv_pipeloader" + CATEGORY = "🌏 tinyterra/legacy" + + def adv_pipeloader(self, ckpt_name, config_name, vae_name, clip_skip, + lora1_name, lora1_model_strength, lora1_clip_strength, + lora2_name, lora2_model_strength, lora2_clip_strength, + lora3_name, lora3_model_strength, lora3_clip_strength, + positive, positive_token_normalization, positive_weight_interpretation, + negative, negative_token_normalization, negative_weight_interpretation, + empty_latent_width, 
empty_latent_height, batch_size, seed, model_override=None, clip_override=None, optional_lora_stack=None, prompt=None, my_unique_id=None): + + model: ModelPatcher | None = None + clip: CLIP | None = None + vae: VAE | None = None + + # Create Empty Latent + latent = sampler.emptyLatent(None, batch_size, empty_latent_width, empty_latent_height) + samples = {"samples":latent} + + # Load models + model, clip, vae = loader.load_checkpoint(ckpt_name, config_name) + + if model_override is not None: + model = model_override + + if clip_override is not None: + clip = clip_override + + if optional_lora_stack is not None: + for lora in optional_lora_stack: + model, clip = loader.load_lora(lora[0], model, clip, lora[1], lora[2]) + + if lora1_name != "None": + model, clip = loader.load_lora(lora1_name, model, clip, lora1_model_strength, lora1_clip_strength) + + if lora2_name != "None": + model, clip = loader.load_lora(lora2_name, model, clip, lora2_model_strength, lora2_clip_strength) + + if lora3_name != "None": + model, clip = loader.load_lora(lora3_name, model, clip, lora3_model_strength, lora3_clip_strength) + + # Check for custom VAE + if vae_name != "Baked VAE": + vae = loader.load_vae(vae_name) + + # CLIP skip + if not clip: + raise Exception("No CLIP found") + + clipped = clip.clone() + if clip_skip != 0: + clipped.clip_layer(clip_skip) + + positive = loader.nsp_parse(positive, seed, title='pipeLoader Positive', my_unique_id=my_unique_id) + + positive_embeddings_final, positive_pooled = advanced_encode(clipped, positive, positive_token_normalization, positive_weight_interpretation, w_max=1.0, apply_to_pooled='enable') + positive_embeddings_final = [[positive_embeddings_final, {"pooled_output": positive_pooled}]] + + negative = loader.nsp_parse(negative, seed, title='pipeLoader Negative', my_unique_id=my_unique_id) + + negative_embeddings_final, negative_pooled = advanced_encode(clipped, negative, negative_token_normalization, negative_weight_interpretation, w_max=1.0, apply_to_pooled='enable') + negative_embeddings_final = [[negative_embeddings_final, {"pooled_output": negative_pooled}]] + image = ttNsampler.pil2tensor(Image.new('RGB', (1, 1), (0, 0, 0))) + + + pipe = {"model": model, + "positive": positive_embeddings_final, + "negative": negative_embeddings_final, + "vae": vae, + "clip": clip, + + "samples": samples, + "images": image, + "seed": seed, + + "loader_settings": {"ckpt_name": ckpt_name, + "vae_name": vae_name, + + "lora1_name": lora1_name, + "lora1_model_strength": lora1_model_strength, + "lora1_clip_strength": lora1_clip_strength, + "lora2_name": lora2_name, + "lora2_model_strength": lora2_model_strength, + "lora2_clip_strength": lora2_clip_strength, + "lora3_name": lora3_name, + "lora3_model_strength": lora3_model_strength, + "lora3_clip_strength": lora3_clip_strength, + + "refiner_ckpt_name": None, + "refiner_vae_name": None, + "refiner_lora1_name": None, + "refiner_lora1_model_strength": None, + "refiner_lora1_clip_strength": None, + "refiner_lora2_name": None, + "refiner_lora2_model_strength": None, + "refiner_lora2_clip_strength": None, + + "clip_skip": clip_skip, + "positive": positive, + "positive_l": None, + "positive_g": None, + "positive_token_normalization": positive_token_normalization, + "positive_weight_interpretation": positive_weight_interpretation, + "positive_balance": None, + "negative": negative, + "negative_l": None, + "negative_g": None, + "negative_token_normalization": negative_token_normalization, + "negative_weight_interpretation": 
negative_weight_interpretation, + "negative_balance": None, + "empty_latent_width": empty_latent_width, + "empty_latent_height": empty_latent_height, + "batch_size": batch_size, + "seed": seed, + "empty_samples": samples,} + } + + return (pipe, model, positive_embeddings_final, negative_embeddings_final, samples, vae, clip, seed) + +class ttN_TSC_pipeKSampler: + version = '1.0.5' + upscale_methods = ["None", "nearest-exact", "bilinear", "area", "bicubic", "lanczos", "bislerp"] + crop_methods = ["disabled", "center"] + + def __init__(self): + pass + + @classmethod + def INPUT_TYPES(cls): + return {"required": + {"pipe": ("PIPE_LINE",), + + "lora_name": (["None"] + folder_paths.get_filename_list("loras"),), + "lora_model_strength": ("FLOAT", {"default": 1.0, "min": -10.0, "max": 10.0, "step": 0.01}), + "lora_clip_strength": ("FLOAT", {"default": 1.0, "min": -10.0, "max": 10.0, "step": 0.01}), + + "upscale_method": (cls.upscale_methods,), + "factor": ("FLOAT", {"default": 2, "min": 0.0, "max": 10.0, "step": 0.25}), + "crop": (cls.crop_methods,), + "sampler_state": (["Sample", "Hold"], ), + "steps": ("INT", {"default": 20, "min": 1, "max": 10000}), + "cfg": ("FLOAT", {"default": 8.0, "min": 0.0, "max": 100.0}), + "sampler_name": (comfy.samplers.KSampler.SAMPLERS,), + "scheduler": (comfy.samplers.KSampler.SCHEDULERS,), + "denoise": ("FLOAT", {"default": 1.0, "min": 0.0, "max": 1.0, "step": 0.01}), + "image_output": (["Hide", "Preview", "Save", "Hide/Save"],), + "save_prefix": ("STRING", {"default": "ComfyUI"}) + }, + "optional": + {"seed": ("INT", {"default": 0, "min": 0, "max": 0xffffffffffffffff}), + "optional_model": ("MODEL",), + "optional_positive": ("CONDITIONING",), + "optional_negative": ("CONDITIONING",), + "optional_latent": ("LATENT",), + "optional_vae": ("VAE",), + "optional_clip": ("CLIP",), + "xyPlot": ("XYPLOT",), + }, + "hidden": + {"prompt": "PROMPT", "extra_pnginfo": "EXTRA_PNGINFO", "my_unique_id": "UNIQUE_ID", + "embeddingsList": (folder_paths.get_filename_list("embeddings"),), + "ttNnodeVersion": ttN_TSC_pipeKSampler.version}, + } + + RETURN_TYPES = ("PIPE_LINE", "MODEL", "CONDITIONING", "CONDITIONING", "LATENT", "VAE", "CLIP", "IMAGE", "INT",) + RETURN_NAMES = ("pipe", "model", "positive", "negative", "latent","vae", "clip", "image", "seed", ) + OUTPUT_NODE = True + FUNCTION = "sample" + CATEGORY = "🌏 tinyterra/legacy" + + def sample(self, pipe, lora_name, lora_model_strength, lora_clip_strength, sampler_state, steps, cfg, sampler_name, scheduler, image_output, save_prefix, denoise=1.0, + optional_model=None, optional_positive=None, optional_negative=None, optional_latent=None, optional_vae=None, optional_clip=None, seed=None, xyPlot=None, upscale_method=None, factor=None, crop=None, prompt=None, extra_pnginfo=None, my_unique_id=None, start_step=None, last_step=None, force_full_denoise=False, disable_noise=False): + + my_unique_id = int(my_unique_id) + + ttN_save = ttNsave(my_unique_id, prompt, extra_pnginfo) + + samp_model = optional_model if optional_model is not None else pipe["model"] + samp_positive = optional_positive if optional_positive is not None else pipe["positive"] + samp_negative = optional_negative if optional_negative is not None else pipe["negative"] + samp_samples = optional_latent if optional_latent is not None else pipe["samples"] + samp_vae = optional_vae if optional_vae is not None else pipe["vae"] + samp_clip = optional_clip if optional_clip is not None else pipe["clip"] + + if seed in (None, 'undefined'): + samp_seed = pipe["seed"] + else: + 
samp_seed = seed
+
+        def process_sample_state(pipe, samp_model, samp_clip, samp_samples, samp_vae, samp_seed, samp_positive, samp_negative, lora_name, lora_model_strength, lora_clip_strength,
+                                 steps, cfg, sampler_name, scheduler, denoise,
+                                 image_output, save_prefix, prompt, extra_pnginfo, my_unique_id, preview_latent, disable_noise=disable_noise):
+            # Load Lora
+            if lora_name not in (None, "None"):
+                samp_model, samp_clip = loader.load_lora(lora_name, samp_model, samp_clip, lora_model_strength, lora_clip_strength)
+
+            # Upscale samples if enabled
+            samp_samples = sampler.handle_upscale(samp_samples, upscale_method, factor, crop)
+
+            samp_samples = sampler.common_ksampler(samp_model, samp_seed, steps, cfg, sampler_name, scheduler, samp_positive, samp_negative, samp_samples, denoise=denoise, preview_latent=preview_latent, start_step=start_step, last_step=last_step, force_full_denoise=force_full_denoise, disable_noise=disable_noise)
+
+            latent = samp_samples["samples"]
+            samp_images = samp_vae.decode(latent)
+
+            results = ttN_save.images(samp_images, save_prefix, image_output)
+
+            sampler.update_value_by_id("results", my_unique_id, results)
+
+            new_pipe = {
+                "model": samp_model,
+                "positive": samp_positive,
+                "negative": samp_negative,
+                "vae": samp_vae,
+                "clip": samp_clip,
+
+                "samples": samp_samples,
+                "images": samp_images,
+                "seed": samp_seed,
+
+                "loader_settings": pipe["loader_settings"],
+            }
+
+            sampler.update_value_by_id("pipe_line", my_unique_id, new_pipe)
+
+            del pipe
+
+            if image_output in ("Hide", "Hide/Save"):
+                return sampler.get_output(new_pipe)
+
+            return {"ui": {"images": results},
+                    "result": sampler.get_output(new_pipe)}
+
+        def process_hold_state(pipe, image_output, my_unique_id):
+            last_pipe = sampler.init_state(my_unique_id, "pipe_line", pipe)
+
+            last_results = sampler.init_state(my_unique_id, "results", list())
+
+            if image_output in ("Hide", "Hide/Save"):
+                return sampler.get_output(last_pipe)
+
+            return {"ui": {"images": last_results}, "result": sampler.get_output(last_pipe)}
+
+        def process_xyPlot(pipe, samp_model, samp_clip, samp_samples, samp_vae, samp_seed, samp_positive, samp_negative, lora_name, lora_model_strength, lora_clip_strength,
+                           steps, cfg, sampler_name, scheduler, denoise,
+                           image_output, save_prefix, prompt, extra_pnginfo, my_unique_id, preview_latent, xyPlot):
+
+            random.seed(seed)
+
+            sampleXYplot = ttNxyPlot(xyPlot, save_prefix, image_output, prompt, extra_pnginfo, my_unique_id)
+
+            if not sampleXYplot.validate_xy_plot():
+                return process_sample_state(pipe, samp_model, samp_clip, samp_samples, samp_vae, samp_seed, samp_positive, samp_negative, lora_name, lora_model_strength, lora_clip_strength, steps, cfg, sampler_name, scheduler, denoise, image_output, save_prefix, prompt, extra_pnginfo, my_unique_id, preview_latent)
+
+            plot_image_vars = {
+                "x_node_type": sampleXYplot.x_node_type, "y_node_type": sampleXYplot.y_node_type,
+                "lora_name": lora_name, "lora_model_strength": lora_model_strength, "lora_clip_strength": lora_clip_strength,
+                "steps": steps, "cfg": cfg, "sampler_name": sampler_name, "scheduler": scheduler, "denoise": denoise, "seed": samp_seed,
+
+                "model": samp_model, "vae": samp_vae, "clip": samp_clip, "positive_cond": samp_positive, "negative_cond": samp_negative,
+
+                "ckpt_name": pipe['loader_settings']['ckpt_name'],
+                "vae_name": pipe['loader_settings']['vae_name'],
+                "clip_skip": pipe['loader_settings']['clip_skip'],
+                "lora1_name": pipe['loader_settings']['lora1_name'],
+                "lora1_model_strength": pipe['loader_settings']['lora1_model_strength'],
+                "lora1_clip_strength": pipe['loader_settings']['lora1_clip_strength'],
+                "lora2_name": pipe['loader_settings']['lora2_name'],
+                "lora2_model_strength": pipe['loader_settings']['lora2_model_strength'],
+                "lora2_clip_strength": pipe['loader_settings']['lora2_clip_strength'],
+                "lora3_name": pipe['loader_settings']['lora3_name'],
+                "lora3_model_strength": pipe['loader_settings']['lora3_model_strength'],
+                "lora3_clip_strength": pipe['loader_settings']['lora3_clip_strength'],
+                "positive": pipe['loader_settings']['positive'],
+                "positive_token_normalization": pipe['loader_settings']['positive_token_normalization'],
+                "positive_weight_interpretation": pipe['loader_settings']['positive_weight_interpretation'],
+                "negative": pipe['loader_settings']['negative'],
+                "negative_token_normalization": pipe['loader_settings']['negative_token_normalization'],
+                "negative_weight_interpretation": pipe['loader_settings']['negative_weight_interpretation'],
+            }
+
+            latent_image = sampleXYplot.get_latent(pipe["samples"], sampleXYplot.latent_id)
+
+            latents_plot = sampleXYplot.get_labels_and_sample(plot_image_vars, latent_image, preview_latent, start_step, last_step, force_full_denoise, disable_noise)
+
+            samp_samples = {"samples": latents_plot}
+            images = sampleXYplot.plot_images_and_labels()
+
+            if xyPlot["output_individuals"]:
+                results = ttN_save.images(images, save_prefix, image_output)
+            else:
+                results = ttN_save.images(images[-1], save_prefix, image_output)
+
+            sampler.update_value_by_id("results", my_unique_id, results)
+
+            new_pipe = {
+                "model": samp_model,
+                "positive": samp_positive,
+                "negative": samp_negative,
+                "vae": samp_vae,
+                "clip": samp_clip,
+
+                "samples": samp_samples,
+                "images": images,
+                "seed": samp_seed,
+
+                "loader_settings": pipe["loader_settings"],
+            }
+
+            sampler.update_value_by_id("pipe_line", my_unique_id, new_pipe)
+
+            del pipe
+
+            if image_output in ("Hide", "Hide/Save"):
+                return sampler.get_output(new_pipe)
+
+            return {"ui": {"images": results}, "result": sampler.get_output(new_pipe)}
+
+        preview_latent = True
+        if image_output in ("Hide", "Hide/Save"):
+            preview_latent = False
+
+        if sampler_state == "Sample" and xyPlot is None:
+            return process_sample_state(pipe, samp_model, samp_clip, samp_samples, samp_vae, samp_seed, samp_positive, samp_negative, lora_name, lora_model_strength, lora_clip_strength,
+                                        steps, cfg, sampler_name, scheduler, denoise, image_output, save_prefix, prompt, extra_pnginfo, my_unique_id, preview_latent)
+
+        elif sampler_state == "Sample" and xyPlot is not None:
+            return process_xyPlot(pipe, samp_model, samp_clip, samp_samples, samp_vae, samp_seed, samp_positive, samp_negative, lora_name, lora_model_strength, lora_clip_strength, steps, cfg, sampler_name, scheduler, denoise, image_output, save_prefix, prompt, extra_pnginfo, my_unique_id, preview_latent, xyPlot)
+
+        elif sampler_state == "Hold":
+            return process_hold_state(pipe, image_output, my_unique_id)
+
+class ttN_pipeKSamplerAdvanced:
+    version = '1.0.5'
+    upscale_methods = ["None", "nearest-exact", "bilinear", "area", "bicubic", "lanczos", "bislerp"]
+    crop_methods = ["disabled", "center"]
+
+    def __init__(self):
+        pass
+
+    @classmethod
+    def INPUT_TYPES(cls):
+        return {"required":
+                    {"pipe": ("PIPE_LINE",),
+
+                    "lora_name": (["None"] + folder_paths.get_filename_list("loras"),),
+                    "lora_model_strength": ("FLOAT", {"default": 1.0, "min": -10.0, "max": 10.0, "step": 0.01}),
+                    "lora_clip_strength": ("FLOAT", {"default": 1.0, "min": -10.0, "max": 10.0, "step": 0.01}),
+
+                    "upscale_method": (cls.upscale_methods,),
+                    "factor": ("FLOAT",
{"default": 2, "min": 0.0, "max": 10.0, "step": 0.25}), + "crop": (cls.crop_methods,), + "sampler_state": (["Sample", "Hold"], ), + + "add_noise": (["enable", "disable"], ), + + "steps": ("INT", {"default": 20, "min": 1, "max": 10000}), + "cfg": ("FLOAT", {"default": 8.0, "min": 0.0, "max": 100.0}), + "sampler_name": (comfy.samplers.KSampler.SAMPLERS,), + "scheduler": (comfy.samplers.KSampler.SCHEDULERS,), + + "start_at_step": ("INT", {"default": 0, "min": 0, "max": 10000}), + "end_at_step": ("INT", {"default": 10000, "min": 0, "max": 10000}), + "return_with_leftover_noise": (["disable", "enable"], ), + + "image_output": (["Hide", "Preview", "Save", "Hide/Save"],), + "save_prefix": ("STRING", {"default": "ComfyUI"}) + }, + "optional": + {"noise_seed": ("INT", {"default": 0, "min": 0, "max": 0xffffffffffffffff}), + "optional_model": ("MODEL",), + "optional_positive": ("CONDITIONING",), + "optional_negative": ("CONDITIONING",), + "optional_latent": ("LATENT",), + "optional_vae": ("VAE",), + "optional_clip": ("CLIP",), + "xyPlot": ("XYPLOT",), + }, + "hidden": + {"prompt": "PROMPT", "extra_pnginfo": "EXTRA_PNGINFO", "my_unique_id": "UNIQUE_ID", + "embeddingsList": (folder_paths.get_filename_list("embeddings"),), + "ttNnodeVersion": ttN_pipeKSamplerAdvanced.version}, + } + + RETURN_TYPES = ("PIPE_LINE", "MODEL", "CONDITIONING", "CONDITIONING", "LATENT", "VAE", "CLIP", "IMAGE", "INT",) + RETURN_NAMES = ("pipe", "model", "positive", "negative", "latent","vae", "clip", "image", "seed", ) + OUTPUT_NODE = True + FUNCTION = "sample" + CATEGORY = "🌏 tinyterra/legacy" + + def sample(self, pipe, + lora_name, lora_model_strength, lora_clip_strength, + sampler_state, add_noise, steps, cfg, sampler_name, scheduler, image_output, save_prefix, denoise=1.0, + noise_seed=None, optional_model=None, optional_positive=None, optional_negative=None, optional_latent=None, optional_vae=None, optional_clip=None, xyPlot=None, upscale_method=None, factor=None, crop=None, prompt=None, extra_pnginfo=None, my_unique_id=None, start_at_step=None, end_at_step=None, return_with_leftover_noise=False): + + force_full_denoise = True + if return_with_leftover_noise == "enable": + force_full_denoise = False + + disable_noise = False + if add_noise == "disable": + disable_noise = True + + out = ttN_TSC_pipeKSampler.sample(self, pipe, lora_name, lora_model_strength, lora_clip_strength, sampler_state, steps, cfg, sampler_name, scheduler, image_output, save_prefix, denoise, + optional_model, optional_positive, optional_negative, optional_latent, optional_vae, optional_clip, noise_seed, xyPlot, upscale_method, factor, crop, prompt, extra_pnginfo, my_unique_id, start_at_step, end_at_step, force_full_denoise, disable_noise) + + return out + +class ttN_pipeLoaderSDXL: + version = '1.1.2' + @classmethod + def INPUT_TYPES(cls): + return {"required": { + "ckpt_name": (folder_paths.get_filename_list("checkpoints"), ), + "vae_name": (["Baked VAE"] + folder_paths.get_filename_list("vae"),), + + "lora1_name": (["None"] + folder_paths.get_filename_list("loras"),), + "lora1_model_strength": ("FLOAT", {"default": 1.0, "min": -10.0, "max": 10.0, "step": 0.01}), + "lora1_clip_strength": ("FLOAT", {"default": 1.0, "min": -10.0, "max": 10.0, "step": 0.01}), + + "lora2_name": (["None"] + folder_paths.get_filename_list("loras"),), + "lora2_model_strength": ("FLOAT", {"default": 1.0, "min": -10.0, "max": 10.0, "step": 0.01}), + "lora2_clip_strength": ("FLOAT", {"default": 1.0, "min": -10.0, "max": 10.0, "step": 0.01}), + + "refiner_ckpt_name": (["None"] + 
folder_paths.get_filename_list("checkpoints"), ), + "refiner_vae_name": (["Baked VAE"] + folder_paths.get_filename_list("vae"),), + + "refiner_lora1_name": (["None"] + folder_paths.get_filename_list("loras"),), + "refiner_lora1_model_strength": ("FLOAT", {"default": 1.0, "min": -10.0, "max": 10.0, "step": 0.01}), + "refiner_lora1_clip_strength": ("FLOAT", {"default": 1.0, "min": -10.0, "max": 10.0, "step": 0.01}), + + "refiner_lora2_name": (["None"] + folder_paths.get_filename_list("loras"),), + "refiner_lora2_model_strength": ("FLOAT", {"default": 1.0, "min": -10.0, "max": 10.0, "step": 0.01}), + "refiner_lora2_clip_strength": ("FLOAT", {"default": 1.0, "min": -10.0, "max": 10.0, "step": 0.01}), + + "clip_skip": ("INT", {"default": -2, "min": -24, "max": 0, "step": 1}), + + "positive": ("STRING", {"default": "Positive","multiline": True}), + "positive_token_normalization": (["none", "mean", "length", "length+mean"],), + "positive_weight_interpretation": (["comfy", "A1111", "compel", "comfy++", "down_weight"],), + + "negative": ("STRING", {"default": "Negative", "multiline": True}), + "negative_token_normalization": (["none", "mean", "length", "length+mean"],), + "negative_weight_interpretation": (["comfy", "A1111", "compel", "comfy++", "down_weight"],), + + "empty_latent_width": ("INT", {"default": 1024, "min": 64, "max": MAX_RESOLUTION, "step": 8}), + "empty_latent_height": ("INT", {"default": 1024, "min": 64, "max": MAX_RESOLUTION, "step": 8}), + "batch_size": ("INT", {"default": 1, "min": 1, "max": 64}), + "seed": ("INT", {"default": 0, "min": 0, "max": 0xffffffffffffffff}), + }, + "hidden": {"prompt": "PROMPT", "ttNnodeVersion": ttN_pipeLoaderSDXL.version, "my_unique_id": "UNIQUE_ID"}} + + RETURN_TYPES = ("PIPE_LINE_SDXL" ,"MODEL", "CONDITIONING", "CONDITIONING", "VAE", "CLIP", "MODEL", "CONDITIONING", "CONDITIONING", "VAE", "CLIP", "LATENT", "INT",) + RETURN_NAMES = ("sdxl_pipe","model", "positive", "negative", "vae", "clip", "refiner_model", "refiner_positive", "refiner_negative", "refiner_vae", "refiner_clip", "latent", "seed",) + + FUNCTION = "adv_pipeloader" + CATEGORY = "🌏 tinyterra/legacy" + + def adv_pipeloader(self, ckpt_name, vae_name, + lora1_name, lora1_model_strength, lora1_clip_strength, + lora2_name, lora2_model_strength, lora2_clip_strength, + refiner_ckpt_name, refiner_vae_name, + refiner_lora1_name, refiner_lora1_model_strength, refiner_lora1_clip_strength, + refiner_lora2_name, refiner_lora2_model_strength, refiner_lora2_clip_strength, + clip_skip, + positive, positive_token_normalization, positive_weight_interpretation, + negative, negative_token_normalization, negative_weight_interpretation, + empty_latent_width, empty_latent_height, batch_size, seed, prompt=None, my_unique_id=None): + + def SDXL_loader(ckpt_name, vae_name, + lora1_name, lora1_model_strength, lora1_clip_strength, + lora2_name, lora2_model_strength, lora2_clip_strength, + positive, positive_token_normalization, positive_weight_interpretation, + negative, negative_token_normalization, negative_weight_interpretation,): + + model: ModelPatcher | None = None + clip: CLIP | None = None + vae: VAE | None = None + + # Load models + model, clip, vae = loader.load_checkpoint(ckpt_name) + + if lora1_name != "None": + model, clip = loader.load_lora(lora1_name, model, clip, lora1_model_strength, lora1_clip_strength) + + if lora2_name != "None": + model, clip = loader.load_lora(lora2_name, model, clip, lora2_model_strength, lora2_clip_strength) + + # Check for custom VAE + if vae_name not in ["Baked VAE", 
"Baked-VAE"]: + vae = loader.load_vae(vae_name) + + # CLIP skip + if not clip: + raise Exception("No CLIP found") + + clipped = clip.clone() + if clip_skip != 0: + clipped.clip_layer(clip_skip) + + positive = loader.nsp_parse(positive, seed, title="pipeLoaderSDXL positive", my_unique_id=my_unique_id) + + positive_embeddings_final, positive_pooled = advanced_encode(clipped, positive, positive_token_normalization, positive_weight_interpretation, w_max=1.0, apply_to_pooled='enable') + positive_embeddings_final = [[positive_embeddings_final, {"pooled_output": positive_pooled}]] + + negative = loader.nsp_parse(negative, seed) + + negative_embeddings_final, negative_pooled = advanced_encode(clipped, negative, negative_token_normalization, negative_weight_interpretation, w_max=1.0, apply_to_pooled='enable') + negative_embeddings_final = [[negative_embeddings_final, {"pooled_output": negative_pooled}]] + + return model, positive_embeddings_final, negative_embeddings_final, vae, clip + + # Create Empty Latent + latent = sampler.emptyLatent(None, batch_size, empty_latent_width, empty_latent_height) + samples = {"samples":latent} + + model, positive_embeddings, negative_embeddings, vae, clip = SDXL_loader(ckpt_name, vae_name, + lora1_name, lora1_model_strength, lora1_clip_strength, + lora2_name, lora2_model_strength, lora2_clip_strength, + positive, positive_token_normalization, positive_weight_interpretation, + negative, negative_token_normalization, negative_weight_interpretation) + + if refiner_ckpt_name != "None": + refiner_model, refiner_positive_embeddings, refiner_negative_embeddings, refiner_vae, refiner_clip = SDXL_loader(refiner_ckpt_name, refiner_vae_name, + refiner_lora1_name, refiner_lora1_model_strength, refiner_lora1_clip_strength, + refiner_lora2_name, refiner_lora2_model_strength, refiner_lora2_clip_strength, + positive, positive_token_normalization, positive_weight_interpretation, + negative, negative_token_normalization, negative_weight_interpretation) + else: + refiner_model, refiner_positive_embeddings, refiner_negative_embeddings, refiner_vae, refiner_clip = None, None, None, None, None + + image = ttNsampler.pil2tensor(Image.new('RGB', (1, 1), (0, 0, 0))) + + pipe = {"model": model, + "positive": positive_embeddings, + "negative": negative_embeddings, + "vae": vae, + "clip": clip, + + "refiner_model": refiner_model, + "refiner_positive": refiner_positive_embeddings, + "refiner_negative": refiner_negative_embeddings, + "refiner_vae": refiner_vae, + "refiner_clip": refiner_clip, + + "samples": samples, + "images": image, + "seed": seed, + + "loader_settings": {"ckpt_name": ckpt_name, + "vae_name": vae_name, + + "lora1_name": lora1_name, + "lora1_model_strength": lora1_model_strength, + "lora1_clip_strength": lora1_clip_strength, + "lora2_name": lora2_name, + "lora2_model_strength": lora2_model_strength, + "lora2_clip_strength": lora2_clip_strength, + "lora3_name": None, + "lora3_model_strength": None, + "lora3_clip_strength": None, + + "refiner_ckpt_name": refiner_ckpt_name, + "refiner_vae_name": refiner_vae_name, + "refiner_lora1_name": refiner_lora1_name, + "refiner_lora1_model_strength": refiner_lora1_model_strength, + "refiner_lora1_clip_strength": refiner_lora1_clip_strength, + "refiner_lora2_name": refiner_lora2_name, + "refiner_lora2_model_strength": refiner_lora2_model_strength, + "refiner_lora2_clip_strength": refiner_lora2_clip_strength, + + "clip_skip": clip_skip, + "positive_balance": None, + "positive": positive, + "positive_l": None, + "positive_g": None, + 
"positive_token_normalization": positive_token_normalization, + "positive_weight_interpretation": positive_weight_interpretation, + "negative_balance": None, + "negative": negative, + "negative_l": None, + "negative_g": None, + "negative_token_normalization": negative_token_normalization, + "negative_weight_interpretation": negative_weight_interpretation, + "empty_latent_width": empty_latent_width, + "empty_latent_height": empty_latent_height, + "batch_size": batch_size, + "seed": seed, + "empty_samples": samples,} + } + + return (pipe, model, positive_embeddings, negative_embeddings, vae, clip, refiner_model, refiner_positive_embeddings, refiner_negative_embeddings, refiner_vae, refiner_clip, samples, seed) + +class ttN_pipeKSamplerSDXL: + version = '1.0.2' + upscale_methods = ["None", "nearest-exact", "bilinear", "area", "bicubic", "lanczos", "bislerp"] + crop_methods = ["disabled", "center"] + + def __init__(self): + pass + + @classmethod + def INPUT_TYPES(cls): + return {"required": + {"sdxl_pipe": ("PIPE_LINE_SDXL",), + + "upscale_method": (cls.upscale_methods,), + "factor": ("FLOAT", {"default": 2, "min": 0.0, "max": 10.0, "step": 0.25}), + "crop": (cls.crop_methods,), + "sampler_state": (["Sample", "Hold"], ), + + "base_steps": ("INT", {"default": 20, "min": 1, "max": 10000}), + "refiner_steps": ("INT", {"default": 20, "min": 0, "max": 10000}), + "cfg": ("FLOAT", {"default": 8.0, "min": 0.0, "max": 100.0}), + "sampler_name": (comfy.samplers.KSampler.SAMPLERS,), + "scheduler": (comfy.samplers.KSampler.SCHEDULERS,), + "image_output": (["Hide", "Preview", "Save", "Hide/Save"],), + "save_prefix": ("STRING", {"default": "ComfyUI"}) + }, + "optional": + {"seed": ("INT", {"default": 0, "min": 0, "max": 0xffffffffffffffff}), + "optional_model": ("MODEL",), + "optional_positive": ("CONDITIONING",), + "optional_negative": ("CONDITIONING",), + "optional_vae": ("VAE",), + "optional_refiner_model": ("MODEL",), + "optional_refiner_positive": ("CONDITIONING",), + "optional_refiner_negative": ("CONDITIONING",), + "optional_refiner_vae": ("VAE",), + "optional_latent": ("LATENT",), + "optional_clip": ("CLIP",), + #"xyPlot": ("XYPLOT",), + }, + "hidden": + {"prompt": "PROMPT", "extra_pnginfo": "EXTRA_PNGINFO", "my_unique_id": "UNIQUE_ID", + "embeddingsList": (folder_paths.get_filename_list("embeddings"),), + "ttNnodeVersion": ttN_pipeKSamplerSDXL.version + }, + } + + RETURN_TYPES = ("PIPE_LINE_SDXL", "MODEL", "CONDITIONING", "CONDITIONING", "VAE", "MODEL", "CONDITIONING", "CONDITIONING", "VAE", "LATENT", "CLIP", "IMAGE", "INT",) + RETURN_NAMES = ("sdxl_pipe", "model", "positive", "negative" ,"vae", "refiner_model", "refiner_positive", "refiner_negative" ,"refiner_vae", "latent", "clip", "image", "seed", ) + OUTPUT_NODE = True + FUNCTION = "sample" + CATEGORY = "🌏 tinyterra/legacy" + + def sample(self, sdxl_pipe, sampler_state, + base_steps, refiner_steps, cfg, sampler_name, scheduler, image_output, save_prefix, denoise=1.0, + optional_model=None, optional_positive=None, optional_negative=None, optional_latent=None, optional_vae=None, optional_clip=None, + optional_refiner_model=None, optional_refiner_positive=None, optional_refiner_negative=None, optional_refiner_vae=None, + seed=None, xyPlot=None, upscale_method=None, factor=None, crop=None, prompt=None, extra_pnginfo=None, my_unique_id=None, + start_step=None, last_step=None, force_full_denoise=False, disable_noise=False): + + sdxl_pipe = {**sdxl_pipe} + + my_unique_id = int(my_unique_id) + + ttN_save = ttNsave(my_unique_id, prompt, extra_pnginfo) + 
+ sdxl_samples = optional_latent if optional_latent is not None else sdxl_pipe["samples"] + + sdxl_model = optional_model if optional_model is not None else sdxl_pipe["model"] + sdxl_positive = optional_positive if optional_positive is not None else sdxl_pipe["positive"] + sdxl_negative = optional_negative if optional_negative is not None else sdxl_pipe["negative"] + sdxl_vae = optional_vae if optional_vae is not None else sdxl_pipe["vae"] + sdxl_clip = optional_clip if optional_clip is not None else sdxl_pipe["clip"] + sdxl_refiner_model = optional_refiner_model if optional_refiner_model is not None else sdxl_pipe["refiner_model"] + sdxl_refiner_positive = optional_refiner_positive if optional_refiner_positive is not None else sdxl_pipe["refiner_positive"] + sdxl_refiner_negative = optional_refiner_negative if optional_refiner_negative is not None else sdxl_pipe["refiner_negative"] + sdxl_refiner_vae = optional_refiner_vae if optional_refiner_vae is not None else sdxl_pipe["refiner_vae"] + sdxl_refiner_clip = sdxl_pipe["refiner_clip"] + + if seed in (None, 'undefined'): + sdxl_seed = sdxl_pipe["seed"] + else: + sdxl_seed = seed + + def process_sample_state(sdxl_pipe, sdxl_samples, sdxl_model, sdxl_positive, sdxl_negative, sdxl_vae, sdxl_clip, sdxl_seed, + sdxl_refiner_model, sdxl_refiner_positive, sdxl_refiner_negative, sdxl_refiner_vae, sdxl_refiner_clip, + base_steps, refiner_steps, cfg, sampler_name, scheduler, denoise, + image_output, save_prefix, prompt, my_unique_id, preview_latent, disable_noise=disable_noise): + + total_steps = base_steps + refiner_steps + + # Upscale samples if enabled + sdxl_samples = sampler.handle_upscale(sdxl_samples, upscale_method, factor, crop) + + + if (refiner_steps > 0) and (sdxl_refiner_model not in [None, "None"]): + # Base Sample + sdxl_samples = sampler.common_ksampler(sdxl_model, sdxl_seed, total_steps, cfg, sampler_name, scheduler, sdxl_positive, sdxl_negative, sdxl_samples, + denoise=denoise, preview_latent=preview_latent, start_step=0, last_step=base_steps, force_full_denoise=force_full_denoise, disable_noise=disable_noise) + + # Refiner Sample + sdxl_samples = sampler.common_ksampler(sdxl_refiner_model, sdxl_seed, total_steps, cfg, sampler_name, scheduler, sdxl_refiner_positive, sdxl_refiner_negative, sdxl_samples, + denoise=denoise, preview_latent=preview_latent, start_step=base_steps, last_step=10000, force_full_denoise=True, disable_noise=True) + + latent = sdxl_samples["samples"] + sdxl_images = sdxl_refiner_vae.decode(latent) + del latent + else: + sdxl_samples = sampler.common_ksampler(sdxl_model, sdxl_seed, base_steps, cfg, sampler_name, scheduler, sdxl_positive, sdxl_negative, sdxl_samples, + denoise=denoise, preview_latent=preview_latent, start_step=0, last_step=base_steps, force_full_denoise=True, disable_noise=disable_noise) + + latent = sdxl_samples["samples"] + sdxl_images = sdxl_vae.decode(latent) + del latent + + results = ttN_save.images(sdxl_images, save_prefix, image_output) + + sampler.update_value_by_id("results", my_unique_id, results) + + new_sdxl_pipe = {"model": sdxl_model, + "positive": sdxl_positive, + "negative": sdxl_negative, + "vae": sdxl_vae, + "clip": sdxl_clip, + + "refiner_model": sdxl_refiner_model, + "refiner_positive": sdxl_refiner_positive, + "refiner_negative": sdxl_refiner_negative, + "refiner_vae": sdxl_refiner_vae, + "refiner_clip": sdxl_refiner_clip, + + "samples": sdxl_samples, + "images": sdxl_images, + "seed": sdxl_seed, + + "loader_settings": sdxl_pipe["loader_settings"], + } + + del sdxl_pipe + + 
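+            # Cache the rebuilt pipe under this node's unique id so a later "Hold" run can replay the last output without resampling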
sampler.update_value_by_id("pipe_line", my_unique_id, new_sdxl_pipe) + + if image_output in ("Hide", "Hide/Save"): + return sampler.get_output_sdxl(new_sdxl_pipe) + + return {"ui": {"images": results}, + "result": sampler.get_output_sdxl(new_sdxl_pipe)} + + def process_hold_state(sdxl_pipe, image_output, my_unique_id): + ttNl('Held').t(f'pipeKSamplerSDXL[{my_unique_id}]').p() + + last_pipe = sampler.init_state(my_unique_id, "pipe_line", sdxl_pipe) + + last_results = sampler.init_state(my_unique_id, "results", list()) + + if image_output in ("Hide", "Hide/Save"): + return sampler.get_output_sdxl(last_pipe) + + return {"ui": {"images": last_results}, "result": sampler.get_output_sdxl(last_pipe)} + + preview_latent = True + if image_output in ("Hide", "Hide/Save"): + preview_latent = False + + if sampler_state == "Sample" and xyPlot is None: + return process_sample_state(sdxl_pipe, sdxl_samples, sdxl_model, sdxl_positive, sdxl_negative, sdxl_vae, sdxl_clip, sdxl_seed, + sdxl_refiner_model, sdxl_refiner_positive, sdxl_refiner_negative, sdxl_refiner_vae, sdxl_refiner_clip, base_steps, refiner_steps, cfg, sampler_name, scheduler, denoise, image_output, save_prefix, prompt, my_unique_id, preview_latent) + + #elif sampler_state == "Sample" and xyPlot is not None: + # return process_xyPlot(sdxl_pipe, lora_name, lora_model_strength, lora_clip_strength, steps, cfg, sampler_name, scheduler, denoise, image_output, save_prefix, prompt, extra_pnginfo, my_unique_id, preview_latent, xyPlot) + + elif sampler_state == "Hold": + return process_hold_state(sdxl_pipe, image_output, my_unique_id) + +#---------------------------------------------------------------DEPRECATED END-----------------------------------------------------------------------# + +TTN_LEGACY_VERSIONS = { + "pipeLoader": ttN_TSC_pipeLoader.version, + "pipeKSampler": ttN_TSC_pipeKSampler.version, + "pipeKSamplerAdvanced": ttN_pipeKSamplerAdvanced.version, + "pipeLoaderSDXL": ttN_pipeLoaderSDXL.version, + "pipeKSamplerSDXL": ttN_pipeKSamplerSDXL.version, + "pipeIN": ttN_pipe_IN.version, + "pipeOUT": ttN_pipe_OUT.version, + "xyPlot": ttN_XYPlot.version, +} +NODE_CLASS_MAPPINGS = { + "ttN xyPlot": ttN_XYPlot, + "ttN pipeIN": ttN_pipe_IN, + "ttN pipeOUT": ttN_pipe_OUT, + "ttN pipeLoader": ttN_TSC_pipeLoader, + "ttN pipeKSampler": ttN_TSC_pipeKSampler, + "ttN pipeKSamplerAdvanced": ttN_pipeKSamplerAdvanced, + "ttN pipeLoaderSDXL": ttN_pipeLoaderSDXL, + "ttN pipeKSamplerSDXL": ttN_pipeKSamplerSDXL, +} +NODE_DISPLAY_NAME_MAPPINGS = { + "ttN xyPlot": "xyPlot", + "ttN pipeIN": "pipeIN (Legacy)", + "ttN pipeOUT": "pipeOUT (Legacy)", + "ttN pipeLoader": "pipeLoader v1 (Legacy)", + "ttN pipeKSampler": "pipeKSampler v1 (Legacy)", + "ttN pipeKSamplerAdvanced": "pipeKSamplerAdvanced v1 (Legacy)", + "ttN pipeLoaderSDXL": "pipeLoaderSDXL v1 (Legacy)", + "ttN pipeKSamplerSDXL": "pipeKSamplerSDXL v1 (Legacy)", +} diff --git a/zavodik/nodes/ComfyUI_tinyterraNodes-main/ttNpy/ttNserver.py b/zavodik/nodes/ComfyUI_tinyterraNodes-main/ttNpy/ttNserver.py new file mode 100644 index 0000000000000000000000000000000000000000..56b055e3c738b7ed2d91b920f90ae1ed8d345af2 --- /dev/null +++ b/zavodik/nodes/ComfyUI_tinyterraNodes-main/ttNpy/ttNserver.py @@ -0,0 +1,32 @@ +import os +import sys + +from aiohttp import web + +import folder_paths +from server import PromptServer + +routes = PromptServer.instance.routes + +@routes.get("/ttN/reboot") +def restart(self): + try: + sys.stdout.close_log() + except Exception as e: + pass + + print(f"\nRestarting...\n\n") + if 
sys.platform.startswith('win32'): + return os.execv(sys.executable, ['"' + sys.executable + '"', '"' + sys.argv[0] + '"'] + sys.argv[1:]) + else: + return os.execv(sys.executable, [sys.executable] + sys.argv) + +@routes.get("/ttN/models") +def get_models(self): + ckpts = folder_paths.get_filename_list("checkpoints") + return web.json_response(list(map(lambda a: os.path.splitext(a)[0], ckpts))) + +@routes.get("/ttN/loras") +def get_loras(self): + loras = folder_paths.get_filename_list("loras") + return web.json_response(loras) \ No newline at end of file diff --git a/zavodik/nodes/ComfyUI_tinyterraNodes-main/ttNpy/utils.py b/zavodik/nodes/ComfyUI_tinyterraNodes-main/ttNpy/utils.py new file mode 100644 index 0000000000000000000000000000000000000000..67dd35381417dc7c26271b16dfdd313c7786649b --- /dev/null +++ b/zavodik/nodes/ComfyUI_tinyterraNodes-main/ttNpy/utils.py @@ -0,0 +1,83 @@ +import os +from pathlib import Path + +import folder_paths + +class CC: + CLEAN = '\33[0m' + BOLD = '\33[1m' + ITALIC = '\33[3m' + UNDERLINE = '\33[4m' + BLINK = '\33[5m' + BLINK2 = '\33[6m' + SELECTED = '\33[7m' + + BLACK = '\33[30m' + RED = '\33[31m' + GREEN = '\33[32m' + YELLOW = '\33[33m' + BLUE = '\33[34m' + VIOLET = '\33[35m' + BEIGE = '\33[36m' + WHITE = '\33[37m' + + GREY = '\33[90m' + LIGHTRED = '\33[91m' + LIGHTGREEN = '\33[92m' + LIGHTYELLOW = '\33[93m' + LIGHTBLUE = '\33[94m' + LIGHTVIOLET = '\33[95m' + LIGHTBEIGE = '\33[96m' + LIGHTWHITE = '\33[97m' + +class ttNl: + def __init__(self, input_string): + self.header_value = f'{CC.LIGHTGREEN}[ttN] {CC.GREEN}' + self.label_value = '' + self.title_value = '' + self.input_string = f'{input_string}{CC.CLEAN}' + + def h(self, header_value): + self.header_value = f'{CC.LIGHTGREEN}[{header_value}] {CC.GREEN}' + return self + + def full(self): + self.h('tinyterraNodes') + return self + + def success(self): + self.label_value = f'Success: ' + return self + + def warn(self): + self.label_value = f'{CC.RED}Warning:{CC.LIGHTRED} ' + return self + + def error(self): + self.label_value = f'{CC.LIGHTRED}ERROR:{CC.RED} ' + return self + + def t(self, title_value): + self.title_value = f'{title_value}:{CC.CLEAN} ' + return self + + def p(self): + print(self.header_value + self.label_value + self.title_value + self.input_string) + return self + + def interrupt(self, msg): + raise Exception(msg) + +class ttNpaths: + ComfyUI = folder_paths.base_path + tinyterraNodes = Path(__file__).parent.parent + font_path = os.path.join(tinyterraNodes, 'arial.ttf') + +class AnyType(str): + """A special class that is always equal in not equal comparisons. 
Credit to pythongosssss"""
+
+    def __eq__(self, _) -> bool:
+        return True
+
+    def __ne__(self, __value: object) -> bool:
+        return False
\ No newline at end of file
diff --git a/zavodik/nodes/ComfyUI_tinyterraNodes/.github/workflows/publish_action.yml b/zavodik/nodes/ComfyUI_tinyterraNodes/.github/workflows/publish_action.yml
new file mode 100644
index 0000000000000000000000000000000000000000..2b3b1eafe4b6aef35b51be069b8061d54f8dd092
--- /dev/null
+++ b/zavodik/nodes/ComfyUI_tinyterraNodes/.github/workflows/publish_action.yml
@@ -0,0 +1,24 @@
+name: Publish to Comfy registry
+on:
+  workflow_dispatch:
+  push:
+    branches:
+      - main
+    paths:
+      - "pyproject.toml"
+
+permissions:
+  issues: write
+
+jobs:
+  publish-node:
+    name: Publish Custom Node to registry
+    runs-on: ubuntu-latest
+    if: ${{ github.repository_owner == 'TinyTerra' }}
+    steps:
+      - name: Check out code
+        uses: actions/checkout@v4
+      - name: Publish Custom Node
+        uses: Comfy-Org/publish-node-action@v1
+        with:
+          personal_access_token: ${{ secrets.REGISTRY_ACCESS_TOKEN }}
diff --git a/zavodik/nodes/ComfyUI_tinyterraNodes/.gitignore b/zavodik/nodes/ComfyUI_tinyterraNodes/.gitignore
new file mode 100644
index 0000000000000000000000000000000000000000..c1ae7451560f503a55c628d0771ef3de33d729d2
--- /dev/null
+++ b/zavodik/nodes/ComfyUI_tinyterraNodes/.gitignore
@@ -0,0 +1,3 @@
+config.ini
+nsp_pantry.json
+__pycache__
\ No newline at end of file
diff --git a/zavodik/nodes/ComfyUI_tinyterraNodes/LICENSE b/zavodik/nodes/ComfyUI_tinyterraNodes/LICENSE
new file mode 100644
index 0000000000000000000000000000000000000000..3877ae0a7ff6f94ac222fd704e112723db776114
--- /dev/null
+++ b/zavodik/nodes/ComfyUI_tinyterraNodes/LICENSE
@@ -0,0 +1,674 @@
+                    GNU GENERAL PUBLIC LICENSE
+                       Version 3, 29 June 2007
+
+ Copyright (C) 2007 Free Software Foundation, Inc. <https://fsf.org/>
+ Everyone is permitted to copy and distribute verbatim copies
+ of this license document, but changing it is not allowed.
+
+                            Preamble
+
+  The GNU General Public License is a free, copyleft license for
+software and other kinds of works.
+
+  The licenses for most software and other practical works are designed
+to take away your freedom to share and change the works.  By contrast,
+the GNU General Public License is intended to guarantee your freedom to
+share and change all versions of a program--to make sure it remains free
+software for all its users.  We, the Free Software Foundation, use the
+GNU General Public License for most of our software; it applies also to
+any other work released this way by its authors.  You can apply it to
+your programs, too.
+
+  When we speak of free software, we are referring to freedom, not
+price.  Our General Public Licenses are designed to make sure that you
+have the freedom to distribute copies of free software (and charge for
+them if you wish), that you receive source code or can get it if you
+want it, that you can change the software or use pieces of it in new
+free programs, and that you know you can do these things.
+
+  To protect your rights, we need to prevent others from denying you
+these rights or asking you to surrender the rights.  Therefore, you have
+certain responsibilities if you distribute copies of the software, or if
+you modify it: responsibilities to respect the freedom of others.
+
+  For example, if you distribute copies of such a program, whether
+gratis or for a fee, you must pass on to the recipients the same
+freedoms that you received.  You must make sure that they, too, receive
+or can get the source code.
And you must show them these terms so they +know their rights. + + Developers that use the GNU GPL protect your rights with two steps: +(1) assert copyright on the software, and (2) offer you this License +giving you legal permission to copy, distribute and/or modify it. + + For the developers' and authors' protection, the GPL clearly explains +that there is no warranty for this free software. For both users' and +authors' sake, the GPL requires that modified versions be marked as +changed, so that their problems will not be attributed erroneously to +authors of previous versions. + + Some devices are designed to deny users access to install or run +modified versions of the software inside them, although the manufacturer +can do so. This is fundamentally incompatible with the aim of +protecting users' freedom to change the software. The systematic +pattern of such abuse occurs in the area of products for individuals to +use, which is precisely where it is most unacceptable. Therefore, we +have designed this version of the GPL to prohibit the practice for those +products. If such problems arise substantially in other domains, we +stand ready to extend this provision to those domains in future versions +of the GPL, as needed to protect the freedom of users. + + Finally, every program is threatened constantly by software patents. +States should not allow patents to restrict development and use of +software on general-purpose computers, but in those that do, we wish to +avoid the special danger that patents applied to a free program could +make it effectively proprietary. To prevent this, the GPL assures that +patents cannot be used to render the program non-free. + + The precise terms and conditions for copying, distribution and +modification follow. + + TERMS AND CONDITIONS + + 0. Definitions. + + "This License" refers to version 3 of the GNU General Public License. + + "Copyright" also means copyright-like laws that apply to other kinds of +works, such as semiconductor masks. + + "The Program" refers to any copyrightable work licensed under this +License. Each licensee is addressed as "you". "Licensees" and +"recipients" may be individuals or organizations. + + To "modify" a work means to copy from or adapt all or part of the work +in a fashion requiring copyright permission, other than the making of an +exact copy. The resulting work is called a "modified version" of the +earlier work or a work "based on" the earlier work. + + A "covered work" means either the unmodified Program or a work based +on the Program. + + To "propagate" a work means to do anything with it that, without +permission, would make you directly or secondarily liable for +infringement under applicable copyright law, except executing it on a +computer or modifying a private copy. Propagation includes copying, +distribution (with or without modification), making available to the +public, and in some countries other activities as well. + + To "convey" a work means any kind of propagation that enables other +parties to make or receive copies. Mere interaction with a user through +a computer network, with no transfer of a copy, is not conveying. 
+ + An interactive user interface displays "Appropriate Legal Notices" +to the extent that it includes a convenient and prominently visible +feature that (1) displays an appropriate copyright notice, and (2) +tells the user that there is no warranty for the work (except to the +extent that warranties are provided), that licensees may convey the +work under this License, and how to view a copy of this License. If +the interface presents a list of user commands or options, such as a +menu, a prominent item in the list meets this criterion. + + 1. Source Code. + + The "source code" for a work means the preferred form of the work +for making modifications to it. "Object code" means any non-source +form of a work. + + A "Standard Interface" means an interface that either is an official +standard defined by a recognized standards body, or, in the case of +interfaces specified for a particular programming language, one that +is widely used among developers working in that language. + + The "System Libraries" of an executable work include anything, other +than the work as a whole, that (a) is included in the normal form of +packaging a Major Component, but which is not part of that Major +Component, and (b) serves only to enable use of the work with that +Major Component, or to implement a Standard Interface for which an +implementation is available to the public in source code form. A +"Major Component", in this context, means a major essential component +(kernel, window system, and so on) of the specific operating system +(if any) on which the executable work runs, or a compiler used to +produce the work, or an object code interpreter used to run it. + + The "Corresponding Source" for a work in object code form means all +the source code needed to generate, install, and (for an executable +work) run the object code and to modify the work, including scripts to +control those activities. However, it does not include the work's +System Libraries, or general-purpose tools or generally available free +programs which are used unmodified in performing those activities but +which are not part of the work. For example, Corresponding Source +includes interface definition files associated with source files for +the work, and the source code for shared libraries and dynamically +linked subprograms that the work is specifically designed to require, +such as by intimate data communication or control flow between those +subprograms and other parts of the work. + + The Corresponding Source need not include anything that users +can regenerate automatically from other parts of the Corresponding +Source. + + The Corresponding Source for a work in source code form is that +same work. + + 2. Basic Permissions. + + All rights granted under this License are granted for the term of +copyright on the Program, and are irrevocable provided the stated +conditions are met. This License explicitly affirms your unlimited +permission to run the unmodified Program. The output from running a +covered work is covered by this License only if the output, given its +content, constitutes a covered work. This License acknowledges your +rights of fair use or other equivalent, as provided by copyright law. + + You may make, run and propagate covered works that you do not +convey, without conditions so long as your license otherwise remains +in force. 
You may convey covered works to others for the sole purpose +of having them make modifications exclusively for you, or provide you +with facilities for running those works, provided that you comply with +the terms of this License in conveying all material for which you do +not control copyright. Those thus making or running the covered works +for you must do so exclusively on your behalf, under your direction +and control, on terms that prohibit them from making any copies of +your copyrighted material outside their relationship with you. + + Conveying under any other circumstances is permitted solely under +the conditions stated below. Sublicensing is not allowed; section 10 +makes it unnecessary. + + 3. Protecting Users' Legal Rights From Anti-Circumvention Law. + + No covered work shall be deemed part of an effective technological +measure under any applicable law fulfilling obligations under article +11 of the WIPO copyright treaty adopted on 20 December 1996, or +similar laws prohibiting or restricting circumvention of such +measures. + + When you convey a covered work, you waive any legal power to forbid +circumvention of technological measures to the extent such circumvention +is effected by exercising rights under this License with respect to +the covered work, and you disclaim any intention to limit operation or +modification of the work as a means of enforcing, against the work's +users, your or third parties' legal rights to forbid circumvention of +technological measures. + + 4. Conveying Verbatim Copies. + + You may convey verbatim copies of the Program's source code as you +receive it, in any medium, provided that you conspicuously and +appropriately publish on each copy an appropriate copyright notice; +keep intact all notices stating that this License and any +non-permissive terms added in accord with section 7 apply to the code; +keep intact all notices of the absence of any warranty; and give all +recipients a copy of this License along with the Program. + + You may charge any price or no price for each copy that you convey, +and you may offer support or warranty protection for a fee. + + 5. Conveying Modified Source Versions. + + You may convey a work based on the Program, or the modifications to +produce it from the Program, in the form of source code under the +terms of section 4, provided that you also meet all of these conditions: + + a) The work must carry prominent notices stating that you modified + it, and giving a relevant date. + + b) The work must carry prominent notices stating that it is + released under this License and any conditions added under section + 7. This requirement modifies the requirement in section 4 to + "keep intact all notices". + + c) You must license the entire work, as a whole, under this + License to anyone who comes into possession of a copy. This + License will therefore apply, along with any applicable section 7 + additional terms, to the whole of the work, and all its parts, + regardless of how they are packaged. This License gives no + permission to license the work in any other way, but it does not + invalidate such permission if you have separately received it. + + d) If the work has interactive user interfaces, each must display + Appropriate Legal Notices; however, if the Program has interactive + interfaces that do not display Appropriate Legal Notices, your + work need not make them do so. 
+ + A compilation of a covered work with other separate and independent +works, which are not by their nature extensions of the covered work, +and which are not combined with it such as to form a larger program, +in or on a volume of a storage or distribution medium, is called an +"aggregate" if the compilation and its resulting copyright are not +used to limit the access or legal rights of the compilation's users +beyond what the individual works permit. Inclusion of a covered work +in an aggregate does not cause this License to apply to the other +parts of the aggregate. + + 6. Conveying Non-Source Forms. + + You may convey a covered work in object code form under the terms +of sections 4 and 5, provided that you also convey the +machine-readable Corresponding Source under the terms of this License, +in one of these ways: + + a) Convey the object code in, or embodied in, a physical product + (including a physical distribution medium), accompanied by the + Corresponding Source fixed on a durable physical medium + customarily used for software interchange. + + b) Convey the object code in, or embodied in, a physical product + (including a physical distribution medium), accompanied by a + written offer, valid for at least three years and valid for as + long as you offer spare parts or customer support for that product + model, to give anyone who possesses the object code either (1) a + copy of the Corresponding Source for all the software in the + product that is covered by this License, on a durable physical + medium customarily used for software interchange, for a price no + more than your reasonable cost of physically performing this + conveying of source, or (2) access to copy the + Corresponding Source from a network server at no charge. + + c) Convey individual copies of the object code with a copy of the + written offer to provide the Corresponding Source. This + alternative is allowed only occasionally and noncommercially, and + only if you received the object code with such an offer, in accord + with subsection 6b. + + d) Convey the object code by offering access from a designated + place (gratis or for a charge), and offer equivalent access to the + Corresponding Source in the same way through the same place at no + further charge. You need not require recipients to copy the + Corresponding Source along with the object code. If the place to + copy the object code is a network server, the Corresponding Source + may be on a different server (operated by you or a third party) + that supports equivalent copying facilities, provided you maintain + clear directions next to the object code saying where to find the + Corresponding Source. Regardless of what server hosts the + Corresponding Source, you remain obligated to ensure that it is + available for as long as needed to satisfy these requirements. + + e) Convey the object code using peer-to-peer transmission, provided + you inform other peers where the object code and Corresponding + Source of the work are being offered to the general public at no + charge under subsection 6d. + + A separable portion of the object code, whose source code is excluded +from the Corresponding Source as a System Library, need not be +included in conveying the object code work. + + A "User Product" is either (1) a "consumer product", which means any +tangible personal property which is normally used for personal, family, +or household purposes, or (2) anything designed or sold for incorporation +into a dwelling. 
In determining whether a product is a consumer product, +doubtful cases shall be resolved in favor of coverage. For a particular +product received by a particular user, "normally used" refers to a +typical or common use of that class of product, regardless of the status +of the particular user or of the way in which the particular user +actually uses, or expects or is expected to use, the product. A product +is a consumer product regardless of whether the product has substantial +commercial, industrial or non-consumer uses, unless such uses represent +the only significant mode of use of the product. + + "Installation Information" for a User Product means any methods, +procedures, authorization keys, or other information required to install +and execute modified versions of a covered work in that User Product from +a modified version of its Corresponding Source. The information must +suffice to ensure that the continued functioning of the modified object +code is in no case prevented or interfered with solely because +modification has been made. + + If you convey an object code work under this section in, or with, or +specifically for use in, a User Product, and the conveying occurs as +part of a transaction in which the right of possession and use of the +User Product is transferred to the recipient in perpetuity or for a +fixed term (regardless of how the transaction is characterized), the +Corresponding Source conveyed under this section must be accompanied +by the Installation Information. But this requirement does not apply +if neither you nor any third party retains the ability to install +modified object code on the User Product (for example, the work has +been installed in ROM). + + The requirement to provide Installation Information does not include a +requirement to continue to provide support service, warranty, or updates +for a work that has been modified or installed by the recipient, or for +the User Product in which it has been modified or installed. Access to a +network may be denied when the modification itself materially and +adversely affects the operation of the network or violates the rules and +protocols for communication across the network. + + Corresponding Source conveyed, and Installation Information provided, +in accord with this section must be in a format that is publicly +documented (and with an implementation available to the public in +source code form), and must require no special password or key for +unpacking, reading or copying. + + 7. Additional Terms. + + "Additional permissions" are terms that supplement the terms of this +License by making exceptions from one or more of its conditions. +Additional permissions that are applicable to the entire Program shall +be treated as though they were included in this License, to the extent +that they are valid under applicable law. If additional permissions +apply only to part of the Program, that part may be used separately +under those permissions, but the entire Program remains governed by +this License without regard to the additional permissions. + + When you convey a copy of a covered work, you may at your option +remove any additional permissions from that copy, or from any part of +it. (Additional permissions may be written to require their own +removal in certain cases when you modify the work.) You may place +additional permissions on material, added by you to a covered work, +for which you have or can give appropriate copyright permission. 
+ + Notwithstanding any other provision of this License, for material you +add to a covered work, you may (if authorized by the copyright holders of +that material) supplement the terms of this License with terms: + + a) Disclaiming warranty or limiting liability differently from the + terms of sections 15 and 16 of this License; or + + b) Requiring preservation of specified reasonable legal notices or + author attributions in that material or in the Appropriate Legal + Notices displayed by works containing it; or + + c) Prohibiting misrepresentation of the origin of that material, or + requiring that modified versions of such material be marked in + reasonable ways as different from the original version; or + + d) Limiting the use for publicity purposes of names of licensors or + authors of the material; or + + e) Declining to grant rights under trademark law for use of some + trade names, trademarks, or service marks; or + + f) Requiring indemnification of licensors and authors of that + material by anyone who conveys the material (or modified versions of + it) with contractual assumptions of liability to the recipient, for + any liability that these contractual assumptions directly impose on + those licensors and authors. + + All other non-permissive additional terms are considered "further +restrictions" within the meaning of section 10. If the Program as you +received it, or any part of it, contains a notice stating that it is +governed by this License along with a term that is a further +restriction, you may remove that term. If a license document contains +a further restriction but permits relicensing or conveying under this +License, you may add to a covered work material governed by the terms +of that license document, provided that the further restriction does +not survive such relicensing or conveying. + + If you add terms to a covered work in accord with this section, you +must place, in the relevant source files, a statement of the +additional terms that apply to those files, or a notice indicating +where to find the applicable terms. + + Additional terms, permissive or non-permissive, may be stated in the +form of a separately written license, or stated as exceptions; +the above requirements apply either way. + + 8. Termination. + + You may not propagate or modify a covered work except as expressly +provided under this License. Any attempt otherwise to propagate or +modify it is void, and will automatically terminate your rights under +this License (including any patent licenses granted under the third +paragraph of section 11). + + However, if you cease all violation of this License, then your +license from a particular copyright holder is reinstated (a) +provisionally, unless and until the copyright holder explicitly and +finally terminates your license, and (b) permanently, if the copyright +holder fails to notify you of the violation by some reasonable means +prior to 60 days after the cessation. + + Moreover, your license from a particular copyright holder is +reinstated permanently if the copyright holder notifies you of the +violation by some reasonable means, this is the first time you have +received notice of violation of this License (for any work) from that +copyright holder, and you cure the violation prior to 30 days after +your receipt of the notice. + + Termination of your rights under this section does not terminate the +licenses of parties who have received copies or rights from you under +this License. 
If your rights have been terminated and not permanently +reinstated, you do not qualify to receive new licenses for the same +material under section 10. + + 9. Acceptance Not Required for Having Copies. + + You are not required to accept this License in order to receive or +run a copy of the Program. Ancillary propagation of a covered work +occurring solely as a consequence of using peer-to-peer transmission +to receive a copy likewise does not require acceptance. However, +nothing other than this License grants you permission to propagate or +modify any covered work. These actions infringe copyright if you do +not accept this License. Therefore, by modifying or propagating a +covered work, you indicate your acceptance of this License to do so. + + 10. Automatic Licensing of Downstream Recipients. + + Each time you convey a covered work, the recipient automatically +receives a license from the original licensors, to run, modify and +propagate that work, subject to this License. You are not responsible +for enforcing compliance by third parties with this License. + + An "entity transaction" is a transaction transferring control of an +organization, or substantially all assets of one, or subdividing an +organization, or merging organizations. If propagation of a covered +work results from an entity transaction, each party to that +transaction who receives a copy of the work also receives whatever +licenses to the work the party's predecessor in interest had or could +give under the previous paragraph, plus a right to possession of the +Corresponding Source of the work from the predecessor in interest, if +the predecessor has it or can get it with reasonable efforts. + + You may not impose any further restrictions on the exercise of the +rights granted or affirmed under this License. For example, you may +not impose a license fee, royalty, or other charge for exercise of +rights granted under this License, and you may not initiate litigation +(including a cross-claim or counterclaim in a lawsuit) alleging that +any patent claim is infringed by making, using, selling, offering for +sale, or importing the Program or any portion of it. + + 11. Patents. + + A "contributor" is a copyright holder who authorizes use under this +License of the Program or a work on which the Program is based. The +work thus licensed is called the contributor's "contributor version". + + A contributor's "essential patent claims" are all patent claims +owned or controlled by the contributor, whether already acquired or +hereafter acquired, that would be infringed by some manner, permitted +by this License, of making, using, or selling its contributor version, +but do not include claims that would be infringed only as a +consequence of further modification of the contributor version. For +purposes of this definition, "control" includes the right to grant +patent sublicenses in a manner consistent with the requirements of +this License. + + Each contributor grants you a non-exclusive, worldwide, royalty-free +patent license under the contributor's essential patent claims, to +make, use, sell, offer for sale, import and otherwise run, modify and +propagate the contents of its contributor version. + + In the following three paragraphs, a "patent license" is any express +agreement or commitment, however denominated, not to enforce a patent +(such as an express permission to practice a patent or covenant not to +sue for patent infringement). 
To "grant" such a patent license to a +party means to make such an agreement or commitment not to enforce a +patent against the party. + + If you convey a covered work, knowingly relying on a patent license, +and the Corresponding Source of the work is not available for anyone +to copy, free of charge and under the terms of this License, through a +publicly available network server or other readily accessible means, +then you must either (1) cause the Corresponding Source to be so +available, or (2) arrange to deprive yourself of the benefit of the +patent license for this particular work, or (3) arrange, in a manner +consistent with the requirements of this License, to extend the patent +license to downstream recipients. "Knowingly relying" means you have +actual knowledge that, but for the patent license, your conveying the +covered work in a country, or your recipient's use of the covered work +in a country, would infringe one or more identifiable patents in that +country that you have reason to believe are valid. + + If, pursuant to or in connection with a single transaction or +arrangement, you convey, or propagate by procuring conveyance of, a +covered work, and grant a patent license to some of the parties +receiving the covered work authorizing them to use, propagate, modify +or convey a specific copy of the covered work, then the patent license +you grant is automatically extended to all recipients of the covered +work and works based on it. + + A patent license is "discriminatory" if it does not include within +the scope of its coverage, prohibits the exercise of, or is +conditioned on the non-exercise of one or more of the rights that are +specifically granted under this License. You may not convey a covered +work if you are a party to an arrangement with a third party that is +in the business of distributing software, under which you make payment +to the third party based on the extent of your activity of conveying +the work, and under which the third party grants, to any of the +parties who would receive the covered work from you, a discriminatory +patent license (a) in connection with copies of the covered work +conveyed by you (or copies made from those copies), or (b) primarily +for and in connection with specific products or compilations that +contain the covered work, unless you entered into that arrangement, +or that patent license was granted, prior to 28 March 2007. + + Nothing in this License shall be construed as excluding or limiting +any implied license or other defenses to infringement that may +otherwise be available to you under applicable patent law. + + 12. No Surrender of Others' Freedom. + + If conditions are imposed on you (whether by court order, agreement or +otherwise) that contradict the conditions of this License, they do not +excuse you from the conditions of this License. If you cannot convey a +covered work so as to satisfy simultaneously your obligations under this +License and any other pertinent obligations, then as a consequence you may +not convey it at all. For example, if you agree to terms that obligate you +to collect a royalty for further conveying from those to whom you convey +the Program, the only way you could satisfy both those terms and this +License would be to refrain entirely from conveying the Program. + + 13. Use with the GNU Affero General Public License. 
+ + Notwithstanding any other provision of this License, you have +permission to link or combine any covered work with a work licensed +under version 3 of the GNU Affero General Public License into a single +combined work, and to convey the resulting work. The terms of this +License will continue to apply to the part which is the covered work, +but the special requirements of the GNU Affero General Public License, +section 13, concerning interaction through a network will apply to the +combination as such. + + 14. Revised Versions of this License. + + The Free Software Foundation may publish revised and/or new versions of +the GNU General Public License from time to time. Such new versions will +be similar in spirit to the present version, but may differ in detail to +address new problems or concerns. + + Each version is given a distinguishing version number. If the +Program specifies that a certain numbered version of the GNU General +Public License "or any later version" applies to it, you have the +option of following the terms and conditions either of that numbered +version or of any later version published by the Free Software +Foundation. If the Program does not specify a version number of the +GNU General Public License, you may choose any version ever published +by the Free Software Foundation. + + If the Program specifies that a proxy can decide which future +versions of the GNU General Public License can be used, that proxy's +public statement of acceptance of a version permanently authorizes you +to choose that version for the Program. + + Later license versions may give you additional or different +permissions. However, no additional obligations are imposed on any +author or copyright holder as a result of your choosing to follow a +later version. + + 15. Disclaimer of Warranty. + + THERE IS NO WARRANTY FOR THE PROGRAM, TO THE EXTENT PERMITTED BY +APPLICABLE LAW. EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT +HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY +OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, +THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR +PURPOSE. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE PROGRAM +IS WITH YOU. SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF +ALL NECESSARY SERVICING, REPAIR OR CORRECTION. + + 16. Limitation of Liability. + + IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING +WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MODIFIES AND/OR CONVEYS +THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY +GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE +USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED TO LOSS OF +DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD +PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER PROGRAMS), +EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF +SUCH DAMAGES. + + 17. Interpretation of Sections 15 and 16. + + If the disclaimer of warranty and limitation of liability provided +above cannot be given local legal effect according to their terms, +reviewing courts shall apply local law that most closely approximates +an absolute waiver of all civil liability in connection with the +Program, unless a warranty or assumption of liability accompanies a +copy of the Program in return for a fee. 
+
+                     END OF TERMS AND CONDITIONS
+
+            How to Apply These Terms to Your New Programs
+
+  If you develop a new program, and you want it to be of the greatest
+possible use to the public, the best way to achieve this is to make it
+free software which everyone can redistribute and change under these terms.
+
+  To do so, attach the following notices to the program.  It is safest
+to attach them to the start of each source file to most effectively
+state the exclusion of warranty; and each file should have at least
+the "copyright" line and a pointer to where the full notice is found.
+
+    <one line to give the program's name and a brief idea of what it does.>
+    Copyright (C) <year>  <name of author>
+
+    This program is free software: you can redistribute it and/or modify
+    it under the terms of the GNU General Public License as published by
+    the Free Software Foundation, either version 3 of the License, or
+    (at your option) any later version.
+
+    This program is distributed in the hope that it will be useful,
+    but WITHOUT ANY WARRANTY; without even the implied warranty of
+    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+    GNU General Public License for more details.
+
+    You should have received a copy of the GNU General Public License
+    along with this program.  If not, see <https://www.gnu.org/licenses/>.
+
+Also add information on how to contact you by electronic and paper mail.
+
+  If the program does terminal interaction, make it output a short
+notice like this when it starts in an interactive mode:
+
+    <program>  Copyright (C) <year>  <name of author>
+    This program comes with ABSOLUTELY NO WARRANTY; for details type `show w'.
+    This is free software, and you are welcome to redistribute it
+    under certain conditions; type `show c' for details.
+
+The hypothetical commands `show w' and `show c' should show the appropriate
+parts of the General Public License.  Of course, your program's commands
+might be different; for a GUI interface, you would use an "about box".
+
+  You should also get your employer (if you work as a programmer) or school,
+if any, to sign a "copyright disclaimer" for the program, if necessary.
+For more information on this, and how to apply and follow the GNU GPL, see
+<https://www.gnu.org/licenses/>.
+
+  The GNU General Public License does not permit incorporating your program
+into proprietary programs.  If your program is a subroutine library, you
+may consider it more useful to permit linking proprietary applications with
+the library.  If this is what you want to do, use the GNU Lesser General
+Public License instead of this License.  But first, please read
+<https://www.gnu.org/philosophy/why-not-lgpl.html>.
diff --git a/zavodik/nodes/ComfyUI_tinyterraNodes/README.md b/zavodik/nodes/ComfyUI_tinyterraNodes/README.md
new file mode 100644
index 0000000000000000000000000000000000000000..449fb3fd35034505b6721056eca8b16f20ac46f6
--- /dev/null
+++ b/zavodik/nodes/ComfyUI_tinyterraNodes/README.md
@@ -0,0 +1,484 @@
+# tinyterraNodes
+
+*A selection of custom nodes for [ComfyUI](https://github.com/comfyanonymous/ComfyUI).*
+
+**Enjoy my nodes and would like to [help keep me awake](https://buymeacoffee.com/tinyterra)?**
+
+## Installation
+Navigate to the **_ComfyUI/custom_nodes_** directory with cmd, and run:
+
+`git clone https://github.com/TinyTerra/ComfyUI_tinyterraNodes.git`
+
+### Special Features
+**ttN Image Viewer**
+
+*Enabled by default*
+
++ Adds '🌏 Fullscreen Image Viewer' to the node right-click context menu: opens a fullscreen image viewer containing all images generated by the selected node during the current comfy session.
++ Adds '🌏 Popout Image Viewer' to the node right-click context menu: opens a popout image viewer containing all images generated by the selected node during the current comfy session.
++ Adds '🌏 Set Default Fullscreen Node' to the node right-click context menu: sets the currently selected node as the default fullscreen node.
++ Adds '🌏 Clear Default Fullscreen Node' to the node right-click context menu: clears the assigned default fullscreen node.
+
++ Slideshow Mode
+  + Toggled On - automatically jumps to new images as they are generated, as long as the last image is selected (black background); the UI auto-hides after a set time.
+  + Toggled Off - holds the currently selected image (light background).
++ UI Overlay
+  + Toggles display of a navigable preview of all of the selected node's images.
+  + Toggles display of the Image Viewer Settings button.
+
++ *Shortcuts*
+  + 'shift + F11' => _Open ttN-Fullscreen Image Viewer using selected node OR default fullscreen node_
+  + 'shift + F10' => _Open ttN-Popout Image Viewer using selected node OR default fullscreen node_
+
++ *Shortcuts in Image Viewer*
+  + 'up arrow' => _Toggle UI Overlay_
+  + 'down arrow' => _Toggle Slideshow Mode_
+  + 'left arrow' => _Select Image to the left_
+  + 'shift + left arrow' => _Select Image 5 to the left_
+  + 'ctrl + left arrow' => _Select the first Image_
+  + 'right arrow' => _Select Image to the right_
+  + 'shift + right arrow' => _Select Image 5 to the right_
+  + 'ctrl + right arrow' => _Select the last Image_
+  + 'mouse scroll' => _Zoom the current image in and out_
+  + 'ctrl + mouse scroll' => _Select image to Left/Right_
+  + 'left click + drag' => _Update the current image's position_
+  + 'double click' => _Reset position of current image_
+  + 'esc' => _Close Image Viewer_
+  + 'F' => _Fit image to Viewer window_
+  + Show the UI by hovering the mouse in Slideshow mode
+
+**Advanced XY(Z)Plot**
++ pipeKSampler/SDXL input to generate xyz plots using any previous input nodes.
+  + _(Any values not set by xyPlot will be taken from the corresponding nodes)_
+
++ Advanced xyPlot can take multiple variables for each axis, somewhat programmatically.
+
++ Any image input - use the 'advPlot images' node to create an xyPlot from any image input.
+
+Syntax:
+```
+<axis_number:label>
+[node_ID:widget_Name='value']
+
+<axis_number:label>
+[node_ID:widget_Name='value2']
+[node_ID:widget2_Name='value']
+[node_ID2:widget_Name='value']
+```
+For Example:
+```
+<1:v_label>
+[2:ckpt_name='model.safetensors']
+
+<2:custom label>
+[2:ckpt_name='checkpoint.xyz']
+[2:vae_name='someVae.xyz']
+[4:text='Summer sunset']
+```
++ labels:
+  + Any custom string for a custom axis label
+  + v_label - for a concatenation of the values being set. In the example above, if both were set to v_label:
+    + model.safetensors
+    + checkpoint.xyz, someVae.xyz, Summer sunset
+  + tv_label - for the option title and value concatenated. In the example above, if both were set to tv_label:
+    + ckpt_name: model.safetensors
+    + ckpt_name: checkpoint.xyz, vae_name: someVae.xyz, text: Summer sunset
+  + itv_label - for the node ID, option title and value concatenated. In the example above, if both were set to itv_label:
+    + [2] ckpt_name: model.safetensors
+    + [2] ckpt_name: checkpoint.xyz, [2] vae_name: someVae.xyz, [4] text: Summer sunset
++ Node IDs:
+  + Suggested to use 'Badge: ID + nickname' in [ComfyUI Manager](https://github.com/ltdrdata/ComfyUI-Manager) settings to be able to view node IDs.
++ Autocomplete:
+  + ttN Autocomplete will activate when the advanced xyPlot node is connected to a sampler, and will show all the nodes and options available, as well as an 'add axis' option to auto-add the code for a new axis number and label.
++ Search and Replace:
+  + If you include %search;replace% as the value, it will take the current node's value and perform a search and replace using these strings.
+  + You can include more than one to replace different strings.
++ Append to original value:
+  + If you add .append to the widget name, the xyPlot value will be appended to the original value instead of overwriting it.
+  + For example: [1:loras.append='\']
++ Z-Axis support for multi plotting
+  + Creates extra xyPlots with the z-axis value changes as a base
++ Node based plotting, to avoid manually writing syntax
+  + advPlot range for easily creating int/float ranges
+  + advPlot string for delimited string 'ranges'
+
+**Auto Complete**
+
+*Enabled by default*
++ Displays a popup to autocomplete embedding filenames in text widgets - to use, start typing **embedding** and select an option from the list.
++ Displays a popup to autocomplete noodlesoup categories - to use, start typing **__** and select an option from the list.
++ Displays a popup in the ttN 'loras' input to autocomplete loras from a list.
++ Option to disable ([ttNodes] enable_embed_autocomplete = True | False)
+
+**Dynamic Widgets**
+
+*Enabled by default*
+
++ Automatically hides and shows widgets depending on their relevancy
++ Option to disable ([ttNodes] enable_dynamic_widgets = True | False)
+
+**ttNinterface**
+
+*Enabled by default*
+
++ Adds 'Node Dimensions 🌏' to the node right-click context menu. Allows setting specific node width and height values, as long as they are above the minimum size for the given node.
++ Adds 'Default BG Color 🌏' to the node right-click context menu. Allows setting a specific default background color for every node added.
+
++ Adds support for 'ctrl + arrow key' node movement. This aligns the node(s) to the set ComfyUI grid-spacing size and moves the node in the direction of the arrow key by the grid-spacing value. Holding shift in addition will move the node by the grid-spacing size * 10.
++ Adds 'Reload Node 🌏' to the node right-click context menu. Creates a new instance of the node with the same position, size, color, and title. It attempts to retain the set widget values, which is useful for replacing nodes when a node/widget update occurs.
++ Adds 'Slot Type Color 🌏' to the Link right-click context menu. Opens a color picker dialog to update the color of the selected link type.
++ Adds 'Link Border 🌏' to the Link right-click context menu. Toggles the link line border.
++ Adds 'Link Shadow 🌏' to the Link right-click context menu. Toggles the link line shadow.
++ Adds 'Link Style 🌏' to the Link right-click context menu. Sets the default link line type.
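+
+The toggles referenced in the sections above all live in the config.ini file created next to the nodepack on first launch. For reference, a typical [ttNodes] section (matching the default config.ini included in this repo) looks like this:
+
+```
+[ttNodes]
+auto_update = False
+enable_interface = True
+enable_fullscreen = True
+enable_embed_autocomplete = True
+enable_dynamic_widgets = True
+enable_dev_nodes = False
+```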
+
+**Save image prefix parsing**
+
++ Add date/time info to filenames or the output folder by using: %date:yyyy-MM-dd-hh-mm-ss%
++ Parse any upstream setting into filenames or the output folder by using %[widget_name]% (for the current node)
+or %input_name>input_name>widget_name% (for nodes feeding the inputs)
+ Example:
+
+**Node Versioning**
+
++ All tinyterraNodes now have a version property, so that if a future widget change would break existing workflows, the affected nodes are highlighted on load
++ Will only work with workflows created/saved after the v1.0.0 release
+
+**AutoUpdate**
+
+*Disabled by default*
+
++ Option to auto-update the node pack ([ttNodes] auto_update = False | True)
+
+ $\Large\color{white}{Nodes}$
+
+## ttN/base
+ tinyLoader
+
+ tinyConditioning
+
+ tinyKSampler
+
+## ttN/pipe
+
+ pipeLoader v2
+
+(Includes [ADV_CLIP_emb](https://github.com/BlenderNeko/ComfyUI_ADV_CLIP_emb))
+
+ pipeKSampler v2
+
+Embedded with Advanced CLIP Text Encode with an additional pipe output
+
+Old node layout:
+
+With pipeLoader and pipeKSampler:
+
+ pipeKSamplerAdvanced v2
+
+Embedded with Advanced CLIP Text Encode with an additional pipe output
+
+ pipeLoaderSDXL v2
+
+SDXL Loader and Advanced CLIP Text Encode with an additional pipe output
+
+ pipeKSamplerSDXL v2
+
+SDXL Sampler (base and refiner in one) and Advanced CLIP Text Encode with an additional pipe output
+
+Old node layout:
+
+With pipeLoaderSDXL and pipeKSamplerSDXL:
+
+ pipeEDIT
+
+Update/Overwrite any of the 8 original inputs in a Pipe line with new information.
++ _**Inputs -** pipe, model, conditioning, conditioning, samples, vae, clip, image, seed_
++ _**Outputs -** pipe_
+
+ pipe > basic_pipe
+
+Convert ttN pipe line to basic pipe (to be compatible with [ImpactPack](https://github.com/ltdrdata/ComfyUI-Impact-Pack)), WITH original pipe throughput
++ _**Inputs -** pipe[model, conditioning, conditioning, samples, vae, clip, image, seed]_
++ _**Outputs -** basic_pipe[model, clip, vae, conditioning, conditioning], pipe_
+
+ pipe > Detailer Pipe
+
+Convert ttN pipe line to detailer pipe (to be compatible with [ImpactPack](https://github.com/ltdrdata/ComfyUI-Impact-Pack)), WITH original pipe throughput
++ _**Inputs -** pipe[model, conditioning, conditioning, samples, vae, clip, image, seed], bbox_detector, sam_model_opt_
++ _**Outputs -** detailer_pipe[model, vae, conditioning, conditioning, bbox_detector, sam_model_opt], pipe_
+
+## ttN/xyPlot
+ adv xyPlot
+
+pipeKSampler input to generate xy plots using sampler and loader values. (Any values not set by xyPlot will be taken from the corresponding nodes)
+
+ advPlot images
+
+Node to generate xyz plots from any image inputs.
+
+ advPlot range
+
+adv_xyPlot input to generate plot syntax across a range of values.
+
+ advPlot string
+
+adv_xyPlot input to generate plot syntax for strings via a delimiter.
+
+ advPlot combo
+
+adv_xyPlot input to generate plot syntax for combos with various modes.
+
+## ttN/image
+
+ imageOutput
+
+Preview or Save an image with one node, with image throughput.
++ _**Inputs -** image, image output[Hide, Preview, Save, Hide/Save], output path, save prefix, number padding[None, 2-9], file type[PNG, JPG, JPEG, BMP, TIFF, TIF], overwrite existing[True, False], embed workflow[True, False]_
++ _**Outputs -** image_
+
+ imageRemBG
+
+(Using [RemBG](https://github.com/danielgatis/rembg))
+
+Background Removal node with optional image preview & save.
++ _**Inputs -** image, image output[Disabled, Preview, Save], save prefix_
++ _**Outputs -** image, mask_
+
+Example of a photobashing workflow using pipeNodes, imageRemBG, imageOutput and nodes from [ADV_CLIP_emb](https://github.com/BlenderNeko/ComfyUI_ADV_CLIP_emb) and [ImpactPack](https://github.com/ltdrdata/ComfyUI-Impact-Pack/tree/Main):
+
+ hiresFix
+
+Upscale image by model, with optional rescale of the result image.
++ _**Inputs -** image, vae, upscale_model, rescale_after_model[true, false], rescale[by percentage, to Width/Height, to longer side - maintain aspect], rescale method[nearest-exact, bilinear, area], factor, width, height, crop, image_output[Hide, Preview, Save], save prefix, output_latent[true, false]_
++ _**Outputs -** image, latent_
+
+## ttN/text
+ text
+
+Basic TextBox Loader.
++ _**Outputs -** text (STRING)_
+
+ textDebug
+
+Text input that is displayed inside the node, with optional print to console.
++ _**Inputs -** text, print_to_console_
++ _**Outputs -** text (STRING)_
+
+ textConcat
+
+3 TextBox inputs with a single concatenated output.
++ _**Inputs -** text1, text2, text3 (STRINGs), delimiter_
++ _**Outputs -** text (STRING)_
+
+ 7x TXT Loader Concat
+
+7 TextBox inputs concatenated (using the delimiter) into a single output, AND separate text outputs.
++ _**Inputs -** text1, text2, text3, text4, text5, text6, text7 (STRINGs), delimiter_
++ _**Outputs -** text1, text2, text3, text4, text5, text6, text7, concat (STRINGs)_
+
+ 3x TXT Loader MultiConcat
+
+3 TextBox inputs with separate text outputs AND multiple concatenation variations (concatenated using the delimiter).
++ _**Inputs -** text1, text2, text3 (STRINGs), delimiter_
++ _**Outputs -** text1, text2, text3, 1 & 2, 1 & 3, 2 & 3, concat (STRINGs)_
+
+## ttN/util
+ seed
+
+Basic Seed Loader.
++ _**Outputs -** seed (INT)_
+
+ float
+
+Float loader and converter.
++ _**Inputs -** float (FLOAT)_
++ _**Outputs -** float, int, text (FLOAT, INT, STRING)_
+
+ int
+
+Int loader and converter.
++ _**Inputs -** int (INT)_
++ _**Outputs -** int, float, text (INT, FLOAT, STRING)_
+
+## ttN/legacy
+
+ pipeLoader v1
+
+(Modified from [Efficiency Nodes](https://github.com/LucianoCirino/efficiency-nodes-comfyui) and [ADV_CLIP_emb](https://github.com/BlenderNeko/ComfyUI_ADV_CLIP_emb))
+
+Combination of Efficiency Loader and Advanced CLIP Text Encode with an additional pipe output
++ _**Inputs -** model, vae, clip skip, (lora1, modelstrength clipstrength), (Lora2, modelstrength clipstrength), (Lora3, modelstrength clipstrength), (positive prompt, token normalization, weight interpretation), (negative prompt, token normalization, weight interpretation), (latent width, height), batch size, seed_
++ _**Outputs -** pipe, model, conditioning, conditioning, samples, vae, clip, seed_
+
+ pipeKSampler v1
+
+(Modified from [Efficiency Nodes](https://github.com/LucianoCirino/efficiency-nodes-comfyui) and [QOLS_Omar92](https://github.com/omar92/ComfyUI-QualityOfLifeSuit_Omar92))
+
+Combination of Efficiency Loader and Advanced CLIP Text Encode with an additional pipe output
++ _**Inputs -** pipe, (optional pipe overrides), xyplot, (Lora, model strength, clip strength), (upscale method, factor, crop), sampler state, steps, cfg, sampler name, scheduler, denoise, (image output [None, Preview, Save]), Save_Prefix, seed_
++ _**Outputs -** pipe, model, conditioning, conditioning, samples, vae, clip, image, seed_
+
+Old node layout:
+
+With pipeLoader and pipeKSampler:
+
+ pipeKSamplerAdvanced v1
+
+Combination of Efficiency Loader and Advanced CLIP Text Encode with an additional pipe output
++ _**Inputs -** pipe, (optional pipe overrides), xyplot, (Lora, model strength, clip strength), (upscale method, factor, crop), sampler state, steps, cfg, sampler name, scheduler, starts_at_step, return_with_leftover_noise, (image output [None, Preview, Save]), Save_Prefix_
++ _**Outputs -** pipe, model, conditioning, conditioning, samples, vae, clip, image, seed_
+
+ pipeLoaderSDXL v1
+
+SDXL Loader and Advanced CLIP Text Encode with an additional pipe output
++ _**Inputs -** model, vae, clip skip, (lora1, modelstrength clipstrength), (Lora2, modelstrength clipstrength), model, vae, clip skip, (lora1, modelstrength clipstrength), (Lora2, modelstrength clipstrength), (positive prompt, token normalization, weight interpretation), (negative prompt, token normalization, weight interpretation), (latent width, height), batch size, seed_
++ _**Outputs -** sdxlpipe, model, conditioning, conditioning, vae, model, conditioning, conditioning, vae, samples, clip, seed_
+
+ pipeKSamplerSDXL v1
+
+SDXL Sampler (base and refiner in one) and Advanced CLIP Text Encode with an additional pipe output
++ _**Inputs -** sdxlpipe, (optional pipe overrides), (upscale method, factor, crop), sampler state, base_steps, refiner_steps, cfg, sampler name, scheduler, (image output [None, Preview, Save]), Save_Prefix, seed_
++ _**Outputs -** pipe, model, conditioning, conditioning, vae, model, conditioning, conditioning, vae, samples, clip, image, seed_
+
+Old node layout:
+
+With pipeLoaderSDXL and pipeKSamplerSDXL:
+
+ pipeIN
+
+Encode up to 8 frequently used inputs into a single Pipe line.
++ _**Inputs -** model, conditioning, conditioning, samples, vae, clip, image, seed_ ++ _**Outputs -** pipe_ + + + + pipeOUT + +Decode single Pipe line into the 8 original outputs, AND a Pipe throughput. ++ _**Inputs -** pipe_ ++ _**Outputs -** model, conditioning, conditioning, samples, vae, clip, image, seed, pipe_ + + + + pipe > xyPlot + +pipeKSampler input to generate xy plots using sampler and loader values. (Any values not set by xyPlot will be taken from the corresponding pipeKSampler or pipeLoader) ++ _**Inputs -** grid_spacing, latent_id, flip_xy, x_axis, x_values, y_axis, y_values_ ++ _**Outputs -** xyPlot_ + \ No newline at end of file diff --git a/zavodik/nodes/ComfyUI_tinyterraNodes/__init__.py b/zavodik/nodes/ComfyUI_tinyterraNodes/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..bc067faf7a19a1b901333a2b827d7ed2e26bdc68 --- /dev/null +++ b/zavodik/nodes/ComfyUI_tinyterraNodes/__init__.py @@ -0,0 +1,162 @@ +from .ttNpy.tinyterraNodes import TTN_VERSIONS +from .ttNpy import ttNserver # Do Not Remove +import configparser +import folder_paths +import subprocess +import shutil +import os + +# ------- CONFIG -------- # +cwd_path = os.path.dirname(os.path.realpath(__file__)) +js_path = os.path.join(cwd_path, "js") +comfy_path = folder_paths.base_path + +config_path = os.path.join(cwd_path, "config.ini") + +optionValues = { + "auto_update": ('true', 'false'), + "enable_embed_autocomplete": ('true', 'false'), + "enable_interface": ('true', 'false'), + "enable_fullscreen": ('true', 'false'), + "enable_dynamic_widgets": ('true', 'false'), + "enable_dev_nodes": ('true', 'false'), + } + +def get_config(): + """Return a configparser.ConfigParser object.""" + config = configparser.ConfigParser() + config.read(config_path) + return config + +def update_config(): + #section > option > value + for node, version in TTN_VERSIONS.items(): + config_write("Versions", node, version) + + for option, value in optionValues.items(): + config_write("Option Values", option, value) + + section_data = { + "ttNodes": { + "auto_update": False, + "enable_interface": True, + "enable_fullscreen": True, + "enable_embed_autocomplete": True, + "enable_dynamic_widgets": True, + "enable_dev_nodes": False, + } + } + + for section, data in section_data.items(): + for option, value in data.items(): + if config_read(section, option) is None: + config_write(section, option, value) + + # Load the configuration data into a dictionary. + config_data = config_load() + + # Iterate through the configuration data. + for section, options in config_data.items(): + if section == "Versions": + continue + for option in options: + # If the option is not in `optionValues` or in `section_data`, remove it. 
+        if (option not in optionValues and
+            (section not in section_data or option not in section_data[section])):
+            config_remove(section, option)
+
+def config_load():
+    """Load the entire configuration into a dictionary."""
+    config = get_config()
+    return {section: dict(config.items(section)) for section in config.sections()}
+
+def config_read(section, option):
+    """Read a configuration option."""
+    config = get_config()
+    return config.get(section, option, fallback=None)
+
+def config_write(section, option, value):
+    """Write a configuration option."""
+    config = get_config()
+    if not config.has_section(section):
+        config.add_section(section)
+    config.set(section, str(option), str(value))
+
+    with open(config_path, 'w') as f:
+        config.write(f)
+
+def config_remove(section, option):
+    """Remove an option from a section."""
+    config = get_config()
+    if config.has_section(section):
+        config.remove_option(section, option)
+        with open(config_path, 'w') as f:
+            config.write(f)
+
+def config_value_validator(section, option, default):
+    value = str(config_read(section, option)).lower()
+    if value not in optionValues[option]:
+        print(f'\033[92m[{section} Config]\033[91m {option} - \'{value}\' not in {optionValues[option]}, reverting to default.\033[0m')
+        config_write(section, option, default)
+        return default
+    else:
+        return value
+
+# Create a config file if not exists
+if not os.path.isfile(config_path):
+    with open(config_path, 'w') as f:
+        pass
+
+update_config()
+
+# Autoupdate if True
+if config_value_validator("ttNodes", "auto_update", 'false') == 'true':
+    try:
+        with subprocess.Popen(["git", "pull"], cwd=cwd_path, stdout=subprocess.PIPE) as p:
+            p.wait()
+            result = p.communicate()[0].decode()
+            if result != "Already up to date.\n":
+                print("\033[92m[t ttNodes Updated t]\033[0m")
+    except Exception:
+        pass
+
+# --------- WEB ---------- #
+# Remove old web JS folder
+web_extension_path = os.path.join(comfy_path, "web", "extensions", "tinyterraNodes")
+
+if os.path.exists(web_extension_path):
+    try:
+        shutil.rmtree(web_extension_path)
+    except Exception:
+        print("\033[92m[ttNodes] \033[0;31mFailed to remove old web extension.\033[0m")
+
+js_files = {
+    "interface": "enable_interface",
+    "imgViewer": "enable_fullscreen",
+    "embedAC": "enable_embed_autocomplete",
+    "dynamicWidgets": "enable_dynamic_widgets",
+}
+for js_file, config_key in js_files.items():
+    file_path = os.path.join(js_path, f"ttN{js_file}.js")
+    if config_value_validator("ttNodes", config_key, 'true') == 'false' and os.path.isfile(file_path):
+        os.rename(file_path, f"{file_path}.disable")
+    elif config_value_validator("ttNodes", config_key, 'true') == 'true' and os.path.isfile(f"{file_path}.disable"):
+        os.rename(f"{file_path}.disable", file_path)
+
+# Enable Dev Nodes if True (fallback default aligned with the seeded section_data default)
+if config_value_validator("ttNodes", "enable_dev_nodes", 'false') == 'true':
+    from .ttNdev import NODE_CLASS_MAPPINGS as ttNdev_CLASS_MAPPINGS, NODE_DISPLAY_NAME_MAPPINGS as ttNdev_DISPLAY_NAME_MAPPINGS
+else:
+    ttNdev_CLASS_MAPPINGS = {}
+    ttNdev_DISPLAY_NAME_MAPPINGS = {}
+
+# ------- MAPPING ------- #
+from .ttNpy.tinyterraNodes import NODE_CLASS_MAPPINGS as TTN_CLASS_MAPPINGS, NODE_DISPLAY_NAME_MAPPINGS as TTN_DISPLAY_NAME_MAPPINGS
+from .ttNpy.ttNlegacyNodes import NODE_CLASS_MAPPINGS as LEGACY_CLASS_MAPPINGS, NODE_DISPLAY_NAME_MAPPINGS as LEGACY_DISPLAY_NAME_MAPPINGS
+
+NODE_CLASS_MAPPINGS = {**TTN_CLASS_MAPPINGS, **LEGACY_CLASS_MAPPINGS, **ttNdev_CLASS_MAPPINGS}
+NODE_DISPLAY_NAME_MAPPINGS = {**TTN_DISPLAY_NAME_MAPPINGS, **LEGACY_DISPLAY_NAME_MAPPINGS,
**ttNdev_DISPLAY_NAME_MAPPINGS} + +WEB_DIRECTORY = "./js" + +__all__ = ['NODE_CLASS_MAPPINGS', 'NODE_DISPLAY_NAME_MAPPINGS', 'WEB_DIRECTORY'] diff --git a/zavodik/nodes/ComfyUI_tinyterraNodes/__pycache__/__init__.cpython-313.pyc b/zavodik/nodes/ComfyUI_tinyterraNodes/__pycache__/__init__.cpython-313.pyc new file mode 100644 index 0000000000000000000000000000000000000000..8b7f1bf3ba1742b85239e50471d8e9dd8ff4fb62 Binary files /dev/null and b/zavodik/nodes/ComfyUI_tinyterraNodes/__pycache__/__init__.cpython-313.pyc differ diff --git a/zavodik/nodes/ComfyUI_tinyterraNodes/arial.ttf b/zavodik/nodes/ComfyUI_tinyterraNodes/arial.ttf new file mode 100644 index 0000000000000000000000000000000000000000..b251fe231398e2419aeec5bf8b4423152489e813 --- /dev/null +++ b/zavodik/nodes/ComfyUI_tinyterraNodes/arial.ttf @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:413c78f91bd39e134f3c0bb204b1d5a90f29df9efddc8fd26950a178058d5d74 +size 367112 diff --git a/zavodik/nodes/ComfyUI_tinyterraNodes/config.ini b/zavodik/nodes/ComfyUI_tinyterraNodes/config.ini new file mode 100644 index 0000000000000000000000000000000000000000..e152c2dfce3c8b7326373a4e2ff4ec51cd786e07 --- /dev/null +++ b/zavodik/nodes/ComfyUI_tinyterraNodes/config.ini @@ -0,0 +1,53 @@ +[Versions] +tinyterranodes = 2.0.9 +pipeloader_v2 = 2.1.0 +tinyksampler = 2.3.1 +tinyloader = 1.1.0 +tinyconditioning = 1.0.2 +pipeksampler_v2 = 2.3.1 +pipeksampleradvanced_v2 = 2.3.0 +pipeloadersdxl_v2 = 2.1.0 +pipeksamplersdxl_v2 = 2.3.1 +pipeedit = 1.1.1 +pipe2basic = 1.1.0 +pipe2detailer = 1.2.0 +advanced xyplot = 1.2.1 +advplot images = 1.0.0 +advplot range = 1.1.0 +advplot string = 1.1.0 +advplot combo = 1.0.0 +advplot merge = 1.0.0 +pipeencodeconcat = 1.0.2 +multilorastack = 1.1.1 +multimodelmerge = 1.1.0 +debuginput = 1.0.0 +text = 1.0.0 +textdebug = 1.0. 
+concat = 1.0.0 +text3box_3wayconcat = 1.0.0 +text7box_concat = 1.0.0 +textcycleline = 1.0.0 +textoutput = 1.0.1 +imageoutput = 1.2.0 +imagerembg = 1.0.0 +hiresfixscale = 1.1.0 +int = 1.0.0 +float = 1.0.0 +seed = 1.0.0 + +[Option Values] +auto_update = ('true', 'false') +enable_embed_autocomplete = ('true', 'false') +enable_interface = ('true', 'false') +enable_fullscreen = ('true', 'false') +enable_dynamic_widgets = ('true', 'false') +enable_dev_nodes = ('true', 'false') + +[ttNodes] +auto_update = False +enable_interface = True +enable_fullscreen = True +enable_embed_autocomplete = True +enable_dynamic_widgets = True +enable_dev_nodes = False + diff --git a/zavodik/nodes/ComfyUI_tinyterraNodes/images/icon.jpg b/zavodik/nodes/ComfyUI_tinyterraNodes/images/icon.jpg new file mode 100644 index 0000000000000000000000000000000000000000..8b08a1b092a8875071fa34a80b2b1b78c2dfe5f3 --- /dev/null +++ b/zavodik/nodes/ComfyUI_tinyterraNodes/images/icon.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:53eec0f8217d979a0b1be6ca7d1c027a968329f7bf8b4ce37526d51a842a0e41 +size 43380 diff --git a/zavodik/nodes/ComfyUI_tinyterraNodes/images/tinyterra_pipeSDXL.png b/zavodik/nodes/ComfyUI_tinyterraNodes/images/tinyterra_pipeSDXL.png new file mode 100644 index 0000000000000000000000000000000000000000..8351641b63713f82b0fb93aa9d9502a16bd7c070 --- /dev/null +++ b/zavodik/nodes/ComfyUI_tinyterraNodes/images/tinyterra_pipeSDXL.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:8a2952324499183287e42b5b538e6dc2d47d7fc6d197b076c80a1fb020bbadf6 +size 846316 diff --git a/zavodik/nodes/ComfyUI_tinyterraNodes/images/tinyterra_prefixParsing.png b/zavodik/nodes/ComfyUI_tinyterraNodes/images/tinyterra_prefixParsing.png new file mode 100644 index 0000000000000000000000000000000000000000..9d2e09da4bae37a8c1b9c0ee0b5b85a51d3ca437 --- /dev/null +++ b/zavodik/nodes/ComfyUI_tinyterraNodes/images/tinyterra_prefixParsing.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:d8badcc39ea328b71b301aa9b4a1b014a9c797e25e57724947e5e84f9e4e9080 +size 953564 diff --git a/zavodik/nodes/ComfyUI_tinyterraNodes/images/tinyterra_trueHRFix.png b/zavodik/nodes/ComfyUI_tinyterraNodes/images/tinyterra_trueHRFix.png new file mode 100644 index 0000000000000000000000000000000000000000..249202c370937209cc30f8dd2741ea53b2917ffa --- /dev/null +++ b/zavodik/nodes/ComfyUI_tinyterraNodes/images/tinyterra_trueHRFix.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:b9bffd61a21fc143c3eb6bb40d9acd6c056ab2b959a8931dd68f3b081a36aa8d +size 1102664 diff --git a/zavodik/nodes/ComfyUI_tinyterraNodes/images/tinyterra_xyPlot.png b/zavodik/nodes/ComfyUI_tinyterraNodes/images/tinyterra_xyPlot.png new file mode 100644 index 0000000000000000000000000000000000000000..5e6165adf943bad6572a67dcc1f8e65c9f2f8506 --- /dev/null +++ b/zavodik/nodes/ComfyUI_tinyterraNodes/images/tinyterra_xyPlot.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:90f000697e72101a6ec95bf00d810c073460abbc0208e9c5c5e3569cdf94dfe5 +size 849838 diff --git a/zavodik/nodes/ComfyUI_tinyterraNodes/js/ttN.css b/zavodik/nodes/ComfyUI_tinyterraNodes/js/ttN.css new file mode 100644 index 0000000000000000000000000000000000000000..2309ebdbade19bfc662608af8cbb955aa0653d69 --- /dev/null +++ b/zavodik/nodes/ComfyUI_tinyterraNodes/js/ttN.css @@ -0,0 +1,130 @@ +.litegraph.litecontextmenu .litemenu-title .tinyterra-contextmenu-title, +.litegraph.litecontextmenu 
.litemenu-entry.tinyterra-contextmenu-item { + background-color: #212121 !important; + margin: 0; + display: flex; + flex-direction: row; + align-items: center; + justify-content: start; +} + +.litegraph.litecontextmenu .litemenu-title .tinyterra-contextmenu-title, +.litegraph.litecontextmenu .litemenu-entry.tinyterra-contextmenu-label { + background-color: #000 !important; + margin: 0; + cursor: default; + opacity: 1; + padding: 4px; + font-weight: bold; +} + + + + +/* Dropdown */ +.ttN-dropdown, .ttN-nested-dropdown { + position: relative; + box-sizing: border-box; + background-color: #171717; + box-shadow: 0 4px 4px rgba(255, 255, 255, .25); + padding: 0; + margin: 0; + list-style: none; + z-index: 1000; + overflow: visible; + max-height: fit-content; + max-width: fit-content; +} + +.ttN-dropdown { + position: absolute; + border-radius: 0; +} + +.ttN-dropdown.ttN-dropdown-scrollable { + max-height: min(48vh, 360px); + min-width: 220px; + overflow-y: auto; + overflow-x: hidden; + overscroll-behavior: contain; + scrollbar-gutter: stable; +} + +.ttN-nested-dropdown.ttN-dropdown-scrollable { + max-height: min(48vh, 360px); + overflow-y: auto; + overflow-x: hidden; + overscroll-behavior: contain; + scrollbar-gutter: stable; +} + +.ttN-dropdown.ttN-dropdown-scrollable::-webkit-scrollbar, +.ttN-nested-dropdown.ttN-dropdown-scrollable::-webkit-scrollbar { + width: 10px; +} + +.ttN-dropdown.ttN-dropdown-scrollable::-webkit-scrollbar-track, +.ttN-nested-dropdown.ttN-dropdown-scrollable::-webkit-scrollbar-track { + background: #121212; +} + +.ttN-dropdown.ttN-dropdown-scrollable::-webkit-scrollbar-thumb, +.ttN-nested-dropdown.ttN-dropdown-scrollable::-webkit-scrollbar-thumb { + background: #4b4b4b; + border-radius: 8px; +} + +.ttN-dropdown.ttN-dropdown-scrollable::-webkit-scrollbar-thumb:hover, +.ttN-nested-dropdown.ttN-dropdown-scrollable::-webkit-scrollbar-thumb:hover { + background: #646464; +} + +/* Style for final items */ +.ttN-dropdown li.item, .ttN-nested-dropdown li.item { + font-weight: normal; + min-width: max-content; +} + +/* Style for folders (parent items) */ +.ttN-dropdown li.folder, .ttN-nested-dropdown li.folder { + cursor: default; + position: relative; + border-right: 3px solid #005757; +} + +.ttN-dropdown li.folder::after, .ttN-nested-dropdown li.folder::after { + content: ">"; + position: absolute; + right: 2px; + font-weight: normal; +} + +.ttN-dropdown li, .ttN-nested-dropdown li { + padding: 4px 10px; + cursor: pointer; + font-family: system-ui; + font-size: 0.7rem; + position: relative; +} + +/* Style for nested dropdowns */ +.ttN-nested-dropdown { + position: absolute; + top: 0; + left: 100%; + margin: 0; + border: none; + display: none; +} + +.ttN-dropdown li.selected > .ttN-nested-dropdown, +.ttN-nested-dropdown li.selected > .ttN-nested-dropdown { + display: block; + border: none; +} + +.ttN-dropdown li.selected, +.ttN-nested-dropdown li.selected { + background-color: #222222; + border: none; +} diff --git a/zavodik/nodes/ComfyUI_tinyterraNodes/js/ttN.js b/zavodik/nodes/ComfyUI_tinyterraNodes/js/ttN.js new file mode 100644 index 0000000000000000000000000000000000000000..82d86804f118d612f2644a6a9abbae3e76f98577 --- /dev/null +++ b/zavodik/nodes/ComfyUI_tinyterraNodes/js/ttN.js @@ -0,0 +1,707 @@ +import { app } from "../../scripts/app.js"; +import { tinyterraReloadNode, wait, rebootAPI, getConfig, convertToInput, hideWidget } from "./utils.js"; +import { openFullscreenApp, openPopoutViewer, _setDefaultFullscreenNode } from "./ttNimgViewer.js"; + +class TinyTerra extends 
EventTarget { + constructor() { + super(); + this.ctrlKey = false + this.altKey = false + this.shiftKey = false + this.downKeys = {} + this.processingMouseDown = false + this.processingMouseUp = false + this.processingMouseMove = false + window.addEventListener("keydown", (e) => { + this.handleKeydown(e) + }) + window.addEventListener("keyup", (e) => { + this.handleKeyup(e) + }) + this.initialiseContextMenu() + this.initialiseNodeMenu() + this.injectTtnCss() + } + async initialiseContextMenu() { + const that = this; + setTimeout(async () => { + const getCanvasMenuOptions = LGraphCanvas.prototype.getCanvasMenuOptions; + LGraphCanvas.prototype.getCanvasMenuOptions = function (...args) { + const options = getCanvasMenuOptions.apply(this, [...args]); + options.push(null); + options.push({ + content: `🌏 tinyterraNodes`, + className: "ttN-contextmenu-item ttN-contextmenu-main-item", + submenu: { + options: that.getTinyTerraContextMenuItems(), + }, + }); + + // Remove consecutive null entries + let i = 0; + while (i < options.length) { + if (options[i] === null && (i === 0 || options[i - 1] === null)) { + options.splice(i, 1); + } else { + i++; + } + } + return options; + }; + }, 1000); + } + getTinyTerraContextMenuItems() { + const that = this + return [ + { + content: "🌏 Nodes", + disabled: true, + className: "tinyterra-contextmenu-item tinyterra-contextmenu-label", + }, + { + content: "base", + className: "tinyterra-contextmenu-item", + has_submenu: true, + callback: (...args) => { + that.addTTNodeMenu('base/', args[3], args[2]) + } + }, + { + content: "pipe", + className: "tinyterra-contextmenu-item", + has_submenu: true, + callback: (...args) => { + that.addTTNodeMenu('pipe/', args[3], args[2]) + } + }, + { + content: "xyPlot", + className: "tinyterra-contextmenu-item", + has_submenu: true, + callback: (...args) => { + that.addTTNodeMenu('xyPlot/', args[3], args[2]) + } + }, + { + content: "text", + className: "tinyterra-contextmenu-item", + has_submenu: true, + callback: (...args) => { + that.addTTNodeMenu('text/', args[3], args[2]) + } + }, + { + content: "image", + className: "tinyterra-contextmenu-item", + has_submenu: true, + callback: (...args) => { + that.addTTNodeMenu('image/', args[3], args[2]) + } + }, + { + content: "util", + className: "tinyterra-contextmenu-item", + has_submenu: true, + callback: (...args) => { + that.addTTNodeMenu('util/', args[3], args[2]) + } + }, + { + content: "🌏 Add Group", + disabled: true, + className: "tinyterra-contextmenu-item tinyterra-contextmenu-label", + }, + { + content: "Basic Sampling", + className: "tinyterra-contextmenu-item", + has_submenu: true, + callback : function(value, event, mouseEvent, contextMenu){ + that.addGroupMenu('basic', contextMenu, mouseEvent) + } + }, + { + content: "Upscaling", + className: "tinyterra-contextmenu-item", + has_submenu: true, + callback : function(value, event, mouseEvent, contextMenu){ + that.addGroupMenu('upscale', contextMenu, mouseEvent) + } + }, + { + content: "xyPlotting", + className: "tinyterra-contextmenu-item", + has_submenu: true, + callback : function(value, event, mouseEvent, contextMenu){ + that.addGroupMenu('xyPlot', contextMenu, mouseEvent) + } + }, + { + content: "🌏 Extras", + disabled: true, + className: "tinyterra-contextmenu-item tinyterra-contextmenu-label", + }, + // { + // content: "⚙️ Settings (tinyterra)", + // disabled: true, //!!this.settingsDialog, + // className: "tinyterra-contextmenu-item", + // callback: (...args) => { + // this.settingsDialog = new 
tinyterraConfigDialog().show();
+            //         this.settingsDialog.addEventListener("close", (e) => {
+            //             this.settingsDialog = null;
+            //         });
+            //     },
+            // },
+            {
+                content: "🛑 Reboot Comfy",
+                className: "tinyterra-contextmenu-item",
+                callback: (...args) => {
+                    rebootAPI();
+                    wait(1000).then(() => {
+                        window.location.reload();
+                    });
+                }
+            },
+            {
+                content: "⭐ Star on Github",
+                className: "tinyterra-contextmenu-item",
+                callback: (...args) => {
+                    window.open("https://github.com/TinyTerra/ComfyUI_tinyterraNodes", "_blank");
+                },
+            },
+            {
+                content: "☕ Support TinyTerra",
+                className: "tinyterra-contextmenu-item",
+                callback: (...args) => {
+                    window.open("https://buymeacoffee.com/tinyterra", "_blank");
+                },
+            },
+
+        ];
+    }
+    // Renamed the parameter to avoid shadowing it with the created node instance.
+    addNode = async (nodeType, pos) => {
+        var canvas = LGraphCanvas.active_canvas;
+        canvas.graph.beforeChange();
+        var node = LiteGraph.createNode(nodeType);
+        if (node) {
+            node.pos = pos;
+            canvas.graph.add(node);
+        }
+        canvas.graph.afterChange();
+        return node
+    }
+    addGroup = async (contextMenu, nodes) => {
+        var first_event = contextMenu.getFirstEvent();
+        var canvas = LGraphCanvas.active_canvas;
+        var canvasOffset = canvas.convertEventToCanvasOffset(first_event);
+
+        // Create Nodes
+        for (const nodeData of Object.values(nodes)) {
+            var node = await this.addNode(nodeData.nodeType, canvasOffset);
+            nodeData.graphNode = node;
+            canvasOffset = [canvasOffset[0] + nodeData.width + 10, canvasOffset[1]];
+        }
+
+        // Handle Widget Changes
+        for (const nodeData of Object.values(nodes)) {
+            var node = nodeData.graphNode;
+            if (nodeData.widgets) {
+                for (const [widget, value] of Object.entries(nodeData.widgets)) {
+                    if (value == 'toInput') {
+                        const config = getConfig(widget, node)
+                        convertToInput(node, node.widgets.find((w) => w.name === widget), config);
+                    } else {
+                        if (node) {
+                            node.widgets.find((w) => w.name === widget).value = value
+                        }
+                    }
+                }
+            }
+        }
+
+        // Handle Connections
+        for (const nodeData of Object.values(nodes)) {
+            var node = nodeData.graphNode;
+            if (nodeData.connections) {
+                for (const c of nodeData.connections) {
+                    node.connect(parseInt(c[0]), nodes[c[1]].graphNode.id, c[2]);
+                }
+            }
+        }
+    }
+    addTTNodeMenu(category, prev_menu, e, callback=null) {
+        var canvas = LGraphCanvas.active_canvas;
+        var ref_window = canvas.getCanvasWindow();
+        var graph = canvas.graph;
+        const base_category = '🌏 tinyterra/' + category
+
+        var entries = [];
+
+        var nodes = LiteGraph.getNodeTypesInCategory(base_category.slice(0, -1), canvas.filter || graph.filter );
+        nodes.forEach(function(node){
+            if (node.skip_list)
+                return;
+
+            var entry = {
+                value: node.type,
+                content: node.title,
+                className: "tinyterra-contextmenu-item",
+                has_submenu: false,
+                callback : function(value, event, mouseEvent, contextMenu){
+                    var first_event = contextMenu.getFirstEvent();
+                    canvas.graph.beforeChange();
+                    var node = LiteGraph.createNode(value.value);
+                    if (node) {
+                        node.pos = canvas.convertEventToCanvasOffset(first_event);
+                        canvas.graph.add(node);
+                    }
+                    if(callback)
+                        callback(node);
+                    canvas.graph.afterChange();
+                }
+            }
+
+            entries.push(entry);
+        });
+
+        new LiteGraph.ContextMenu( entries, { event: e, parentMenu: prev_menu }, ref_window );
+    }
+    addGroupMenu(group, prev_menu, e) {
+        const that = this;
+        var canvas = LGraphCanvas.active_canvas;
+        var ref_window = canvas.getCanvasWindow();
+        let entries;
+        switch (group) {
+            case "basic":
+                entries = [
+                    { content: "Base ttN",
+                        className: "tinyterra-contextmenu-item",
+                        callback : async function(value, event, mouseEvent, contextMenu){
+                            const nodes = {
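+                                // Each key below names a group member; addGroup reads nodeType (the LiteGraph
+                                // type to create), width (horizontal layout spacing), connections as
+                                // [output_slot, target_key, target_input_name], and optional widget overrides.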
'Loader': { + nodeType: 'ttN tinyLoader', + graphNode: null, + width: 315, + connections: [ + [0, 'Conditioning', 'model'], + [1, 'KSampler', 'latent'], + [2, 'KSampler', 'vae'], + [3, 'Conditioning', 'clip'], + ], + }, + 'Conditioning': { + nodeType: 'ttN conditioning', + graphNode: null, + width: 400, + connections: [ + [0, 'KSampler', 'model'], + [1, 'KSampler', 'positive'], + [2, 'KSampler', 'negative'], + [3, 'KSampler', 'clip'], + ], + }, + 'KSampler': { + nodeType: 'ttN KSampler_v2', + graphNode: null, + width: 262, + widgets: { + image_output: 'Preview' + } + } + } + that.addGroup(contextMenu, nodes) + } + }, + { content: "Pipe Basic", + className: "tinyterra-contextmenu-item", + callback : async function(value, event, mouseEvent, contextMenu){ + const nodes = { + 'Loader': { + nodeType: 'ttN pipeLoader_v2', + graphNode: null, + width: 315, + connections: [ + [0, 'KSampler', 'pipe'] + ], + }, + 'KSampler': { + nodeType: 'ttN pipeKSampler_v2', + graphNode: null, + width: 262, + widgets: { + image_output: 'Preview' + } + } + } + that.addGroup(contextMenu, nodes) + } + }, + { content: "Pipe SDXL", + className: "tinyterra-contextmenu-item", + callback : async function(value, event, mouseEvent, contextMenu){ + const nodes = { + 'Loader': { + nodeType: 'ttN pipeLoaderSDXL_v2', + graphNode: null, + width: 365, + connections: [ + [0, 'KSampler', 'sdxl_pipe'] + ], + }, + 'KSampler': { + nodeType: 'ttN pipeKSamplerSDXL_v2', + graphNode: null, + width: 365, + widgets: { + image_output: 'Preview' + } + } + } + that.addGroup(contextMenu, nodes) + } + }, + ]; + break; + + case "upscale": + entries = [ + { content: "Base upscale", + className: "tinyterra-contextmenu-item", + callback : async function(value, event, mouseEvent, contextMenu){ + const nodes = { + 'Loader': { + nodeType: 'ttN tinyLoader', + graphNode: null, + width: 315, + connections: [ + [0, 'Conditioning', 'model'], + [1, 'KSampler', 'latent'], + [2, 'KSampler', 'vae'], + [3, 'Conditioning', 'clip'], + ], + }, + 'Conditioning': { + nodeType: 'ttN conditioning', + graphNode: null, + width: 400, + connections: [ + [0, 'KSampler', 'model'], + [1, 'KSampler', 'positive'], + [2, 'KSampler', 'negative'], + [3, 'KSampler', 'clip'], + ], + }, + 'KSampler': { + nodeType: 'ttN KSampler_v2', + graphNode: null, + width: 262, + connections: [ + [0, 'KSampler2', 'model'], + [1, 'KSampler2', 'positive'], + [2, 'KSampler2', 'negative'], + [3, 'KSampler2', 'latent'], + [4, 'KSampler2', 'vae'], + [5, 'KSampler2', 'clip'], + [6, 'KSampler2', 'input_image_override'] + ], + widgets: { + image_output: 'Preview', + } + }, + 'KSampler2': { + nodeType: 'ttN KSampler_v2', + graphNode: null, + width: 262, + widgets: { + upscale_method: '[hiresFix] nearest-exact', + image_output: 'Preview', + denoise: 0.5, + steps: 15 + } + }, + } + that.addGroup(contextMenu, nodes) + } + }, + { content: "Pipe Upscale", + className: "tinyterra-contextmenu-item", + callback : async function(value, event, mouseEvent, contextMenu){ + const nodes = { + 'loader1': { + nodeType: 'ttN pipeLoader_v2', + graphNode: null, + width: 315, + connections: [ + [0, 'ksampler', 'pipe'] + ], + }, + 'ksampler': { + nodeType: 'ttN pipeKSampler_v2', + graphNode: null, + width: 262, + connections: [ + [0, 'ksampler2', 'pipe'] + ], + widgets: { + image_output: 'Preview' + }, + }, + 'ksampler2': { + nodeType: 'ttN pipeKSampler_v2', + graphNode: null, + width: 262, + widgets: { + upscale_method: '[hiresFix] nearest-exact', + denoise: 0.5, + seed: 'toInput', + image_output: 'Preview' + } + } + } + 
that.addGroup(contextMenu, nodes) + } + }, + ]; + break; + + case "xyPlot": + entries = [ + { content: "Base xyPlot", + className: "tinyterra-contextmenu-item", + callback : async function(value, event, mouseEvent, contextMenu){ + const nodes = { + 'Loader': { + nodeType: 'ttN tinyLoader', + graphNode: null, + width: 315, + connections: [ + [0, 'Conditioning', 'model'], + [1, 'KSampler', 'latent'], + [2, 'KSampler', 'vae'], + [3, 'Conditioning', 'clip'], + ], + }, + 'Conditioning': { + nodeType: 'ttN conditioning', + graphNode: null, + width: 400, + connections: [ + [0, 'KSampler', 'model'], + [1, 'KSampler', 'positive'], + [2, 'KSampler', 'negative'], + [3, 'KSampler', 'clip'], + ], + }, + 'xyPlot': { + nodeType: 'ttN advanced xyPlot', + graphNode: null, + width: 400, + connections: [ + [0, 'KSampler', 'adv_xyPlot'], + ], + }, + 'KSampler': { + nodeType: 'ttN KSampler_v2', + graphNode: null, + width: 262, + widgets: { + image_output: 'Preview' + } + } + } + that.addGroup(contextMenu, nodes) + } + }, + { content: "Pipe xyPlot", + className: "tinyterra-contextmenu-item", + callback : async function(value, event, mouseEvent, contextMenu){ + const nodes = { + 'Loader': { + nodeType: 'ttN pipeLoader_v2', + graphNode: null, + width: 315, + connections: [ + [0, 'KSampler', 'pipe'], + ], + }, + 'xyPlot': { + nodeType: 'ttN advanced xyPlot', + graphNode: null, + width: 400, + connections: [ + [0, 'KSampler', 'adv_xyPlot'], + ], + }, + 'KSampler': { + nodeType: 'ttN pipeKSampler_v2', + graphNode: null, + width: 262, + widgets: { + image_output: 'Preview' + } + } + } + that.addGroup(contextMenu, nodes) + } + }, + ] + } + new LiteGraph.ContextMenu( entries, { event: e, parentMenu: prev_menu }, ref_window ); + } + async initialiseNodeMenu() { + const that = this; + setTimeout(async () => { + const getNodeMenuOptions = LGraphCanvas.prototype.getNodeMenuOptions; + LGraphCanvas.prototype.getNodeMenuOptions = function (node) { + const options = getNodeMenuOptions.apply(this, arguments); + node.setDirtyCanvas(true, true); + const ttNoptions = that.getTinyTerraNodeMenuItems(node) + options.splice(options.length - 1, 0, ...ttNoptions, null); + + return options; + }; + },500) + } + getTinyTerraNodeMenuItems(node) { + return [ + { + content: "🌏 Fullscreen Image Viewer", + callback: () => { openFullscreenApp(node) } + }, + { + content: "🌏 Pop-Out Image Viewer", + callback: () => { openPopoutViewer(node) } + }, + { + content: "🌏 Set Default Viewer Node", + callback: _setDefaultFullscreenNode + }, + { + content: "🌏 Clear Default Viewer Node", + callback: function () { + sessionStorage.removeItem('Comfy.Settings.ttN.default_fullscreen_node'); + } + }, + null, + { + content: "🌏 Default Node BG Color", + has_submenu: true, + callback: LGraphCanvas.ttNsetDefaultBGColor + }, + { + content: "🌏 Node Dimensions", + callback: () => { LGraphCanvas.prototype.ttNsetNodeDimension(node); } + }, + { + content: "🌏 Reload Node", + callback: () => { + const active_canvas = LGraphCanvas.active_canvas; + if (!active_canvas.selected_nodes || Object.keys(active_canvas.selected_nodes).length <= 1) { + tinyterraReloadNode(node); + } else { + for (var i in active_canvas.selected_nodes) { + tinyterraReloadNode(active_canvas.selected_nodes[i]); + } + } + } + }, + ] + } + handleKeydown(e) { + this.ctrlKey = !!e.ctrlKey + this.altKey = !!e.altKey + this.shiftKey = !!e.shiftKey + this.downKeys[e.key.toLocaleUpperCase()] = true + this.downKeys["^" + e.key.toLocaleUpperCase()] = true + } + handleKeyup(e) { + this.ctrlKey = !!e.ctrlKey + 
this.altKey = !!e.altKey + this.shiftKey = !!e.shiftKey + this.downKeys[e.key.toLocaleUpperCase()] = false + this.downKeys["^" + e.key.toLocaleUpperCase()] = false + } + injectTtnCss() { + const link = document.createElement("link"); + link.rel = "stylesheet"; + link.type = "text/css"; + link.href = "extensions/ComfyUI_tinyterraNodes/ttN.css"; + + link.onerror = function () { + if (this.href.includes("comfyui_tinyterranodes")) { + console.error("tinyterraNodes: Failed to load CSS file. Please check nodepack folder name."); + return; + } + this.href = "extensions/comfyui_tinyterranodes/ttN.css" + } + document.head.appendChild(link); + } +} + +export const tinyterra = new TinyTerra(); +window.tinyterra = tinyterra; + +app.registerExtension({ + name: "comfy.ttN", + setup() { + if (!localStorage.getItem("ttN.pysssss")) { + const ttNckpts = ['ttN pipeLoader_v2', "ttN pipeLoaderSDXL_v2", "ttN tinyLoader"] + let pysCheckpoints = app.ui.settings.getSettingValue('pysssss.ModelInfo.CheckpointNodes') + if (pysCheckpoints) { + for (let ckpt of ttNckpts) { + if (!pysCheckpoints.includes(ckpt)) { + pysCheckpoints = `${pysCheckpoints},${ckpt}` + } + } + app.ui.settings.setSettingValue('pysssss.ModelInfo.CheckpointNodes', pysCheckpoints) + } + + const ttNloras = ['ttN KSampler_v2', 'ttN pipeKSampler_v2', 'ttN pipeKSamplerAdvanced_v2', 'ttN pipeKSamplerSDXL_v2', ] + let pysLoras = app.ui.settings.getSettingValue('pysssss.ModelInfo.LoraNodes') + if (pysLoras) { + for (let lora of ttNloras) { + if (!pysLoras.includes(lora)) { + pysLoras = `${pysLoras},${lora}` + } + } + app.ui.settings.setSettingValue('pysssss.ModelInfo.LoraNodes', pysLoras) + } + if (pysCheckpoints && pysLoras) { + localStorage.setItem("ttN.pysssss", true) + } + } + }, + async beforeRegisterNodeDef(nodeType, nodeData, app) { + if (nodeData.name.startsWith("ttN")) { + const origOnConfigure = nodeType.prototype.onConfigure; + nodeType.prototype.onConfigure = function () { + const r = origOnConfigure ? origOnConfigure.apply(this, arguments) : undefined; + let nodeVersion = nodeData.input.hidden?.ttNnodeVersion ? nodeData.input.hidden.ttNnodeVersion : null; + nodeType.ttNnodeVersion = nodeVersion; + this.properties['ttNnodeVersion'] = this.properties['ttNnodeVersion'] ? this.properties['ttNnodeVersion'] : nodeVersion; + if ((this.properties['ttNnodeVersion']?.split(".")[0] !== nodeVersion?.split(".")[0]) || (this.properties['ttNnodeVersion']?.split(".")[1] !== nodeVersion?.split(".")[1])) { + if (!this.properties['origVals']) { + this.properties['origVals'] = { bgcolor: this.bgcolor, color: this.color, title: this.title } + } + this.bgcolor = "#e76066"; + this.color = "#ff0b1e"; + this.title = this.title.includes("Node Version Mismatch") ? 
this.title : this.title + " - Node Version Mismatch" + } else if (this.properties['origVals']) { + this.bgcolor = this.properties.origVals.bgcolor; + this.color = this.properties.origVals.color; + this.title = this.properties.origVals.title; + delete this.properties['origVals'] + } + return r; + }; + } + }, + nodeCreated(node) { + if (["pipeLoader", "pipeLoaderSDXL"].includes(node.constructor.title)) { + for (let widget of node.widgets) { + if (widget.name === "control_after_generate") { + widget.value = "fixed" + } + } + } + } +}); diff --git a/zavodik/nodes/ComfyUI_tinyterraNodes/js/ttNdropdown.js b/zavodik/nodes/ComfyUI_tinyterraNodes/js/ttNdropdown.js new file mode 100644 index 0000000000000000000000000000000000000000..0640d5f18f8855bde4fc6e7a5564304bbf8c7b68 --- /dev/null +++ b/zavodik/nodes/ComfyUI_tinyterraNodes/js/ttNdropdown.js @@ -0,0 +1,265 @@ +// ttN Dropdown +let activeDropdown = null; + +class Dropdown { + constructor(inputEl, options, onSelect, isDict, manualOffset, hostElement) { + this.dropdown = document.createElement('ul'); + this.dropdown.setAttribute('role', 'listbox'); + this.dropdown.classList.add('ttN-dropdown'); + this.selectedIndex = -1; + this.inputEl = inputEl; + this.options = options; + this.onSelect = onSelect; + this.isDict = isDict; + this.manualOffsetX = manualOffset[0]; + this.manualOffsetY = manualOffset[1]; + this.hostElement = hostElement; + + this.focusedDropdown = this.dropdown; + + this.buildDropdown(); + + this.onKeyDownBound = this.onKeyDown.bind(this); + this.onWheelBound = this.onWheel.bind(this); + this.onClickBound = this.onClick.bind(this); + + this.addEventListeners(); + } + + buildDropdown() { + if (this.isDict) { + this.buildNestedDropdown(this.options, this.dropdown); + } else { + this.dropdown.classList.add('ttN-dropdown-scrollable'); + this.options.forEach((suggestion, index) => { + this.addListItem(suggestion, index, this.dropdown); + }); + } + + const inputRect = this.inputEl.getBoundingClientRect(); + if (isNaN(this.manualOffsetX) && this.manualOffsetX.includes('%')) { + this.manualOffsetX = (inputRect.height * (parseInt(this.manualOffsetX) / 100)) + } + if (isNaN(this.manualOffsetY) && this.manualOffsetY.includes('%')) { + this.manualOffsetY = (inputRect.width * (parseInt(this.manualOffsetY) / 100)) + } + this.dropdown.style.top = (inputRect.top + inputRect.height - this.manualOffsetX) + 'px'; + this.dropdown.style.left = (inputRect.left + inputRect.width - this.manualOffsetY) + 'px'; + + this.hostElement.appendChild(this.dropdown); + + activeDropdown = this; + } + + buildNestedDropdown(dictionary, parentElement, currentPath = '') { + let index = 0; + Object.keys(dictionary).forEach((key) => { + let extra_data; + const item = dictionary[key]; + if (typeof item === 'string') { extra_data = item; } + + let fullPath = currentPath ? 
`${currentPath}/${key}` : key; + if (extra_data) { fullPath = `${fullPath}###${extra_data}`; } + + if (typeof item === "object" && item !== null) { + const nestedDropdown = document.createElement('ul'); + nestedDropdown.setAttribute('role', 'listbox'); + nestedDropdown.classList.add('ttN-nested-dropdown'); + + const hasChildFolders = Object.values(item).some((child) => typeof child === 'object' && child !== null); + if (!hasChildFolders) { + nestedDropdown.classList.add('ttN-dropdown-scrollable'); + } + + const parentListItem = document.createElement('li'); + parentListItem.classList.add('folder'); + parentListItem.textContent = key; + parentListItem.appendChild(nestedDropdown); + parentListItem.addEventListener('mouseover', this.onMouseOver.bind(this, index, parentElement)); + parentElement.appendChild(parentListItem); + this.buildNestedDropdown(item, nestedDropdown, fullPath); + index = index + 1; + } else { + const listItem = document.createElement('li'); + listItem.classList.add('item'); + listItem.setAttribute('role', 'option'); + listItem.textContent = key; + listItem.addEventListener('mouseover', this.onMouseOver.bind(this, index, parentElement)); + listItem.addEventListener('mousedown', (e) => this.onMouseDown(key, e, fullPath)); + parentElement.appendChild(listItem); + index = index + 1; + } + }); + } + + addListItem(item, index, parentElement) { + const listItem = document.createElement('li'); + listItem.classList.add('item'); + listItem.setAttribute('role', 'option'); + listItem.textContent = item; + listItem.addEventListener('mouseover', () => this.onMouseOver(index)); + listItem.addEventListener('mousedown', (e) => this.onMouseDown(item, e)); + parentElement.appendChild(listItem); + } + + addEventListeners() { + document.addEventListener('keydown', this.onKeyDownBound); + this.dropdown.addEventListener('wheel', this.onWheelBound); + document.addEventListener('click', this.onClickBound); + } + + removeEventListeners() { + document.removeEventListener('keydown', this.onKeyDownBound); + this.dropdown.removeEventListener('wheel', this.onWheelBound); + document.removeEventListener('click', this.onClickBound); + } + + closeDropdown() { + if (activeDropdown === this) { + activeDropdown = null; + } + this.removeEventListeners(); + this.dropdown.remove(); + } + + onMouseOver(index, parentElement=null) { + if (parentElement) { + this.focusedDropdown = parentElement; + } + this.selectedIndex = index; + this.updateSelection(); + } + + onMouseDown(suggestion, event, fullPath='') { + event.preventDefault(); + this.onSelect(suggestion, fullPath); + this.closeDropdown(); + } + + onKeyDown(event) { + const enterKeyCode = 13; + const escKeyCode = 27; + const arrowUpKeyCode = 38; + const arrowDownKeyCode = 40; + const arrowRightKeyCode = 39; + const arrowLeftKeyCode = 37; + const tabKeyCode = 9; + + const items = Array.from(this.focusedDropdown.children); + const selectedItem = items[this.selectedIndex]; + + if (activeDropdown) { + if (event.keyCode === arrowUpKeyCode) { + event.preventDefault(); + this.selectedIndex = Math.max(0, this.selectedIndex - 1); + this.updateSelection(); + } + + else if (event.keyCode === arrowDownKeyCode) { + event.preventDefault(); + this.selectedIndex = Math.min(items.length - 1, this.selectedIndex + 1); + this.updateSelection(); + } + + else if (event.keyCode === arrowRightKeyCode && selectedItem) { + event.preventDefault(); + if (selectedItem.classList.contains('folder')) { + const nestedDropdown = selectedItem.querySelector('.ttN-nested-dropdown'); + if 
(nestedDropdown) {
+                    this.focusedDropdown = nestedDropdown;
+                    this.selectedIndex = 0;
+                    this.updateSelection();
+                }
+            }
+        }
+
+        else if (event.keyCode === arrowLeftKeyCode && this.focusedDropdown !== this.dropdown) {
+            // Re-focus the parent dropdown and select the folder item containing the dropdown we are leaving.
+            const currentDropdown = this.focusedDropdown;
+            const parentDropdown = currentDropdown.closest('.ttN-dropdown, .ttN-nested-dropdown').parentNode.closest('.ttN-dropdown, .ttN-nested-dropdown');
+            if (parentDropdown) {
+                this.focusedDropdown = parentDropdown;
+                this.selectedIndex = Array.from(parentDropdown.children).indexOf(currentDropdown.parentNode);
+                this.updateSelection();
+            }
+        }
+
+        else if ((event.keyCode === enterKeyCode || event.keyCode === tabKeyCode) && this.selectedIndex >= 0) {
+            event.preventDefault();
+            if (selectedItem.classList.contains('item')) {
+                this.onSelect(items[this.selectedIndex].textContent);
+                this.closeDropdown();
+            }
+
+            const nestedDropdown = selectedItem.querySelector('.ttN-nested-dropdown');
+            if (nestedDropdown) {
+                this.focusedDropdown = nestedDropdown;
+                this.selectedIndex = 0;
+                this.updateSelection();
+            }
+        }
+
+        else if (event.keyCode === escKeyCode) {
+            this.closeDropdown();
+        }
+        }
+    }
+
+    onWheel(event) {
+        event.preventDefault();
+        event.stopPropagation();
+
+        const invertScroll = !!localStorage.getItem("Comfy.Settings.Comfy.InvertMenuScrolling");
+        const delta = invertScroll ? -event.deltaY : event.deltaY;
+        const hoveredDropdown = event.target.closest('.ttN-dropdown, .ttN-nested-dropdown');
+        const scrollTarget = hoveredDropdown || this.focusedDropdown || this.dropdown;
+
+        if (scrollTarget.scrollHeight > scrollTarget.clientHeight) {
+            scrollTarget.scrollTop += delta;
+            return;
+        }
+
+        const offsetStep = invertScroll
+            ? (event.deltaY < 0 ? 10 : -10)
+            : (event.deltaY < 0 ? -10 : 10);
+
+        if (scrollTarget !== this.dropdown && scrollTarget.classList.contains('ttN-nested-dropdown')) {
+            const nestedTop = parseInt(scrollTarget.style.top, 10) || 0;
+            scrollTarget.style.top = `${nestedTop + offsetStep}px`;
+            return;
+        }
+
+        const top = parseInt(this.dropdown.style.top, 10) || 0;
+        this.dropdown.style.top = `${top + offsetStep}px`;
+    }
+
+    onClick(event) {
+        if (!this.dropdown.contains(event.target) && event.target !== this.inputEl) {
+            this.closeDropdown();
+        }
+    }
+
+    updateSelection() {
+        if (!this.focusedDropdown.children) {
+            this.dropdown.classList.add('selected');
+        } else {
+            Array.from(this.focusedDropdown.children).forEach((li, index) => {
+                if (index === this.selectedIndex) {
+                    li.classList.add('selected');
+                    li.scrollIntoView({ block: 'nearest' });
+                } else {
+                    li.classList.remove('selected');
+                }
+            });
+        }
+    }
+}
+
+export function ttN_RemoveDropdown() {
+    if (activeDropdown) {
+        activeDropdown.closeDropdown();
+    }
+}
+
+export function ttN_CreateDropdown(inputEl, options, onSelect, isDict = false, manualOffset = [10,'100%'], hostElement = document.body) {
+    ttN_RemoveDropdown();
+    new Dropdown(inputEl, options, onSelect, isDict, manualOffset, hostElement);
+}
diff --git a/zavodik/nodes/ComfyUI_tinyterraNodes/js/ttNdynamicWidgets.js b/zavodik/nodes/ComfyUI_tinyterraNodes/js/ttNdynamicWidgets.js
new file mode 100644
index 0000000000000000000000000000000000000000..da0956bc556b5f94ead666e531f8fed1ae119caf
--- /dev/null
+++ b/zavodik/nodes/ComfyUI_tinyterraNodes/js/ttNdynamicWidgets.js
@@ -0,0 +1,560 @@
+import { app } from "../../scripts/app.js";
+
+let origProps = {};
+
+const findWidgetByName = (node, name) => node.widgets.find((w) => w.name === name);
+
+const doesInputLinkExist = (node, name) => node.inputs ?
node.inputs.some((input) => input.name === name && input.link != null) : false;
+
+function updateNodeHeight(node) {
+    node.setSize([node.size[0], node.computeSize()[1]]);
+    app.canvas.dirty_canvas = true;
+}
+
+function toggleWidget(node, widget, show = false, suffix = "") {
+    // Only skip toggling when the input matching this widget's name is connected.
+    if (!widget || doesInputLinkExist(node, widget.name)) return;
+    if (!origProps[widget.name]) {
+        origProps[widget.name] = { origType: widget.type, origComputeSize: widget.computeSize, origComputedHeight: widget.computedHeight };
+    }
+    const origSize = node.size;
+
+    widget.type = show ? origProps[widget.name].origType : "ttNhidden" + suffix;
+    widget.computeSize = show ? origProps[widget.name].origComputeSize : () => [0, -4];
+    widget.computedHeight = show ? origProps[widget.name].origComputedHeight : 0;
+
+    // Pass `show` and the suffix in the order the signature expects.
+    widget.linkedWidgets?.forEach(w => toggleWidget(node, w, show, ":" + widget.name));
+
+    const height = show ? Math.max(node.computeSize()[1], origSize[1]) : node.size[1];
+    node.setSize([node.size[0], height]);
+    app.canvas.dirty_canvas = true
+}
+
+function widgetLogic(node, widget) {
+    switch (widget.name) {
+        case 'lora_name':
+            if (widget.value === "None") {
+                toggleWidget(node, findWidgetByName(node, 'lora_model_strength'))
+                toggleWidget(node, findWidgetByName(node, 'lora_clip_strength'))
+                toggleWidget(node, findWidgetByName(node, 'lora_strength'))
+            } else {
+                toggleWidget(node, findWidgetByName(node, 'lora_model_strength'), true)
+                toggleWidget(node, findWidgetByName(node, 'lora_clip_strength'), true)
+                toggleWidget(node, findWidgetByName(node, 'lora_strength'), true)
+            }
+            break;
+
+        case 'lora1_name':
+            if (widget.value === "None") {
+                toggleWidget(node, findWidgetByName(node, 'lora1_model_strength'))
+                toggleWidget(node, findWidgetByName(node, 'lora1_clip_strength'))
+            } else {
+                toggleWidget(node, findWidgetByName(node, 'lora1_model_strength'), true)
+                toggleWidget(node, findWidgetByName(node, 'lora1_clip_strength'), true)
+            }
+            break;
+
+        case 'lora2_name':
+            if (widget.value === "None") {
+                toggleWidget(node, findWidgetByName(node, 'lora2_model_strength'))
+                toggleWidget(node, findWidgetByName(node, 'lora2_clip_strength'))
+            } else {
+                toggleWidget(node, findWidgetByName(node, 'lora2_model_strength'), true)
+                toggleWidget(node, findWidgetByName(node, 'lora2_clip_strength'), true)
+            }
+            break;
+
+        case 'lora3_name':
+            if (widget.value === "None") {
+                toggleWidget(node, findWidgetByName(node, 'lora3_model_strength'))
+                toggleWidget(node, findWidgetByName(node, 'lora3_clip_strength'))
+            } else {
+                toggleWidget(node, findWidgetByName(node, 'lora3_model_strength'), true)
+                toggleWidget(node, findWidgetByName(node, 'lora3_clip_strength'), true)
+            }
+            break;
+
+        case 'refiner_ckpt_name':
+            let refiner_lora1 = findWidgetByName(node, 'refiner_lora1_name')?.value
+            let refiner_lora2 = findWidgetByName(node, 'refiner_lora2_name')?.value
+            if (widget.value === "None") {
+                toggleWidget(node, findWidgetByName(node, 'refiner_vae_name'))
+                toggleWidget(node, findWidgetByName(node, 'refiner_config_name'))
+                toggleWidget(node, findWidgetByName(node, 'refiner_clip_skip'))
+                toggleWidget(node, findWidgetByName(node, 'refiner_loras'))
+                toggleWidget(node, findWidgetByName(node, 'positive_ascore'))
+                toggleWidget(node, findWidgetByName(node, 'negative_ascore'))
+
+                toggleWidget(node, findWidgetByName(node, 'refiner_lora1_name'))
+                toggleWidget(node, findWidgetByName(node, 'refiner_lora1_model_strength'))
+                toggleWidget(node, findWidgetByName(node, 'refiner_lora1_clip_strength'))
+                toggleWidget(node, findWidgetByName(node, 
'refiner_lora2_name')) + toggleWidget(node, findWidgetByName(node, 'refiner_lora2_model_strength')) + toggleWidget(node, findWidgetByName(node, 'refiner_lora2_clip_strength')) + } else { + toggleWidget(node, findWidgetByName(node, 'refiner_vae_name'), true) + toggleWidget(node, findWidgetByName(node, 'refiner_config_name'), true) + toggleWidget(node, findWidgetByName(node, 'refiner_clip_skip'), true) + toggleWidget(node, findWidgetByName(node, 'refiner_loras'), true) + toggleWidget(node, findWidgetByName(node, 'positive_ascore'), true) + toggleWidget(node, findWidgetByName(node, 'negative_ascore'), true) + toggleWidget(node, findWidgetByName(node, 'refiner_lora1_name'), true) + if (refiner_lora1 !== "None") { + toggleWidget(node, findWidgetByName(node, 'refiner_lora1_model_strength'), true) + toggleWidget(node, findWidgetByName(node, 'refiner_lora1_clip_strength'), true) + } + toggleWidget(node, findWidgetByName(node, 'refiner_lora2_name'), true) + if (refiner_lora2 !== "None") { + toggleWidget(node, findWidgetByName(node, 'refiner_lora2_model_strength'), true) + toggleWidget(node, findWidgetByName(node, 'refiner_lora2_clip_strength'), true) + } + } + break; + + case 'rescale_after_model': + if (widget.value === false) { + toggleWidget(node, findWidgetByName(node, 'rescale_method')) + toggleWidget(node, findWidgetByName(node, 'rescale')) + toggleWidget(node, findWidgetByName(node, 'percent')) + toggleWidget(node, findWidgetByName(node, 'width')) + toggleWidget(node, findWidgetByName(node, 'height')) + toggleWidget(node, findWidgetByName(node, 'longer_side')) + toggleWidget(node, findWidgetByName(node, 'crop')) + } else { + toggleWidget(node, findWidgetByName(node, 'rescale_method'), true) + toggleWidget(node, findWidgetByName(node, 'rescale'), true) + + let rescale_value = findWidgetByName(node, 'rescale').value + + if (rescale_value === 'by percentage') { + toggleWidget(node, findWidgetByName(node, 'percent'), true) + } else if (rescale_value === 'to Width/Height') { + toggleWidget(node, findWidgetByName(node, 'width'), true) + toggleWidget(node, findWidgetByName(node, 'height'), true) + } else { + toggleWidget(node, findWidgetByName(node, 'longer_side'), true) + } + toggleWidget(node, findWidgetByName(node, 'crop'), true) + } + break; + + case 'rescale': + let rescale_after_model = findWidgetByName(node, 'rescale_after_model')?.value + let hiresfix = findWidgetByName(node, 'upscale_method') || findWidgetByName(node, 'rescale_method') + if (typeof(hiresfix.value) == 'string' && hiresfix.value.includes('hiresFix')) { + hiresfix = true + } else { + hiresfix = false + } + if (widget.value === 'by percentage' && (rescale_after_model || hiresfix)) { + toggleWidget(node, findWidgetByName(node, 'width')) + toggleWidget(node, findWidgetByName(node, 'height')) + toggleWidget(node, findWidgetByName(node, 'longer_side')) + toggleWidget(node, findWidgetByName(node, 'percent'), true) + } else if (widget.value === 'to Width/Height' && (rescale_after_model || hiresfix)) { + toggleWidget(node, findWidgetByName(node, 'width'), true) + toggleWidget(node, findWidgetByName(node, 'height'), true) + toggleWidget(node, findWidgetByName(node, 'percent')) + toggleWidget(node, findWidgetByName(node, 'longer_side')) + } else if (widget.value === 'to longer side - maintain aspect' && (rescale_after_model || hiresfix)) { + toggleWidget(node, findWidgetByName(node, 'width')) + toggleWidget(node, findWidgetByName(node, 'longer_side'), true) + toggleWidget(node, findWidgetByName(node, 'height')) + toggleWidget(node, 
findWidgetByName(node, 'percent'))
+            } else if (widget.value === 'None' && (rescale_after_model || hiresfix)) {
+                toggleWidget(node, findWidgetByName(node, 'width'))
+                toggleWidget(node, findWidgetByName(node, 'longer_side'))
+                toggleWidget(node, findWidgetByName(node, 'height'))
+                toggleWidget(node, findWidgetByName(node, 'percent'))
+            } else {
+                toggleWidget(node, findWidgetByName(node, 'width'))
+                toggleWidget(node, findWidgetByName(node, 'height'))
+                toggleWidget(node, findWidgetByName(node, 'longer_side'))
+                toggleWidget(node, findWidgetByName(node, 'percent'))
+            }
+            break;
+
+        case 'upscale_method':
+            if (widget.value === "None") {
+                toggleWidget(node, findWidgetByName(node, 'factor'))
+                toggleWidget(node, findWidgetByName(node, 'crop'))
+                toggleWidget(node, findWidgetByName(node, 'upscale_model_name'))
+                toggleWidget(node, findWidgetByName(node, 'rescale'))
+                toggleWidget(node, findWidgetByName(node, 'percent'))
+                toggleWidget(node, findWidgetByName(node, 'width'))
+                toggleWidget(node, findWidgetByName(node, 'height'))
+                toggleWidget(node, findWidgetByName(node, 'longer_side'))
+            } else {
+                if (typeof(widget.value) === 'string' && widget.value.includes('[hiresFix]')) {
+                    let rescale = findWidgetByName(node, 'rescale')
+                    toggleWidget(node, rescale, true)
+                    if (rescale?.value === 'by percentage') {
+                        toggleWidget(node, findWidgetByName(node, 'percent'), true)
+                        toggleWidget(node, findWidgetByName(node, 'longer_side'))
+                        toggleWidget(node, findWidgetByName(node, 'width'))
+                        toggleWidget(node, findWidgetByName(node, 'height'))
+                        toggleWidget(node, findWidgetByName(node, 'factor'))
+                        toggleWidget(node, findWidgetByName(node, 'crop'))
+                    } else if (rescale?.value === 'to Width/Height') {
+                        toggleWidget(node, findWidgetByName(node, 'percent'))
+                        toggleWidget(node, findWidgetByName(node, 'longer_side'))
+                        toggleWidget(node, findWidgetByName(node, 'width'), true)
+                        toggleWidget(node, findWidgetByName(node, 'height'), true)
+                        toggleWidget(node, findWidgetByName(node, 'factor'))
+                        toggleWidget(node, findWidgetByName(node, 'crop'))
+                    } else if (rescale?.value === 'to longer side - maintain aspect') {
+                        // longer-side variant, mirroring the 'rescale' case above
+                        toggleWidget(node, findWidgetByName(node, 'percent'))
+                        toggleWidget(node, findWidgetByName(node, 'longer_side'), true)
+                        toggleWidget(node, findWidgetByName(node, 'width'))
+                        toggleWidget(node, findWidgetByName(node, 'height'))
+                        toggleWidget(node, findWidgetByName(node, 'factor'))
+                        toggleWidget(node, findWidgetByName(node, 'crop'))
+                    } else {
+                        toggleWidget(node, findWidgetByName(node, 'percent'))
+                        toggleWidget(node, findWidgetByName(node, 'longer_side'))
+                        toggleWidget(node, findWidgetByName(node, 'width'))
+                        toggleWidget(node, findWidgetByName(node, 'height'))
+                        toggleWidget(node, findWidgetByName(node, 'factor'))
+                        toggleWidget(node, findWidgetByName(node, 'crop'))
+                    }
+                    toggleWidget(node, findWidgetByName(node, 'upscale_model_name'), true)
+                } else {
+                    toggleWidget(node, findWidgetByName(node, 'upscale_model_name'))
+                    toggleWidget(node, findWidgetByName(node, 'rescale'))
+                    toggleWidget(node, findWidgetByName(node, 'percent'))
+                    toggleWidget(node, findWidgetByName(node, 'width'))
+                    toggleWidget(node, findWidgetByName(node, 'height'))
+                    toggleWidget(node, findWidgetByName(node, 'longer_side'))
+                    toggleWidget(node, findWidgetByName(node, 'factor'), true)
+                    toggleWidget(node, findWidgetByName(node, 'crop'), true)
+                }
+            }
+            break;
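+
+        // Each case below follows one convention: toggleWidget(node, w) hides a
+        // widget (show defaults to false) and toggleWidget(node, w, true) reveals it.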
+        case 'image_output':
+            if (['Hide', 'Preview'].includes(widget.value)) {
+                toggleWidget(node, findWidgetByName(node, 'save_prefix'))
+                toggleWidget(node, findWidgetByName(node, 'output_path'))
+                toggleWidget(node, findWidgetByName(node, 'embed_workflow'))
+                toggleWidget(node, findWidgetByName(node, 'number_padding'))
+                toggleWidget(node, findWidgetByName(node, 'overwrite_existing'))
+                toggleWidget(node, findWidgetByName(node, 'file_type'))
+            } else if (['Save', 'Hide/Save', 'Disabled'].includes(widget.value)) {
+                toggleWidget(node, findWidgetByName(node, 'save_prefix'), true)
+                toggleWidget(node, findWidgetByName(node, 'output_path'), true)
+                toggleWidget(node, findWidgetByName(node, 'number_padding'), true)
+                toggleWidget(node, findWidgetByName(node, 'overwrite_existing'), true)
+                toggleWidget(node, findWidgetByName(node, 'file_type'), true)
+                const fileTypeValue = findWidgetByName(node, 'file_type')?.value
+                // only png and webp can carry an embedded workflow
+                if (['png', 'webp'].includes(fileTypeValue)) {
+                    toggleWidget(node, findWidgetByName(node, 'embed_workflow'), true)
+                } else {
+                    toggleWidget(node, findWidgetByName(node, 'embed_workflow'))
+                }
+            }
+            break;
+
+        case 'text_output':
+            if (widget.value === "Preview") {
+                toggleWidget(node, findWidgetByName(node, 'save_prefix'))
+                toggleWidget(node, findWidgetByName(node, 'output_path'))
+                toggleWidget(node, findWidgetByName(node, 'number_padding'))
+                toggleWidget(node, findWidgetByName(node, 'overwrite_existing'))
+                toggleWidget(node, findWidgetByName(node, 'file_type'))
+            } else if (widget.value === "Save") {
+                toggleWidget(node, findWidgetByName(node, 'save_prefix'), true)
+                toggleWidget(node, findWidgetByName(node, 'output_path'), true)
+                toggleWidget(node, findWidgetByName(node, 'number_padding'), true)
+                toggleWidget(node, findWidgetByName(node, 'overwrite_existing'), true)
+                toggleWidget(node, findWidgetByName(node, 'file_type'), true)
+            }
+            break;
+
+        case 'add_noise':
+            if (widget.value === "disable") {
+                toggleWidget(node, findWidgetByName(node, 'noise_seed'))
+                toggleWidget(node, findWidgetByName(node, 'control_after_generate'))
+            } else {
+                toggleWidget(node, findWidgetByName(node, 'noise_seed'), true)
+                toggleWidget(node, findWidgetByName(node, 'control_after_generate'), true)
+            }
+            break;
+
+        case 'ckpt_B_name':
+            if (widget.value === "None") {
+                toggleWidget(node, findWidgetByName(node, 'config_B_name'))
+            } else {
+                toggleWidget(node, findWidgetByName(node, 'config_B_name'), true)
+            }
+            break;
+
+        case 'ckpt_C_name':
+            if (widget.value === "None") {
+                toggleWidget(node, findWidgetByName(node, 'config_C_name'))
+            } else {
+                toggleWidget(node, findWidgetByName(node, 'config_C_name'), true)
+            }
+            break;
+
+        case 'save_model':
+            if (widget.value === "True") {
+                toggleWidget(node, findWidgetByName(node, 'save_prefix'), true)
+            } else {
+                toggleWidget(node, findWidgetByName(node, 'save_prefix'))
+            }
+            break;
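+
+        // num_loras drives a bank of indexed widgets (lora_0_name through
+        // lora_20_name plus their strength fields): indices below the count are
+        // shown in the style the 'mode' widget selects, the rest are hidden.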
+        case 'num_loras':
+            let number_to_show = widget.value + 1
+            for (let i = 0; i < number_to_show; i++) {
+                toggleWidget(node, findWidgetByName(node, 'lora_'+i+'_name'), true)
+                if (findWidgetByName(node, 'mode').value === "simple") {
+                    toggleWidget(node, findWidgetByName(node, 'lora_'+i+'_strength'), true)
+                    toggleWidget(node, findWidgetByName(node, 'lora_'+i+'_model_strength'))
+                    toggleWidget(node, findWidgetByName(node, 'lora_'+i+'_clip_strength'))
+                } else {
+                    toggleWidget(node, findWidgetByName(node, 'lora_'+i+'_strength'))
+                    toggleWidget(node, findWidgetByName(node, 'lora_'+i+'_model_strength'), true)
+                    toggleWidget(node, findWidgetByName(node, 'lora_'+i+'_clip_strength'), true)
+                }
+            }
+            for (let i = number_to_show; i < 21; i++) {
+                toggleWidget(node, findWidgetByName(node, 'lora_'+i+'_name'))
+                toggleWidget(node, findWidgetByName(node, 'lora_'+i+'_strength'))
+                toggleWidget(node, findWidgetByName(node, 'lora_'+i+'_model_strength'))
+                toggleWidget(node, findWidgetByName(node, 'lora_'+i+'_clip_strength'))
+            }
+            updateNodeHeight(node);
+            break;
+
+        case 'mode':
+            if (node.constructor.title === "pipeLoraStack") {
+                let number_to_show2 = findWidgetByName(node, 'num_loras')?.value + 1
+                for (let i = 0; i < number_to_show2; i++) {
+                    if (widget.value === "simple") {
+                        toggleWidget(node, findWidgetByName(node, 'lora_'+i+'_strength'), true)
+                        toggleWidget(node, findWidgetByName(node, 'lora_'+i+'_model_strength'))
+                        toggleWidget(node, findWidgetByName(node, 'lora_'+i+'_clip_strength'))
+                    } else {
+                        toggleWidget(node, findWidgetByName(node, 'lora_'+i+'_strength'))
+                        toggleWidget(node, findWidgetByName(node, 'lora_'+i+'_model_strength'), true)
+                        toggleWidget(node, findWidgetByName(node, 'lora_'+i+'_clip_strength'), true)
+                    }
+                }
+                updateNodeHeight(node)
+                break;
+            } else if (node.constructor.title === "advPlot combo") {
+                if (widget.value === 'all') {
+                    toggleWidget(node, findWidgetByName(node, 'start_from'))
+                    toggleWidget(node, findWidgetByName(node, 'end_with'))
+                    toggleWidget(node, findWidgetByName(node, 'select'))
+                    toggleWidget(node, findWidgetByName(node, 'selection'))
+                } else if (widget.value === 'range') {
+                    toggleWidget(node, findWidgetByName(node, 'start_from'), true)
+                    toggleWidget(node, findWidgetByName(node, 'end_with'), true)
+                    toggleWidget(node, findWidgetByName(node, 'select'))
+                    toggleWidget(node, findWidgetByName(node, 'selection'))
+                } else {
+                    toggleWidget(node, findWidgetByName(node, 'start_from'))
+                    toggleWidget(node, findWidgetByName(node, 'end_with'))
+                    toggleWidget(node, findWidgetByName(node, 'select'), true)
+                    toggleWidget(node, findWidgetByName(node, 'selection'), true)
+                }
+            }
+            break;
+
+        case 'empty_latent_aspect':
+            if (widget.value !== 'width x height [custom]') {
+                toggleWidget(node, findWidgetByName(node, 'empty_latent_width'))
+                toggleWidget(node, findWidgetByName(node, 'empty_latent_height'))
+            } else {
+                toggleWidget(node, findWidgetByName(node, 'empty_latent_width'), true)
+                toggleWidget(node, findWidgetByName(node, 'empty_latent_height'), true)
+            }
+            break;
+
+        case 'conditioning_aspect':
+            if (widget.value !== 'width x height [custom]') {
+                toggleWidget(node, findWidgetByName(node, 'conditioning_width'))
+                toggleWidget(node, findWidgetByName(node, 'conditioning_height'))
+            } else {
+                toggleWidget(node, findWidgetByName(node, 'conditioning_width'), true)
+                toggleWidget(node, findWidgetByName(node, 'conditioning_height'), true)
+            }
+            break;
+
+        case 'target_aspect':
+            if (widget.value !== 'width x height [custom]') {
+                toggleWidget(node, findWidgetByName(node, 'target_width'))
+                toggleWidget(node, findWidgetByName(node, 'target_height'))
+            } else {
+                toggleWidget(node, findWidgetByName(node, 'target_width'), true)
+                toggleWidget(node, findWidgetByName(node, 'target_height'), true)
+            }
+            break;
+
+        case 'toggle':
+            widget.type = 'toggle'
+            widget.options = {on: 'Enabled', off: 'Disabled'}
+            break;
+
+        case 'refiner_steps':
+            if (widget.value == 0) {
+                toggleWidget(node, findWidgetByName(node, 'refiner_cfg'))
+                toggleWidget(node, findWidgetByName(node, 'refiner_denoise'))
+            } else {
+                toggleWidget(node, findWidgetByName(node, 'refiner_cfg'), true)
+                toggleWidget(node, findWidgetByName(node, 'refiner_denoise'), true)
+            }
+            break;
+
+        case 'sampler_state':
+            if (widget.value == 'Hold') {
+                findWidgetByName(node, 'control_after_generate').value = 'fixed'
+            }
+            break;
+
+        case 'print_to_console':
+            if 
(widget.value == false) { + toggleWidget(node, findWidgetByName(node, 'console_title')) + toggleWidget(node, findWidgetByName(node, 'console_color')) + } else { + toggleWidget(node, findWidgetByName(node, 'console_title'), true) + toggleWidget(node, findWidgetByName(node, 'console_color'), true) + } + break; + + case 'sampling': + if (widget.value == 'Default') { + toggleWidget(node, findWidgetByName(node, 'zsnr')) + } else { + toggleWidget(node, findWidgetByName(node, 'zsnr'), true) + } + break; + + case 'range_mode': + function setWidgetOptions(widget, options) { + widget.options.step = options.step; + widget.options.round = options.round; + widget.options.precision = options.precision; + } + + if (widget.value.startsWith('step')) { + toggleWidget(node, findWidgetByName(node, 'stop')) + toggleWidget(node, findWidgetByName(node, 'step'), true) + toggleWidget(node, findWidgetByName(node, 'include_stop')) + } else { + toggleWidget(node, findWidgetByName(node, 'stop'), true) + toggleWidget(node, findWidgetByName(node, 'step')) + toggleWidget(node, findWidgetByName(node, 'include_stop'), true) + } + if (widget.value.endsWith('int')) { + const intOptions = { + step: 10, + round: 1, + precision: 0 + }; + const start_widget = findWidgetByName(node, 'start') + const stop_widget = findWidgetByName(node, 'stop') + const step_widget = findWidgetByName(node, 'step') + setWidgetOptions(start_widget, intOptions); + setWidgetOptions(stop_widget, intOptions); + setWidgetOptions(step_widget, intOptions); + + } else { + const floatOptions = { + step: 0.1, + round: 0.01, + precision: 2 + }; + const start_widget = findWidgetByName(node, 'start') + const stop_widget = findWidgetByName(node, 'stop') + const step_widget = findWidgetByName(node, 'step') + setWidgetOptions(start_widget, floatOptions); + setWidgetOptions(stop_widget, floatOptions); + setWidgetOptions(step_widget, floatOptions); + } + break; + + case 'file_type': + const imageOutputValue = findWidgetByName(node, 'image_output').value + if (['png', 'webp'].includes(widget.value) && ['Save', 'Hide/Save', 'Disabled'].includes(imageOutputValue)) { + toggleWidget(node, findWidgetByName(node, 'embed_workflow'), true) + } else { + toggleWidget(node, findWidgetByName(node, 'embed_workflow')) + } + break; + + case 'replace_mode': + if (widget.value == true) { + toggleWidget(node, findWidgetByName(node, 'search_string'), true) + } else { + toggleWidget(node, findWidgetByName(node, 'search_string')) + } + } +} + +const getSetWidgets = ['rescale_after_model', 'rescale', 'image_output', + 'lora_name', 'lora1_name', 'lora2_name', 'lora3_name', + 'refiner_lora1_name', 'refiner_lora2_name', 'refiner_steps', 'upscale_method', + 'image_output', 'text_output', 'add_noise', + 'ckpt_B_name', 'ckpt_C_name', 'save_model', 'refiner_ckpt_name', + 'num_loras', 'mode', 'toggle', 'empty_latent_aspect', 'conditioning_aspect', 'target_aspect', 'sampler_state', + 'print_to_console', 'sampling', 'range_mode', 'file_type', 'replace_mode'] +const getSetTitles = [ + "hiresfixScale", + "pipeLoader", + "pipeLoader v1 (Legacy)", + "pipeLoaderSDXL", + "pipeLoaderSDXL v1 (Legacy)", + "pipeKSampler", + "pipeKSampler v1 (Legacy)", + "pipeKSamplerAdvanced", + "pipeKSamplerAdvanced v1 (Legacy)", + "pipeKSamplerSDXL", + "pipeKSamplerSDXL v1 (Legacy)", + "imageRemBG", + "imageOutput", + "multiModelMerge", + "pipeLoraStack", + "pipeEncodeConcat", + "tinyKSampler", + "debugInput", + "tinyLoader", + "advPlot range", + "advPlot combo", + "advPlot images", + "advPlot string", + "textOutput", +]; + 
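+// getSetters intercepts assignments to each watched widget's .value with a
+// getter/setter pair, so any change (UI interaction or programmatic) re-runs
+// widgetLogic for that widget before the new value takes effect.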
+function getSetters(node) { + if (node.widgets) + for (const w of node.widgets) { + if (getSetWidgets.includes(w.name)) { + widgetLogic(node, w); + let widgetValue = w.value; + + // Define getters and setters for widget values + Object.defineProperty(w, 'value', { + get() { + return widgetValue; + }, + set(newVal) { + if (newVal !== widgetValue) { + widgetValue = newVal; + widgetLogic(node, w); + } + } + }); + } + } +} + +app.registerExtension({ + name: "comfy.ttN.dynamicWidgets", + + nodeCreated(node) { + const nodeTitle = node.constructor.title; + if (getSetTitles.includes(nodeTitle)) { + getSetters(node); + } + } +}); \ No newline at end of file diff --git a/zavodik/nodes/ComfyUI_tinyterraNodes/js/ttNembedAC.js b/zavodik/nodes/ComfyUI_tinyterraNodes/js/ttNembedAC.js new file mode 100644 index 0000000000000000000000000000000000000000..20ac845992e20defadd30f37639c79a81939ed13 --- /dev/null +++ b/zavodik/nodes/ComfyUI_tinyterraNodes/js/ttNembedAC.js @@ -0,0 +1,290 @@ +import { app } from "../../scripts/app.js"; +import { api } from "../../scripts/api.js"; +import { ttN_CreateDropdown, ttN_RemoveDropdown } from "./ttNdropdown.js"; + +// Initialize some global lists and objects. +let autoCompleteDict = {}; // {prefix: [suggestions]} +let autoCompleteHierarchy = {}; +let nsp_keys = ['3d-terms', 'adj-architecture', 'adj-beauty', 'adj-general', 'adj-horror', 'album-cover', 'animals', 'artist', 'artist-botanical', 'artist-surreal', 'aspect-ratio', 'band', 'bird', 'body-fit', 'body-heavy', 'body-light', 'body-poor', 'body-shape', 'body-short', 'body-tall', 'bodyshape', 'camera', 'camera-manu', 'celeb', 'color', 'color-palette', 'comic', 'cosmic-galaxy', 'cosmic-nebula', 'cosmic-star', 'cosmic-terms', 'details', 'dinosaur', 'eyecolor', 'f-stop', 'fantasy-creature', 'fantasy-setting', 'fish', 'flower', 'focal-length', 'foods', 'forest-type', 'fruit', 'games', 'gen-modifier', 'gender', 'gender-ext', 'hair', 'hd', 'identity', 'identity-adult', 'identity-young', 'iso-stop', 'landscape-type', 'movement', 'movie', 'movie-director', 'nationality', 'natl-park', 'neg-weight', 'noun-beauty', 'noun-emote', 'noun-fantasy', 'noun-general', 'noun-horror', 'occupation', 'penciller', 'photo-term', 'pop-culture', 'pop-location', 'portrait-type', 'punk', 'quantity', 'rpg-Item', 'scenario-desc', 'site', 'skin-color', 'style', 'tree', 'trippy', 'water', 'wh-site'] + +function getFileName(path) { + return path.split(/[\/:\\]/).pop(); +} + +function getCurrentWord(widget) { + const formattedInput = widget.inputEl.value.replace(/>\s*/g, '> ').replace(/\s+/g, ' '); + const words = formattedInput.split(' '); + + const adjustedInput = widget.inputEl.value.substring(0, widget.inputEl.selectionStart) + .replace(/>\s*/g, '> ').replace(/\s+/g, ' '); + + const currentWordPosition = adjustedInput.split(' ').length - 1; + + return words[currentWordPosition].toLowerCase(); +} + +function isTriggerWord(word) { + for (let prefix in autoCompleteDict) { + if ((prefix.startsWith(word) && word.length > 1) || word.startsWith(prefix)) return true; + } + return false; +} + +const _generatePrefixes = (str) => { + const prefixes = []; + while (str.length > 1) { + prefixes.push(str); + str = str.substring(0, str.length - 1); + } + return prefixes; +}; + +function _cleanInputWord(word) { + let prefixesToRemove = []; + for (let prefix in autoCompleteDict) { + prefixesToRemove = [...prefixesToRemove, ..._generatePrefixes(prefix)]; + } + let cleanedWord = prefixesToRemove.reduce((acc, prefix) => acc.replace(prefix, ''), word.toLowerCase()); + if 
(cleanedWord.includes(':')) { + const parts = cleanedWord.split(':'); + cleanedWord = parts[0]; + } + return cleanedWord.replace(/\//g, "\\"); +} + +function getSuggestionsForWord(word) { + let suggestions = []; + for (let prefix in autoCompleteDict) { + if ((prefix.startsWith(word) && word.length > 1) || word.startsWith(prefix)) { + suggestions = autoCompleteDict['fpath_' + prefix]; // Get suggestions from the dictionary + break; + } + } + const cleanedWord = _cleanInputWord(word); + // Filter suggestions based on the cleaned word + return suggestions.filter(suggestion => + suggestion.toLowerCase().includes(cleanedWord) || getFileName(suggestion).toLowerCase().includes(cleanedWord) + ); +} + + +function _convertListToHierarchy(list) { + const hierarchy = {}; + list.forEach(item => { + const parts = item.split(/:\\|\\/); + let node = hierarchy; + parts.forEach((part, idx) => { + node = node[part] = (idx === parts.length - 1) ? null : (node[part] || {}); + }); + }); + return hierarchy; +} + +function _insertSuggestion(widget, suggestion) { + const formattedInput = widget.inputEl.value.replace(/>\s*/g, '> ').replace(/\s+/g, ' '); + const inputSegments = formattedInput.split(' '); + + const adjustedInput = widget.inputEl.value.substring(0, widget.inputEl.selectionStart) + .replace(/>\s*/g, '> ').replace(/\s+/g, ' '); + const currentSegmentIndex = adjustedInput.split(' ').length - 1; + + let matchedPrefix = ''; + let currentSegment = inputSegments[currentSegmentIndex].toLowerCase(); + if (["loras", "refiner_loras"].includes(widget.name) && ['', ' ','<','')) { + oldSuffix = oldSuffix.split('>')[0] + '>'; + } + suffix = oldSuffix ? ':' + oldSuffix : ':1>'; + } + if (matchedPrefix === '__') { + suffix = '__'; + } + + inputSegments[currentSegmentIndex] = matchedPrefix + suggestion + suffix; + return inputSegments.join(' '); +} + +function showSuggestionsDropdown(widget, suggestions) { + const hierarchy = _convertListToHierarchy(suggestions); + ttN_CreateDropdown(widget.inputEl, hierarchy, selected => { + widget.inputEl.value = _insertSuggestion(widget, selected); + }, true); +} + + +function _initializeAutocompleteData(initialList, prefix) { + autoCompleteDict['fpath_' + prefix] = initialList + autoCompleteDict[prefix] = initialList.map(getFileName).map(item => prefix + item); +} + +function _initializeAutocompleteList(initialList, prefix) { + autoCompleteDict['fpath_' + prefix] = initialList + autoCompleteDict[prefix] = initialList.map(item => prefix + item); +} + +function _isRelevantWidget(widget) { + return (["customtext", "ttNhidden"].includes(widget.type) && (widget.dynamicPrompts !== false) || widget.dynamicPrompts) && !_isLorasWidget(widget); +} + +function _isLorasWidget(widget) { + return (["customtext", "ttNhidden"].includes(widget.type) && ["loras", "refiner_loras"].includes(widget.name)); +} + +function findPysssss(lora=false) { + const found = JSON.parse(app.ui.settings.getSettingValue('pysssss.AutoCompleter')) || false; + if (found && lora) { + return JSON.parse(localStorage.getItem("pysssss.AutoCompleter.ShowLoras")) || false; + } + return found; +} + +function _attachInputHandler(widget) { + if (!widget.ttNhandleInput) { + widget.ttNhandleInput = () => { + if (findPysssss()) { + return + } + + let currentWord = getCurrentWord(widget); + if (isTriggerWord(currentWord)) { + const suggestions = getSuggestionsForWord(currentWord); + if (suggestions.length > 0) { + showSuggestionsDropdown(widget, suggestions); + } else { + ttN_RemoveDropdown(); + } + } else { + ttN_RemoveDropdown(); + 
} + }; + } + ['input', 'mousedown'].forEach(event => { + widget?.inputEl?.removeEventListener(event, widget.ttNhandleInput); + if (findPysssss()) { + return + } + widget?.inputEl?.addEventListener(event, widget.ttNhandleInput); + }); +} + +function _attachLorasHandler(widget) { + if (!widget.ttNhandleLorasInput) { + widget.ttNhandleLorasInput = () => { + if (findPysssss(true)) { + return + } + let currentWord = getCurrentWord(widget); + if (['',' ','<',' 0) { + showSuggestionsDropdown(widget, suggestions); + } else { + ttN_RemoveDropdown(); + } + } else { + ttN_RemoveDropdown(); + } + }; + } + + ['input', 'mouseup'].forEach(event => { + widget?.inputEl?.removeEventListener(event, widget.ttNhandleLorasInput); + if (findPysssss(true)) { + return + } + widget?.inputEl?.addEventListener(event, widget.ttNhandleLorasInput); + }); + + if (!widget.ttNhandleScrollInput) { + widget.ttNhandleScrollInput = (event) => { + event.preventDefault(); + + const step = event.ctrlKey ? 0.1 : 0.01; + + // Determine the scroll direction + const direction = Math.sign(event.deltaY); // Will be -1 for scroll up, 1 for scroll down + + // Get the current selection + const inputEl = widget.inputEl; + let selectionStart = inputEl.selectionStart; + let selectionEnd = inputEl.selectionEnd; + const selected = inputEl.value.substring(selectionStart, selectionEnd); + + if (selected === 'lora' || selected === 'skip') { + const swapWith = selected === 'lora' ? 'skip' : 'lora'; + inputEl.value = inputEl.value.substring(0, selectionStart) + swapWith + inputEl.value.substring(selectionEnd); + inputEl.setSelectionRange(selectionStart, selectionStart + swapWith.length); + return + } + + // Expand the selection to make sure the whole number is selected + while (selectionStart > 0 && /\d|\.|-/.test(inputEl.value.charAt(selectionStart - 1))) { + selectionStart--; + } + while (selectionEnd < inputEl.value.length && /\d|\.|-/.test(inputEl.value.charAt(selectionEnd))) { + selectionEnd++; + } + + const selectedText = inputEl.value.substring(selectionStart, selectionEnd); + + // Check if the selected text is a number + if (!isNaN(selectedText) && selectedText.trim() !== '') { + let trail = selectedText.split('.')[1]?.length; + if (!trail || trail < 2) { + trail = 2; + } + + const currentValue = parseFloat(selectedText); + let modifiedValue = currentValue - direction * step; + + // Format the number to avoid floating point precision issues and then convert back to a float + modifiedValue = parseFloat(modifiedValue.toFixed(trail)); + + // Replace the selected text with the new value, keeping the selection + inputEl.value = inputEl.value.substring(0, selectionStart) + modifiedValue + inputEl.value.substring(selectionEnd); + const newSelectionEnd = selectionStart + modifiedValue.toString().length; + inputEl.setSelectionRange(selectionStart, newSelectionEnd); + } + }; + } + + widget.inputEl.removeEventListener('wheel', widget.ttNhandleScrollInput); + widget.inputEl.addEventListener('wheel', widget.ttNhandleScrollInput); +} + +app.registerExtension({ + name: "comfy.ttN.AutoComplete", + async init() { + const embs = await api.fetchApi("/embeddings") + const loras = await api.fetchApi("/ttN/loras") + + _initializeAutocompleteData(await embs.json(), 'embedding:'); + _initializeAutocompleteData(await loras.json(), ' i.src.includes("filename")); + return img ? 
img.src : null; +} + +function _findLatentPreviewImageSRC(node) { + if (!node.imgs) return null; + + if (node.imageIndex != null && + node.imageIndex < node.imgs.length) { + return node.imgs[node.imageIndex].src; + } + + if (node.overIndex != null && + node.overIndex < node.imgs.length) { + return node.imgs[node.overIndex].src; + } + + return null; +} + +function updateImageTLDE() { + for (let node of app.graph._nodes) { + if (!node.imgs) continue; + + const finalSrc = _findFullImageSRC(node); + const latentSrc = _findLatentPreviewImageSRC(node); + + ttN_srcDict[node.id] = ttN_srcDict[node.id] || []; + + let previousLength = ttN_srcDict[node.id].length; + + if ( + finalSrc && + finalSrc.includes("filename") && + !ttN_srcDict[node.id].includes(finalSrc) + ) { + ttN_srcDict[node.id].push(finalSrc); + + // CAP HISTORY + if (ttN_srcDict[node.id].length > MAX_HISTORY_PER_NODE) { + ttN_srcDict[node.id].shift(); + } + } + + const viewers = + [...TTNViewer.instances] + .filter(v => v.node.id === node.id); + + for (const viewer of viewers) { + + const wasLast = viewer.imageIndex === previousLength - 1; + + if (finalSrc && wasLast && viewer.slideshow) { + viewer.setImage(-1); + continue; + } + + if ( + viewer.slideshow && + wasLast && + latentSrc && + !latentSrc.includes("filename") && + !finalSrc + ) { + viewer.image.src = latentSrc; + } + } + } + + const validNodeIds = new Set(app.graph._nodes.map(n => n.id)); + if (validNodeIds.size > 0) { + Object.keys(ttN_srcDict).forEach(id => { + if (!validNodeIds.has(Number(id))) { + delete ttN_srcDict[id]; + } + }); + } + + saveSrcDict(); + + TTNViewer.instances.forEach(v => v.refreshImages()); +} + +let _updateScheduled = null; + +function scheduleImageUpdate(delay = 300) { + if (_updateScheduled) return; + + _updateScheduled = setTimeout(() => { + updateImageTLDE(); + _updateScheduled = null; + }, delay); +} + +function _handleExecutedEvent(e) { + scheduleImageUpdate(500); +} + +function clearSrcDict() { + ttN_srcDict = {}; + saveSrcDict(); +} + +function _handleReconnectingEvent(e) { + clearSrcDict(); + localStorage.removeItem(STORAGE_KEYS.DEFAULTNODE); +} + + +api.addEventListener("status", _handleExecutedEvent); +api.addEventListener("progress", _handleExecutedEvent); +api.addEventListener("execution_cached", _handleExecutedEvent); +api.addEventListener("reconnecting", _handleReconnectingEvent); + +/* ========================================================= + VIEWER ENGINE +========================================================= */ + +class TTNViewer { + static instances = new Set(); + static fullscreenInstance = null; + + constructor(node, doc, mode = "fullscreen") { + this.node = node; + this.doc = doc; + this.mode = mode; + + this.imageIndex = -1; + + // Compare state + this.compareBase = null; + this.compareTarget = null; + this.comparing = false; + + // Transform + this.scale = 1; + this.offsetX = 0; + this.offsetY = 0; + this.dragging = false; + this.dragStartX = 0; + this.dragStartY = 0; + + this.autohide = JSON.parse(localStorage.getItem(STORAGE_KEYS.AUTOHIDE)) ?? true; + this.invertctrl = JSON.parse(localStorage.getItem(STORAGE_KEYS.INVERT)) ?? false; + this.fitscreentoggle = JSON.parse(localStorage.getItem(STORAGE_KEYS.FITSCREEN)) ?? 
true; + + this.slideshow = true; + this.hideTimeout = null; + + this._lastWheelTime = 0; + this._resizeObserver = null; + this._wheelOptions = { passive: false }; + this._lastSignature = null; + + TTNViewer.instances.add(this); + if (this.mode === "popout") { + window.addEventListener("storage", () => { + this.refreshImages(); + }); + } + this.init(); + } + + /* ================= INIT ================= */ + + init() { + this.injectCSS(); + this.createLayout(); + this.attachEvents(); + this.refreshImages(); + + if (this.mode === "fullscreen") this.wrapper.requestFullscreen(); + } + + injectCSS() { + if (this.doc.getElementById("ttn-viewer-style")) return; + + const style = this.doc.createElement("style"); + style.id = "ttn-viewer-style"; + style.innerHTML = ` + html, body { + margin:0; + padding:0; + width:100%; + height:100%; + background:black; + } + + .hidden { + transition: opacity 0.5s, visibility 0.5s, transform 0.2s ease!important; + opacity: 0!important; + visibility: hidden!important; + } + + .ttn-wrapper { + position: fixed; + inset: 0; + width:100%; + height:100%; + display:flex; + justify-content:center; + align-items:center; + transition: background 0.3s; + background-color: #1f1f1f; + } + + .ttn-wrapper.slideshow { + background:black; + } + + .ttn-main-img { + position:absolute; + transform-origin: 0 0; + max-width:none; + max-height:none; + user-select:none; + transform: translateZ(0); + } + + .ttn-previews { + position: absolute; + bottom: 0; + left: 0; + display: flex; + width: max-content; + height: 110px; + transition: transform 0.2s ease; + align-items: flex-end; + background: black; + } + + .ttn-img { + height: 90px; + border: 10px solid black; + cursor: pointer; + display: block; + transition: height 0.4s ease, transform 0.4s ease; + background: black; + box-sizing: content-box; + } + + + .ttn-img.active { + height: 140px; + z-index: 10; + transition: 0.1s; + } + + .ttn-img.before { + transform: scale(1.01); + } + + .ttn-img.before:hover { + height: 110px!important; + z-index: 10; + } + + .ttn-img.after { + transform: scale(1.01); + } + + .ttn-img.after:hover { + height: 110px!important; + z-index: 10; + } + .ttn-img.compare-base { + border:10px solid cyan; + } + + .ttn-img.compare-target { + border:10px solid red; + } + + .ttn-context { + position:absolute; + background:#222; + color:white; + padding:5px; + border:1px solid #555; + z-index:9999; + font-size:14px; + } + + .ttn-context div { + padding:4px 10px; + cursor:pointer; + } + + .ttn-context div:hover { + background:#444; + } + + .settingsBtn { + position: absolute; + top: 10px; + right: 10px; + z-index: 20; + background: gray; + color: white; + border-width: medium; + border-color: silver; + box-sizing: content-box; + } + + .settingsMenu { + position: absolute; + top: 35px; + right: 10px; + background: #222; + padding: 10px; + border: 1px solid #555; + z-Index: 20; + width: 140px; + box-sizing: content-box; + } + + .ttn-btn { + width:stretch; + background: #202020; + border-color: black; + color: gray; + margin: 5px; + padding: 5px; + } + + .ttN-dropdown, .ttN-nested-dropdown { + position: relative; + box-sizing: border-box; + background-color: #171717; + box-shadow: 0 4px 4px rgba(255, 255, 255, .25); + padding: 0; + margin: 0; + list-style: none; + z-index: 1000; + overflow: visible; + max-height: fit-content; + max-width: fit-content; + color: white; + } + + .ttN-dropdown { + position: absolute; + border-radius: 0; + } + + .ttN-dropdown.ttN-dropdown-scrollable { + max-height: min(48vh, 360px); + min-width: 
220px; + overflow-y: auto; + overflow-x: hidden; + overscroll-behavior: contain; + scrollbar-gutter: stable; + } + + .ttN-nested-dropdown.ttN-dropdown-scrollable { + max-height: min(48vh, 360px); + overflow-y: auto; + overflow-x: hidden; + overscroll-behavior: contain; + scrollbar-gutter: stable; + } + + .ttN-dropdown.ttN-dropdown-scrollable::-webkit-scrollbar, + .ttN-nested-dropdown.ttN-dropdown-scrollable::-webkit-scrollbar { + width: 10px; + } + + .ttN-dropdown.ttN-dropdown-scrollable::-webkit-scrollbar-track, + .ttN-nested-dropdown.ttN-dropdown-scrollable::-webkit-scrollbar-track { + background: #121212; + } + + .ttN-dropdown.ttN-dropdown-scrollable::-webkit-scrollbar-thumb, + .ttN-nested-dropdown.ttN-dropdown-scrollable::-webkit-scrollbar-thumb { + background: #4b4b4b; + border-radius: 8px; + } + + .ttN-dropdown.ttN-dropdown-scrollable::-webkit-scrollbar-thumb:hover, + .ttN-nested-dropdown.ttN-dropdown-scrollable::-webkit-scrollbar-thumb:hover { + background: #646464; + } + + /* Style for final items */ + .ttN-dropdown li.item, .ttN-nested-dropdown li.item { + font-weight: normal; + min-width: max-content; + } + + /* Style for folders (parent items) */ + .ttN-dropdown li.folder, .ttN-nested-dropdown li.folder { + cursor: default; + position: relative; + border-right: 3px solid #005757; + } + + .ttN-dropdown li.folder::after, .ttN-nested-dropdown li.folder::after { + content: ">"; + position: absolute; + right: 2px; + font-weight: normal; + } + + .ttN-dropdown li, .ttN-nested-dropdown li { + padding: 4px 10px; + cursor: pointer; + font-family: system-ui; + font-size: 0.7rem; + position: relative; + } + + /* Style for nested dropdowns */ + .ttN-nested-dropdown { + position: absolute; + top: 0; + left: 100%; + margin: 0; + border: none; + display: none; + } + + .ttN-dropdown li.selected > .ttN-nested-dropdown, + .ttN-nested-dropdown li.selected > .ttN-nested-dropdown { + display: block; + border: none; + } + + .ttN-dropdown li.selected, + .ttN-nested-dropdown li.selected { + background-color: #222222; + border: none; + } + `; + this.doc.head.appendChild(style); + } + + createLayout() { + this.wrapper = this.doc.createElement("div"); + this.wrapper.className = "ttn-wrapper slideshow"; + this.doc.body.appendChild(this.wrapper); + + this.image = this.doc.createElement("img"); + this.image.className = "ttn-main-img"; + this.wrapper.appendChild(this.image); + + this.previewBar = this.doc.createElement("div"); + this.previewBar.className = "ttn-previews hidden"; + this.wrapper.appendChild(this.previewBar); + + this.settingsBtn = this.doc.createElement("button"); + this.settingsBtn.innerText = "⚙"; + this.settingsBtn.className = "settingsBtn hidden" + this.wrapper.appendChild(this.settingsBtn); + + this.settingsBtn.onclick = () => + this.toggleSettingsMenu(); + } + + /* ================= IMAGE ================= */ + + refreshImages() { + const list = ttN_srcDict[this.node.id] || []; + + const newSignature = list.join("|"); + if (this._lastSignature === newSignature) return; + this._lastSignature = newSignature; + + this.previewBar.innerHTML = ""; + + list.forEach((src, i) => { + const img = this.doc.createElement("img"); + img.src = src; + img.className = "ttn-img"; + + img.onclick = () => this.setImage(i); + + img.oncontextmenu = (e) => { + e.preventDefault(); + this.ttNcontextMenu(img, i); + }; + + this.previewBar.appendChild(img); + }); + + if (list.length && this.imageIndex === -1) { + this.setImage(list.length - 1); + } + + this.updatePreviewHighlight(); + } + + setImage(i) { + const list 
= ttN_srcDict[this.node.id] || []; + if (!list.length) return; + + if (i === -1) { + i = list.length - 1; + } else { + i = ((i % list.length) + list.length) % list.length; + } + + this.imageIndex = i; + this.image.src = list[i]; + + this.updatePreviewHighlight(); + + const activeThumb = this.previewBar.children[i]; + + if (activeThumb && !activeThumb.complete) { + activeThumb.onload = () => { + requestAnimationFrame(() => + this.applyPreviewTranslation() + ); + }; + } else { + requestAnimationFrame(() => + this.applyPreviewTranslation() + ); + } + } + + next(ctrl=false, shift=false, reverse=false) { + const num = shift === true ? 5 : 1 + if (this.compareBase !== null && this.compareTarget !== null) { + this.imageIndex = + this.imageIndex === this.compareBase + ? this.compareTarget + : this.compareBase; + this.setImage(this.imageIndex); + return; + } + if (reverse) { + if (ctrl) { + this.setImage(0) + } else { + this.setImage(this.imageIndex - num); + } + } else { + if (ctrl) { + this.setImage(-1) + } else { + this.setImage(this.imageIndex + num); + } + } + } + + prev(ctrl=false, shift=false) { this.next(ctrl, shift, true); } + + /* ================= COMPARE ================= */ + ttNcontextMenu(imgElement, index) { + const SOC = 'Select for Compare' + const CWS = 'Compare with Selected' + const CC = 'Clear Compare' + + let suggestions = {} + + if (this.compareBase !== index && this.compareTarget !== index) { + suggestions[SOC] = null + } + + if (this.compareBase !== null && this.compareBase !== index && this.compareTarget !== index) { + suggestions[CWS] = null + } + + if (this.comparing || this.compareBase !== null) { + suggestions[CC] = null + } + + const manualOffset = ['80%', '70%']; + ttN_CreateDropdown(imgElement, suggestions, async (s) => { + if (s === SOC) { + this.compareBase = index; + this.setImage(index); + this.updatePreviewHighlight(); + } + if (s === CWS) { + if (this.compareBase !== null && this.compareBase !== index) { + this.compareTarget = index; + this.imageIndex = this.compareBase; + this.comparing = true; + this.setImage(index); + } + this.updatePreviewHighlight(); + } + if (s === CC) { + this.compareBase = null; + this.compareTarget = null; + this.comparing = false; + this.updatePreviewHighlight(); + } + }, true, manualOffset, this.wrapper) + } + + updatePreviewHighlight() { + [...this.previewBar.children].forEach((el, i) => { + el.classList.toggle("active", i === this.imageIndex); + el.classList.toggle("compare-base", i === this.compareBase); + el.classList.toggle("compare-target", i === this.compareTarget); + + el.classList.toggle("before", i < this.imageIndex) + el.classList.toggle("after", i > this.imageIndex) + }); + } + + /* ================= TRANSFORM ================= */ + + resetTransform() { + this.scale = 1; + this.offsetX = 0; + this.offsetY = 0; + this.applyTransform(); + } + + applyTransform() { + const x = Math.round(this.offsetX * 1000) / 1000; + const y = Math.round(this.offsetY * 1000) / 1000; + const s = Math.round(this.scale * 1000) / 1000; + + this.image.style.transform = + `translate(${x}px, ${y}px) scale(${s})`; + } + + applyPreviewTranslation() { + if (!this.previewBar.children.length) return; + + const active = this.previewBar.children[this.imageIndex]; + if (!active) return; + + requestAnimationFrame(() => { + // Distance from preview bar left edge to active center + const activeCenter = + active.offsetLeft + + active.offsetWidth / 2 + + parseFloat(getComputedStyle(this.previewBar).paddingLeft); + + // Visible center of screen + const 
screenCenter = this.wrapper.clientWidth / 2; + + // Compute translation so activeCenter aligns with screenCenter + const translateX = screenCenter - activeCenter; + + this.previewBar.style.transform = + `translateX(${translateX}px)`; + }); + } + + zoomImage(e) { + const rect = this.image.getBoundingClientRect(); + + // Mouse position relative to image + const mouseX = e.clientX - rect.left; + const mouseY = e.clientY - rect.top; + + const prevScale = this.scale; + const zoomFactor = 1.2; + + let newScale = e.deltaY > 0 + ? prevScale / zoomFactor + : prevScale * zoomFactor; + newScale = Math.min(Math.max(newScale, 0.1), 8); + + const scaleRatio = newScale / prevScale; + + // Adjust offsets so the point under cursor stays fixed + this.offsetX -= mouseX * (scaleRatio - 1); + this.offsetY -= mouseY * (scaleRatio - 1); + + if (Math.abs(this.offsetX) < 0.0001) this.offsetX = 0; + if (Math.abs(this.offsetY) < 0.0001) this.offsetY = 0; + + this.scale = newScale; + this.applyTransform(); + } + + fitToScreen() { + if (!this.image.naturalWidth || !this.image.naturalHeight) return; + + const wrapperWidth = this.wrapper.clientWidth; + const wrapperHeight = this.wrapper.clientHeight; + + const imgWidth = this.image.naturalWidth; + const imgHeight = this.image.naturalHeight; + + const scaleX = wrapperWidth / imgWidth; + const scaleY = wrapperHeight / imgHeight; + + this.scale = Math.min(scaleX, scaleY); + + const scaledWidth = imgWidth * this.scale; + const scaledHeight = imgHeight * this.scale; + + this.offsetX = -(scaledWidth - imgWidth) / 2; + this.offsetY = -(scaledHeight - imgHeight) / 2; + this.applyTransform(); + } + /* ================ HELPERS ================= */ + + _isMouseOverElement(element, mouseX, mouseY) { + if (!element) return false; + const rect = element.getBoundingClientRect(); + return ( + mouseX >= rect.left && + mouseX <= rect.right && + mouseY >= rect.top && + mouseY <= rect.bottom + ); + } + + _isOverUI(mouseX, mouseY) { + if (this.previewBar && this._isMouseOverElement(this.previewBar, mouseX, mouseY)) { + return true + } + if (this.settingsBtn && this._isMouseOverElement(this.settingsBtn, mouseX, mouseY)) { + return true + } + if (this.settingsMenu && this._isMouseOverElement(this.settingsMenu, mouseX, mouseY)) { + return true + } + + return false + } + + _reset_hideUI_Timeout(timeout=3700) { + clearTimeout(this.hideTimeout); + + this.hideTimeout = setTimeout(() => { + if (this.slideshow && this.autohide) { + this.toggleUI(false, false); + } + }, timeout); + } + + toggleUI(show=null, reset=true) { + if (show==null) { + this.previewBar.classList.toggle('hidden') + this.settingsBtn.classList.toggle('hidden') + this.settingsMenu?.classList.toggle('hidden') + } else { + this.previewBar.classList.toggle('hidden', !show) + this.settingsBtn.classList.toggle('hidden', !show) + this.settingsMenu?.classList.toggle('hidden', !show) + } + if (reset) this._reset_hideUI_Timeout(); + } + + toggleSettingsMenu() { + if (this.settingsMenu) { + this.settingsMenu.remove(); + this.settingsMenu = null; + return; + } + + const menu = this.doc.createElement("div"); + menu.className = "settingsMenu" + + const autoBtn = this.doc.createElement("button"); + autoBtn.className = 'ttn-btn' + autoBtn.id = 'autoBtn' + autoBtn.innerText = `Autohide: ${this.autohide ? "ON" : "OFF"}`; + autoBtn.onclick = () => { + this.autohide = !this.autohide; + localStorage.setItem(STORAGE_KEYS.AUTOHIDE, + JSON.stringify(this.autohide) + ); + autoBtn.innerText = + `Autohide: ${this.autohide ? 
"ON" : "OFF"}`; + }; + + const invertBtn = this.doc.createElement("button"); + invertBtn.className = 'ttn-btn' + invertBtn.id = 'invertBtn' + invertBtn.innerText = `Wheel: ${this.invertctrl ? "ZOOM" : "SCROLL"}`; + invertBtn.onclick = () => { + this.invertctrl = !this.invertctrl; + localStorage.setItem(STORAGE_KEYS.INVERT, + JSON.stringify(this.invertctrl) + ); + invertBtn.innerText = + `Wheel: ${this.invertctrl ? "ZOOM" : "SCROLL"}`; + }; + + const slideBtn = this.doc.createElement("button"); + slideBtn.className = 'ttn-btn' + slideBtn.id = 'slideBtn' + slideBtn.innerText = `Slideshow: ${this.slideshow ? "ON" : "OFF"}`; + slideBtn.onclick = () => { + this.setSlideshow(!this.slideshow) + }; + + const fitScrnBtn = this.doc.createElement("button"); + fitScrnBtn.className = 'ttn-btn' + fitScrnBtn.id = 'fitScrnBtn' + fitScrnBtn.innerText = `Fit to Screen: ${this.fitscreentoggle ? "ON" : "OFF"}`; + fitScrnBtn.onclick = () => { + this.fitscreentoggle = !this.fitscreentoggle; + localStorage.setItem(STORAGE_KEYS.FITSCREEN, + JSON.stringify(this.fitscreentoggle)) + fitScrnBtn.innerText = `Fit to Screen: ${this.fitscreentoggle ? "ON" : "OFF"}`; + if (this.fitscreentoggle) this.fitToScreen() + } + + const infoEl = this.doc.createElement("p") + infoEl.textContent = "Up Arrow - Hide/Show UI\nDown Arrow - Toggle Slideshow\nLeft Arrow - Previous Image\nRight Arrow - Next Image\nF - Fit image to window" + + menu.appendChild(autoBtn); + menu.appendChild(this.doc.createElement("br")); + menu.appendChild(invertBtn); + menu.appendChild(this.doc.createElement("br")); + menu.appendChild(slideBtn); + if (this.mode != 'fullscreen') { + menu.appendChild(this.doc.createElement("br")); + menu.appendChild(fitScrnBtn); + } + + + this.wrapper.appendChild(menu); + this.settingsMenu = menu; + } + + setSlideshow(enabled) { + this.slideshow = enabled; + this.wrapper.classList.toggle("slideshow", enabled); + + if (this.settingsMenu) { + const slideBtn = this.settingsMenu.querySelector('#slideBtn'); + if (slideBtn) { + slideBtn.innerText = `Slideshow: ${this.slideshow ? 
"ON" : "OFF"}`; + } + } + + if (enabled) { + if (!this.comparing) this.setImage(-1); + if (this.autohide) this.toggleUI(false); + } else { + this.toggleUI(true); + } + } + + /* ================= EVENTS ================= */ + _onKeyDown = (e) => { + if (e.code === "ArrowLeft") { + e.preventDefault(); + this.prev(e.ctrlKey, e.shiftKey); + } + + if (e.code === "ArrowRight") { + e.preventDefault(); + this.next(e.ctrlKey, e.shiftKey); + } + + if (e.code === "ArrowDown") { + this.setSlideshow(!this.slideshow) + } + + if (e.code === "ArrowUp") { + this.toggleUI(); + } + + if (e.code === "Escape") { + if (this.mode === "fullscreen") { + if (this.doc.fullscreenElement) { + this.doc.exitFullscreen().catch(() => {}); + } + } else { + this.doc.defaultView.close(); + } + } + + if (e.code === "KeyF") { + this.fitToScreen() + } + } + + _onWheel = (e) => { + e.preventDefault(); + + const isZoom = (this.invertctrl && !e.ctrlKey) || + (!this.invertctrl && e.ctrlKey); + + if (isZoom) { + this.zoomImage(e); + return + } + + const now = performance.now(); + if (now - this._lastWheelTime < 40) return; + this._lastWheelTime = now; + + if (e.deltaY > 0) this.next(); + else this.prev(); + } + + _onMouseDown = (e) => { + if (!this._isOverUI(e.clientX, e.clientY)) { + e.preventDefault(); + this.dragging = true; + this.dragStartX = e.clientX; + this.dragStartY = e.clientY; + } + } + + _onMouseMove = (e) => { + if (this.dragging) { + const dx = e.clientX - this.dragStartX; + const dy = e.clientY - this.dragStartY; + + this.offsetX += dx; + this.offsetY += dy; + + this.dragStartX = e.clientX; + this.dragStartY = e.clientY; + + this.applyTransform(); + } + if (this.slideshow){ + if (this._isOverUI(e.clientX, e.clientY)) { + if (this.previewBar.classList.contains("hidden")) { + this.toggleUI(true); + } else { + this._reset_hideUI_Timeout(); + } + } + } + } + + _onMouseUp = (e) => { this.dragging = false; } + + _onClick = (e) => { + if (!this._isOverUI(e.clientX, e.clientY) && this.slideshow && this.autohide) { + this.toggleUI(false) + } + } + + _onDblClick = (e) => { + if (!this._isOverUI(e.clientX, e.clientY)) { + this.resetTransform(); + } + } + + _onFullscreenChange = () => { + if (this.doc.fullscreenElement) { + requestAnimationFrame(() => { + this.applyPreviewTranslation(); + }); + return; + } + + TTNViewer.fullscreenInstance = null; + this.destroy(); + }; + + attachEvents() { + this.doc.addEventListener("keydown", this._onKeyDown); + this.doc.addEventListener("wheel", this._onWheel, this._wheelOptions); + this.doc.addEventListener("mousedown", this._onMouseDown); + this.wrapper.addEventListener("mousemove", this._onMouseMove); + this.doc.addEventListener("mouseup", this._onMouseUp) + this.doc.addEventListener("click", this._onClick); + this.doc.addEventListener("dblclick", this._onDblClick); + this.doc.addEventListener("fullscreenchange", this._onFullscreenChange); + + this._lastWrapperSize = { w: 0, h: 0 }; + this._resizeObserver = new ResizeObserver(() => { + if (this._resizing) return; + const w = this.wrapper.clientWidth; + const h = this.wrapper.clientHeight; + + if (w === this._lastWrapperSize.w && + h === this._lastWrapperSize.h) { + return; + } + + this._lastWrapperSize = { w, h }; + + this._resizing = true; + + requestAnimationFrame(() => { + try { + if (this.fitscreentoggle) { + this.fitToScreen(); + } + this.applyPreviewTranslation(); + } finally { + this._resizing = false; + } + }); + }); + + this._resizeObserver.observe(this.wrapper); + } + + destroy() { + TTNViewer.instances.delete(this); + + 
this.doc.removeEventListener("keydown", this._onKeyDown); + this.doc.removeEventListener("wheel", this._onWheel, this._wheelOptions); + this.doc.removeEventListener("mousedown", this._onMouseDown); + this.wrapper.removeEventListener("mousemove", this._onMouseMove); + this.doc.removeEventListener("mouseup", this._onMouseUp) + this.doc.removeEventListener("click", this._onClick); + this.doc.removeEventListener("dblclick", this._onDblClick); + this.doc.removeEventListener("fullscreenchange", this._onFullscreenChange); + + if (this._resizeObserver) { + this._resizeObserver.disconnect(); + this._resizeObserver = null; + } + + this.wrapper?.remove(); + + } +} + +/* ========================================================= + LAUNCHERS +========================================================= */ + +function _getSelectedNode() { + const graphcanvas = LGraphCanvas.active_canvas; + if (graphcanvas.selected_nodes && + Object.keys(graphcanvas.selected_nodes).length === 1) { + return Object.values(graphcanvas.selected_nodes)[0]; + } + return null; +} + +function _getViewerNode() { + const node = _getSelectedNode() + if (node) return node + + let defaultNodeID = JSON.parse(localStorage.getItem(STORAGE_KEYS.DEFAULTNODE)) + if (defaultNodeID) { + let defaultNode = app.graph._nodes_by_id[defaultNodeID] + if (defaultNode) return defaultNode + } + + return null; +} + +export function _setDefaultFullscreenNode() { + let selectedNode = _getSelectedNode(); + if (selectedNode) { + localStorage.setItem(STORAGE_KEYS.DEFAULTNODE, JSON.stringify(selectedNode.id)); + } else { + localStorage.removeItem(STORAGE_KEYS.DEFAULTNODE); + } +} + +export function openFullscreenApp(node) { + if (TTNViewer.fullscreenInstance) return; + TTNViewer.fullscreenInstance = + new TTNViewer(node, document, "fullscreen"); +} + +export function openPopoutViewer(node) { + const win = window.open("", "_blank","width=512,height=512,resizable=yes"); + if (!win) return; + + TTN_POPOUTS.add(win); + win.addEventListener("beforeunload", () => { + TTN_POPOUTS.delete(win); + }); + + win.document.write(` + + TTN Viewer - [${node.id}] ${node.title} + `); + win.document.close(); + + new TTNViewer(node, win.document, "popout"); +} + +window.addEventListener("beforeunload", () => { + for (const win of TTN_POPOUTS) { + try { + win.close(); + } catch {} + } +}); + +/* ========================================================= + HOTKEYS +========================================================= */ + +document.addEventListener("keydown", (e) => { + if (e.code === "F11" && e.shiftKey) { + const node = _getViewerNode(); + if (node) openFullscreenApp(node); + } + + if (e.code === "F10" && e.shiftKey) { + const node = _getViewerNode(); + if (node) openPopoutViewer(node); + } +}); \ No newline at end of file diff --git a/zavodik/nodes/ComfyUI_tinyterraNodes/js/ttNinterface.js b/zavodik/nodes/ComfyUI_tinyterraNodes/js/ttNinterface.js new file mode 100644 index 0000000000000000000000000000000000000000..5964dc1fc8c0ec3b1db7b8cd75144930f158b2ef --- /dev/null +++ b/zavodik/nodes/ComfyUI_tinyterraNodes/js/ttNinterface.js @@ -0,0 +1,537 @@ +import { app } from "../../scripts/app.js"; + +const customPipeLineLink = "#7737AA" +const customPipeLineSDXLLink = "#0DC52B" +const customIntLink = "#29699C" +const customXYPlotLink = "#74DA5D" +const customLoraStackLink = "#87C7B7" +const customStringLink = "#7CBB1A" + +var customLinkColors = JSON.parse(localStorage.getItem('Comfy.Settings.ttN.customLinkColors')) || {}; +if (!customLinkColors["PIPE_LINE"] || 
!LGraphCanvas.link_type_colors["PIPE_LINE"]) {customLinkColors["PIPE_LINE"] = customPipeLineLink;} +if (!customLinkColors["PIPE_LINE_SDXL"] || !LGraphCanvas.link_type_colors["PIPE_LINE_SDXL"]) {customLinkColors["PIPE_LINE_SDXL"] = customPipeLineSDXLLink;} +if (!customLinkColors["INT"] || !LGraphCanvas.link_type_colors["INT"]) {customLinkColors["INT"] = customIntLink;} +if (!customLinkColors["XYPLOT"] || !LGraphCanvas.link_type_colors["XYPLOT"]) {customLinkColors["XYPLOT"] = customXYPlotLink;} +if (!customLinkColors["ADV_XYPLOT"] || !LGraphCanvas.link_type_colors["ADV_XYPLOT"]) {customLinkColors["ADV_XYPLOT"] = customXYPlotLink;} +if (!customLinkColors["LORA_STACK"] || !LGraphCanvas.link_type_colors["LORA_STACK"]) {customLinkColors["LORA_STACK"] = customLoraStackLink;} +if (!customLinkColors["CONTROL_NET_STACK"] || !LGraphCanvas.link_type_colors["CONTROL_NET_STACK"]) {customLinkColors["CONTROL_NET_STACK"] = customLoraStackLink;} +if (!customLinkColors["STRING"] || !LGraphCanvas.link_type_colors["STRING"]) {customLinkColors["STRING"] = customStringLink;} + +localStorage.setItem('Comfy.Settings.ttN.customLinkColors', JSON.stringify(customLinkColors)); + +app.registerExtension({ + name: "comfy.ttN.interface", + init() { + function adjustToGrid(val, gridSize) { + return Math.round(val / gridSize) * gridSize; + } + + function moveNodeBasedOnKey(e, node, gridSize, shiftMult) { + switch (e.code) { + case 'ArrowUp': + node.pos[1] -= gridSize * shiftMult; + break; + case 'ArrowDown': + node.pos[1] += gridSize * shiftMult; + break; + case 'ArrowLeft': + node.pos[0] -= gridSize * shiftMult; + break; + case 'ArrowRight': + node.pos[0] += gridSize * shiftMult; + break; + } + node.setDirtyCanvas(true, true); + } + + function keyMoveNode(e, node) { + let gridSize = JSON.parse(localStorage.getItem('Comfy.Settings.Comfy.SnapToGrid.GridSize')); + gridSize = gridSize ? parseInt(gridSize) : 1; + let shiftMult = e.shiftKey ? 
10 : 1;
+
+ node.pos[0] = adjustToGrid(node.pos[0], gridSize);
+ node.pos[1] = adjustToGrid(node.pos[1], gridSize);
+
+ moveNodeBasedOnKey(e, node, gridSize, shiftMult);
+ }
+
+ function getSelectedNodes(e) {
+ const inputField = e.composedPath()[0];
+ if (inputField.tagName === "TEXTAREA") return;
+ if (e.ctrlKey && ['ArrowUp', 'ArrowDown', 'ArrowLeft', 'ArrowRight'].includes(e.code)) {
+ let graphcanvas = LGraphCanvas.active_canvas;
+ for (let node in graphcanvas.selected_nodes) {
+ keyMoveNode(e, graphcanvas.selected_nodes[node]);
+ }
+ }
+ }
+
+ window.addEventListener("keydown", getSelectedNodes, true);
+
+ LGraphCanvas.prototype.ttNcreateDialog = function (htmlContent, onOK, onCancel) {
+ var dialog = document.createElement("div");
+ dialog.is_modified = false;
+ dialog.className = "ttN-dialog";
+ dialog.innerHTML = htmlContent + "<button id='ok'>OK</button>";
+
+ dialog.close = function() {
+ if (dialog.parentNode) {
+ dialog.parentNode.removeChild(dialog);
+ }
+ };
+
+ var inputs = Array.from(dialog.querySelectorAll("input, select"));
+
+ inputs.forEach(input => {
+ input.addEventListener("keydown", function(e) {
+ dialog.is_modified = true;
+ if (e.keyCode == 27) { // ESC
+ onCancel && onCancel();
+ dialog.close();
+ } else if (e.keyCode == 13) { // Enter
+ onOK && onOK(dialog, inputs.map(input => input.value));
+ dialog.close();
+ } else if (e.keyCode != 13 && e.target.localName != "textarea") {
+ return;
+ }
+ e.preventDefault();
+ e.stopPropagation();
+ });
+ });
+
+ var graphcanvas = LGraphCanvas.active_canvas;
+ var canvas = graphcanvas.canvas;
+
+ var rect = canvas.getBoundingClientRect();
+ var offsetx = -20;
+ var offsety = -20;
+ if (rect) {
+ offsetx -= rect.left;
+ offsety -= rect.top;
+ }
+
+ if (event) {
+ dialog.style.left = event.clientX + offsetx + "px";
+ dialog.style.top = event.clientY + offsety + "px";
+ } else {
+ dialog.style.left = canvas.width * 0.5 + offsetx + "px";
+ dialog.style.top = canvas.height * 0.5 + offsety + "px";
+ }
+
+ var button = dialog.querySelector("#ok");
+ button.addEventListener("click", function() {
+ onOK && onOK(dialog, inputs.map(input => input.value));
+ dialog.close();
+ });
+
+ canvas.parentNode.appendChild(dialog);
+
+ if (inputs.length) inputs[0].focus();
+
+ var dialogCloseTimer = null;
+ dialog.addEventListener("mouseleave", function(e) {
+ if (!dialog.is_modified && LiteGraph.dialog_close_on_mouse_leave)
+ dialogCloseTimer = setTimeout(dialog.close, LiteGraph.dialog_close_on_mouse_leave_delay);
+ });
+ dialog.addEventListener("mouseenter", function(e) {
+ if (LiteGraph.dialog_close_on_mouse_leave && dialogCloseTimer) clearTimeout(dialogCloseTimer);
+ });
+
+ return dialog;
+ };
+
+ LGraphCanvas.prototype.ttNsetNodeDimension = function (node) {
+ const nodeWidth = node.size[0];
+ const nodeHeight = node.size[1];
+
+ let input_html = "";
+ input_html += "<input type='number' class='value' value='" + nodeWidth + "'/><input type='number' class='value' value='" + nodeHeight + "'/>";
+
+ LGraphCanvas.prototype.ttNcreateDialog("<span class='name'>Width/Height</span>" + input_html,
+ function(dialog, values) {
+ var widthValue = Number(values[0]) ? values[0] : nodeWidth;
+ var heightValue = Number(values[1]) ? values[1] : nodeHeight;
+ let sz = node.computeSize();
+ node.setSize([Math.max(sz[0], widthValue), Math.max(sz[1], heightValue)]);
+ if (dialog.parentNode) {
+ dialog.parentNode.removeChild(dialog);
+ }
+ node.setDirtyCanvas(true, true);
+ },
+ null
+ );
+ };
+
+ LGraphCanvas.prototype.ttNsetSlotTypeColor = function(slot){
+ var slotColor = LGraphCanvas.link_type_colors[slot.output.type].toUpperCase();
+ var slotType = slot.output.type;
+ // Check if the color is in the correct format
+ if (!/^#([0-9A-F]{3}){1,2}$/i.test(slotColor)) {
+ slotColor = "#FFFFFF";
+ }
+
+ // Check if browser supports color input type
+ var inputType = "color";
+ var inputID = " id='colorPicker'";
+ var inputElem = document.createElement("input");
+ inputElem.setAttribute("type", inputType);
+ if (inputElem.type !== "color") {
+ // If it doesn't, fall back to text input
+ inputType = "text";
+ inputID = " ";
+ }
+
+ let input_html = "<input type='" + inputType + "'" + inputID + " value='" + slotColor + "'/>";
+ input_html += "<button id='Default'>DEFAULT</button>"; // Add a default button
+ input_html += "<button id='reset'>RESET</button>"; // Add a reset button
+
+ var dialog = LGraphCanvas.prototype.ttNcreateDialog("<span class='name'>" + slotType + "</span>" + input_html,
+ function(dialog, values){
+ var hexColor = values[0].toUpperCase();
+
+ if (!/^#([0-9A-F]{3}){1,2}$/i.test(hexColor)) {
+ return
+ }
+
+ if (hexColor === slotColor) {
+ return
+ }
+
+ var customLinkColors = JSON.parse(localStorage.getItem('Comfy.Settings.ttN.customLinkColors')) || {};
+ if (!customLinkColors[slotType + "_ORIG"]) {customLinkColors[slotType + "_ORIG"] = slotColor};
+ customLinkColors[slotType] = hexColor;
+ localStorage.setItem('Comfy.Settings.ttN.customLinkColors', JSON.stringify(customLinkColors));
+
+ app.canvas.default_connection_color_byType[slotType] = hexColor;
+ LGraphCanvas.link_type_colors[slotType] = hexColor;
+ }
+ );
+
+ var resetButton = dialog.querySelector("#reset");
+ resetButton.addEventListener("click", function() {
+ var colorInput = dialog.querySelector("input[type='" + inputType + "']");
+ colorInput.value = slotColor;
+ });
+
+ var defaultButton = dialog.querySelector("#Default");
+ defaultButton.addEventListener("click", function() {
+ var customLinkColors = JSON.parse(localStorage.getItem('Comfy.Settings.ttN.customLinkColors')) || {};
+ if (customLinkColors[slotType+"_ORIG"]) {
+ app.canvas.default_connection_color_byType[slotType] = customLinkColors[slotType+"_ORIG"];
+ LGraphCanvas.link_type_colors[slotType] = customLinkColors[slotType+"_ORIG"];
+
+ delete customLinkColors[slotType+"_ORIG"];
+ delete customLinkColors[slotType];
+ }
+ localStorage.setItem('Comfy.Settings.ttN.customLinkColors', JSON.stringify(customLinkColors));
+ dialog.close()
+ })
+
+ var colorPicker = dialog.querySelector("input[type='" + inputType + "']");
+ colorPicker.addEventListener("focusout", function(e) {
+ this.focus();
+ });
+ };
+
+ LGraphCanvas.prototype.ttNdefaultBGcolor = function(node, defaultBGColor){
+ setTimeout(() => {
+ if (defaultBGColor !== 'default' && !node.color) {
+ node.addProperty('ttNbgOverride', defaultBGColor);
+ node.color=defaultBGColor.color;
+ node.bgcolor=defaultBGColor.bgcolor;
+ }
+
+ if (node.color && node.properties.ttNbgOverride) {
+ if (node.properties.ttNbgOverride !== defaultBGColor && node.color === node.properties.ttNbgOverride.color) {
+ if (defaultBGColor === 'default') {
+ delete node.properties.ttNbgOverride
+ delete node.color
+ delete node.bgcolor
+ } else {
+ node.properties.ttNbgOverride = defaultBGColor
+ node.color=defaultBGColor.color;
+ node.bgcolor=defaultBGColor.bgcolor;
+ }
+ }
+
+ if (node.properties.ttNbgOverride !==
defaultBGColor && node.color !== node.properties.ttNbgOverride?.color) { + delete node.properties.ttNbgOverride + } + } + }, 0); + }; + + LGraphCanvas.prototype.ttNfixNodeSize = function(node){ + setTimeout(() => { + node.onResize?.(node.size); + }, 0); + }; + + LGraphCanvas.ttNonShowLinkStyles = function(value, options, e, menu, node) { + new LiteGraph.ContextMenu( + LiteGraph.LINK_RENDER_MODES, + { event: e, callback: inner_clicked, parentMenu: menu, node: node } + ); + + function inner_clicked(v) { + if (!node) { + return; + } + var kV = Object.values(LiteGraph.LINK_RENDER_MODES).indexOf(v); + + localStorage.setItem('Comfy.Settings.Comfy.LinkRenderMode', JSON.stringify(String(kV))); + + app.canvas.links_render_mode = kV; + app.graph.setDirtyCanvas(true); + } + + return false; + }; + + LGraphCanvas.ttNlinkStyleBorder = function(value, options, e, menu, node) { + new LiteGraph.ContextMenu( + [false, true], + { event: e, callback: inner_clicked, parentMenu: menu, node: node } + ); + + function inner_clicked(v) { + if (!node) { + return; + } + + localStorage.setItem('Comfy.Settings.ttN.links_render_border', JSON.stringify(v)); + + app.canvas.render_connections_border = v; + } + + return false; + }; + + LGraphCanvas.ttNlinkStyleShadow = function(value, options, e, menu, node) { + new LiteGraph.ContextMenu( + [false, true], + { event: e, callback: inner_clicked, parentMenu: menu, node: node } + ); + + function inner_clicked(v) { + if (!node) { + return; + } + + localStorage.setItem('Comfy.Settings.ttN.links_render_shadow', JSON.stringify(v)); + + app.canvas.render_connections_shadows = v; + } + + return false; + }; + + LGraphCanvas.ttNsetDefaultBGColor = function(value, options, e, menu, node) { + if (!node) { + throw "no node for color"; + } + + var values = []; + values.push({ + value: null, + content: + "No Color" + }); + + for (var i in LGraphCanvas.node_colors) { + var color = LGraphCanvas.node_colors[i]; + var value = { + value: i, + content: + "" + + i + + "" + }; + values.push(value); + } + new LiteGraph.ContextMenu(values, { + event: e, + callback: inner_clicked, + parentMenu: menu, + node: node + }); + + function inner_clicked(v) { + if (!node) { + return; + } + + var defaultBGColor = v.value ? 
LGraphCanvas.node_colors[v.value] : 'default';
+
+ localStorage.setItem('Comfy.Settings.ttN.defaultBGColor', JSON.stringify(defaultBGColor));
+
+ for (var i in app.graph._nodes) {
+ LGraphCanvas.prototype.ttNdefaultBGcolor(app.graph._nodes[i], defaultBGColor);
+ }
+
+ node.setDirtyCanvas(true, true);
+ }
+
+ return false;
+ };
+
+ LGraphCanvas.prototype.ttNupdateRenderSettings = function (app) {
+ let showLinkBorder = JSON.parse(localStorage.getItem('Comfy.Settings.ttN.links_render_border'));
+ if (showLinkBorder !== null) {app.canvas.render_connections_border = showLinkBorder}
+
+ let showLinkShadow = JSON.parse(localStorage.getItem('Comfy.Settings.ttN.links_render_shadow'));
+ if (showLinkShadow !== null) {app.canvas.render_connections_shadows = showLinkShadow}
+
+ let showExecOrder = localStorage.getItem('Comfy.Settings.ttN.showExecutionOrder');
+ if (showExecOrder === 'true') {app.canvas.render_execution_order = true}
+ else {app.canvas.render_execution_order = false}
+
+ var customLinkColors = JSON.parse(localStorage.getItem('Comfy.Settings.ttN.customLinkColors')) || {};
+ Object.assign(app.canvas.default_connection_color_byType, customLinkColors);
+ Object.assign(LGraphCanvas.link_type_colors, customLinkColors);
+ }
+ },
+
+ beforeRegisterNodeDef(nodeType, nodeData, app) {
+ const originalGetSlotMenuOptions = nodeType.prototype.getSlotMenuOptions;
+ nodeType.prototype.getSlotMenuOptions = function (slot) {
+ originalGetSlotMenuOptions?.apply(this, [slot]);
+ let menu_info = [];
+ if (
+ slot &&
+ slot.output &&
+ slot.output.links &&
+ slot.output.links.length
+ ) {
+ menu_info.push({ content: "Disconnect Links", slot: slot });
+ }
+ var _slot = slot.input || slot.output;
+ if (_slot.removable){
+ menu_info.push(
+ _slot.locked
+ ? "Cannot remove"
+ : { content: "Remove Slot", slot: slot }
+ );
+ }
+ if (!_slot.nameLocked){
+ menu_info.push({ content: "Rename Slot", slot: slot });
+ }
+
+ menu_info.push({ content: "🌏 Slot Type Color", slot: slot, callback: () => { LGraphCanvas.prototype.ttNsetSlotTypeColor(slot) } });
+ menu_info.push({ content: "🌏 Show Link Border", has_submenu: true, slot: slot, callback: LGraphCanvas.ttNlinkStyleBorder });
+ menu_info.push({ content: "🌏 Show Link Shadow", has_submenu: true, slot: slot, callback: LGraphCanvas.ttNlinkStyleShadow });
+ menu_info.push({ content: "🌏 Link Style", has_submenu: true, slot: slot, callback: LGraphCanvas.ttNonShowLinkStyles });
+
+ return menu_info;
+ }
+ },
+
+ setup() {
+ LGraphCanvas.prototype.ttNupdateRenderSettings(app);
+ },
+ nodeCreated(node) {
+ LGraphCanvas.prototype.ttNfixNodeSize(node);
+ let defaultBGColor = JSON.parse(localStorage.getItem('Comfy.Settings.ttN.defaultBGColor'));
+ if (defaultBGColor) {LGraphCanvas.prototype.ttNdefaultBGcolor(node, defaultBGColor)};
+ },
+ loadedGraphNode(node, app) {
+ LGraphCanvas.prototype.ttNupdateRenderSettings(app);
+
+ let defaultBGColor = JSON.parse(localStorage.getItem('Comfy.Settings.ttN.defaultBGColor'));
+ if (defaultBGColor) {LGraphCanvas.prototype.ttNdefaultBGcolor(node, defaultBGColor)};
+ },
+});
+
+var styleElement = document.createElement("style");
+const cssCode = `
+.ttN-dialog {
+ top: 10px;
+ left: 10px;
+ min-height: 1em;
+ background-color: var(--comfy-menu-bg);
+ font-size: 1.2em;
+ box-shadow: 0 0 7px black !important;
+ z-index: 10;
+ display: grid;
+ border-radius: 7px;
+ padding: 7px 7px;
+ position: fixed;
+}
+.ttN-dialog .name {
+ display: inline-block;
+ min-height: 1.5em;
+ font-size: 14px;
+ font-family: sans-serif;
+ color: var(--descrip-text);
padding: 0; + vertical-align: middle; + justify-self: center; +} +.ttN-dialog input, +.ttN-dialog textarea, +.ttN-dialog select { + margin: 3px; + min-width: 60px; + min-height: 1.5em; + background-color: var(--comfy-input-bg); + border: 2px solid; + border-color: var(--border-color); + color: var(--input-text); + border-radius: 14px; + padding-left: 10px; + outline: none; +} + +.ttN-dialog #colorPicker { + margin: 0px; + min-width: 100%; + min-height: 2.5em; + border-radius: 0px; + padding: 0px 2px 0px 2px; + border: unset; +} + +.ttN-dialog textarea { + min-height: 150px; +} + +.ttN-dialog button { + margin-top: 3px; + vertical-align: top; + background-color: #999; + border: 0; + padding: 4px 18px; + border-radius: 20px; + cursor: pointer; +} + +.ttN-dialog button.rounded, +.ttN-dialog input.rounded { + border-radius: 0 12px 12px 0; +} + +.ttN-dialog .helper { + overflow: auto; + max-height: 200px; +} + +.ttN-dialog .help-item { + padding-left: 10px; +} + +.ttN-dialog .help-item:hover, +.ttN-dialog .help-item.selected { + cursor: pointer; + background-color: white; + color: black; +} +` +styleElement.innerHTML = cssCode +document.head.appendChild(styleElement); diff --git a/zavodik/nodes/ComfyUI_tinyterraNodes/js/ttNwidgets.js b/zavodik/nodes/ComfyUI_tinyterraNodes/js/ttNwidgets.js new file mode 100644 index 0000000000000000000000000000000000000000..b46d5be5e9f4cd8c158b1711d76645fb1ff241d7 --- /dev/null +++ b/zavodik/nodes/ComfyUI_tinyterraNodes/js/ttNwidgets.js @@ -0,0 +1,154 @@ +import { app } from "../../scripts/app.js"; +import { ComfyWidgets } from "../../scripts/widgets.js"; + +class SeedControl { + constructor(node) { + this.node = node; + + for (const [i, w] of this.node.widgets.entries()) { + if (w.name === "seed" || w.name === "noise_seed") { + this.seedWidget = w; + } + else if (w.name === "control_after_generate" || w.name === "control_before_generate") { + this.controlWidget = w; + } + } + if (!this.seedWidget) { + throw new Error("Something's wrong; expected seed widget"); + } + const randMax = Math.min(1125899906842624, this.seedWidget.options.max); + const randMin = Math.max(0, this.seedWidget.options.min); + const randomRange = (randMax - Math.max(0, randMin)) / (this.seedWidget.options.step / 10); + this.randomSeedButton = this.node.addWidget("button", "🎲 New Fixed Random", null, () => { + this.seedWidget.value = + Math.floor(Math.random() * randomRange) * (this.seedWidget.options.step / 10) + randMin; + this.controlWidget.value = "fixed"; + }, { serialize: false }); + + this.seedWidget.linkedWidgets = [this.randomSeedButton, this.controlWidget]; + } +} + +function addTextDisplay(nodeType) { + const onNodeCreated = nodeType.prototype.onNodeCreated; + nodeType.prototype.onNodeCreated = function () { + const r = onNodeCreated?.apply(this, arguments); + const w = ComfyWidgets["STRING"](this, "display", ["STRING", { multiline: true, placeholder: " " }], app).widget; + w.inputEl.readOnly = true; + w.inputEl.style.opacity = 0.7; + w.inputEl.style.cursor = "auto"; + return r; + }; + + const onExecuted = nodeType.prototype.onExecuted; + nodeType.prototype.onExecuted = function (message) { + onExecuted?.apply(this, arguments); + + for (const widget of this.widgets) { + if (widget.type === "customtext" && widget.name === "display" && widget.inputEl.readOnly === true) { + widget.value = message.text.join(''); + } + } + + this.onResize?.(this.size); + }; +} + +function overwriteSeedControl(nodeType) { + const onNodeCreated = nodeType.prototype.onNodeCreated; + 
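// Wrap, rather than replace, onNodeCreated so behaviour attached by other
+ // extensions still runs first; SeedControl is then bolted onto the node.
+ // The /10 applied to options.step in SeedControl's random-range maths
+ // compensates for the 10x-scaled step the frontend appears to store on
+ // number widgets, so the button lands on valid seed values.
+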
nodeType.prototype.onNodeCreated = function () { + onNodeCreated ? onNodeCreated.apply(this, arguments) : undefined; + this.seedControl = new SeedControl(this); + } +} + +const HAS_EXECUTED = Symbol(); +class IndexControl { + constructor(node) { + this.node = node; + this.node.properties = this.node.properties || {}; + for (const [i, w] of this.node.widgets.entries()) { + if (w.name === "index") { + this.indexWidget = w; + } + else if (w.name === "index_control") { + this.controlWidget = w; + } else if (w.name === "text") { + this.textWidget = w; + } + } + + if (!this.indexWidget) { + throw new Error("Something's wrong; expected index widget"); + } + + const applyWidgetControl = () => { + var v = this.controlWidget.value; + + //number + let min = this.indexWidget.options.min; + let max = this.textWidget.value.split("\n").length - 1; + // limit to something that javascript can handle + max = Math.min(1125899906842624, max); + min = Math.max(-1125899906842624, min); + + //adjust values based on valueControl Behaviour + switch (v) { + case "fixed": + break; + case "increment": + this.indexWidget.value += 1; + break; + case "decrement": + this.indexWidget.value -= 1; + break; + case "randomize": + this.indexWidget.value = Math.floor(Math.random() * (max - min + 1)) + min; + default: + break; + } + /*check if values are over or under their respective + * ranges and set them to min or max.*/ + if (this.indexWidget.value < min) this.indexWidget.value = max; + + if (this.indexWidget.value > max) + this.indexWidget.value = min; + this.indexWidget.callback(this.indexWidget.value); + }; + + this.controlWidget.beforeQueued = () => { + // Don't run on first execution + if (this.controlWidget[HAS_EXECUTED]) { + applyWidgetControl(); + } + this.controlWidget[HAS_EXECUTED] = true; + }; + + this.indexWidget.linkedWidgets = [this.controlWidget]; + } +} + +function overwriteIndexControl(nodeType) { + const onNodeCreated = nodeType.prototype.onNodeCreated; + nodeType.prototype.onNodeCreated = function () { + onNodeCreated ? 
onNodeCreated.apply(this, arguments) : undefined; + this.indexControl = new IndexControl(this); + } +} + +app.registerExtension({ + name: "comfy.ttN.widgets", + async beforeRegisterNodeDef(nodeType, nodeData, app) { + if (nodeData.name.startsWith("ttN ") && ["ttN pipeLoader_v2", "ttN pipeKSampler_v2", "ttN pipeKSamplerAdvanced_v2", "ttN pipeLoaderSDXL_v2", "ttN pipeKSamplerSDXL_v2", "ttN KSampler_v2"].includes(nodeData.name)) { + if (nodeData.output_name.includes('seed')) { + overwriteSeedControl(nodeType) + } + } + if (["ttN textDebug", "ttN advPlot range", "ttN advPlot string", "ttN advPlot combo", "ttN debugInput", "ttN textOutput", "ttN advPlot merge"].includes(nodeData.name)) { + addTextDisplay(nodeType) + } + if (nodeData.name.startsWith("ttN textCycle")) { + overwriteIndexControl(nodeType) + } + }, +}); \ No newline at end of file diff --git a/zavodik/nodes/ComfyUI_tinyterraNodes/js/ttNxyPlot.js b/zavodik/nodes/ComfyUI_tinyterraNodes/js/ttNxyPlot.js new file mode 100644 index 0000000000000000000000000000000000000000..6c6941a842741f713dcc14309cb206538420c7cd --- /dev/null +++ b/zavodik/nodes/ComfyUI_tinyterraNodes/js/ttNxyPlot.js @@ -0,0 +1,212 @@ +import { app } from "../../scripts/app.js"; +import { ttN_CreateDropdown, ttN_RemoveDropdown } from "./ttNdropdown.js"; + +function generateNumList(dictionary) { + const minimum = dictionary["min"] || 0; + const maximum = dictionary["max"] || 0; + const step = dictionary["step"] || 1; + + if (step === 0) { + return []; + } + + const result = []; + let currentValue = minimum; + + while (currentValue <= maximum) { + if (Number.isInteger(step)) { + result.push(Math.round(currentValue) + '; '); + } else { + let formattedValue = currentValue.toFixed(3); + if(formattedValue == -0.000){ + formattedValue = '0.000'; + } + if (!/\.\d{3}$/.test(formattedValue)) { + formattedValue += "0"; + } + result.push(formattedValue + "; "); + } + currentValue += step; + } + + if (maximum >= 0 && minimum >= 0) { + //low to high + return result; + } + else { + //high to low + return result.reverse(); + } +} + +let plotDict = {}; +let currentOptionsDict = {}; + +function getCurrentOptionLists(node, widget) { + const nodeId = String(node.id); + const widgetName = widget.name; + const widgetValue = widget.value.replace(/^(loader|sampler):\s/, ''); + + if (!currentOptionsDict[nodeId] || !currentOptionsDict[nodeId][widgetName]) { + currentOptionsDict[nodeId] = {...currentOptionsDict[nodeId], [widgetName]: plotDict[widgetValue]}; + } else if (currentOptionsDict[nodeId][widgetName] != plotDict[widgetValue]) { + currentOptionsDict[nodeId][widgetName] = plotDict[widgetValue]; + } +} + +function addGetSetters(node) { + if (node.widgets) + for (const w of node.widgets) { + if (w.name === "x_axis" || + w.name === "y_axis") { + let widgetValue = w.value; + + // Define getters and setters for widget values + Object.defineProperty(w, 'value', { + + get() { + return widgetValue; + }, + set(newVal) { + if (newVal !== widgetValue) { + widgetValue = newVal; + getCurrentOptionLists(node, w); + } + } + }); + } + } +} + +function dropdownCreator(node) { + if (node.widgets) { + const widgets = node.widgets.filter( + (n) => (n.type === "customtext" && n.dynamicPrompts !== false) || n.dynamicPrompts + ); + + for (const w of widgets) { + function replaceOptionSegments(selectedOption, inputSegments, cursorSegmentIndex, optionsList) { + if (selectedOption) { + inputSegments[cursorSegmentIndex] = selectedOption; + } + + return inputSegments.map(segment => verifySegment(segment, optionsList)) 
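+ // verifySegment() returns '' for any segment it cannot match against the
+ // option list, so failed segments are dropped here before rejoining.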
+ .filter(item => item !== '') + .join(''); + } + + function verifySegment(segment, optionsList) { + segment = cleanSegment(segment); + + if (isInOptionsList(segment, optionsList)) { + return segment + '; '; + } + + let matchedOptions = findMatchedOptions(segment, optionsList); + + if (matchedOptions.length === 1 || matchedOptions.length === 2) { + return matchedOptions[0]; + } + + if (isInOptionsList(formatNumberSegment(segment), optionsList)) { + return formatNumberSegment(segment) + '; '; + } + + return ''; + } + + function cleanSegment(segment) { + return segment.replace(/(\n|;| )/g, ''); + } + + function isInOptionsList(segment, optionsList) { + return optionsList.includes(segment + '; '); + } + + function findMatchedOptions(segment, optionsList) { + return optionsList.filter(option => option.toLowerCase().includes(segment.toLowerCase())); + } + + function formatNumberSegment(segment) { + if (Number(segment)) { + return Number(segment).toFixed(3); + } + + if (['0', '0.', '0.0', '0.00', '00'].includes(segment)) { + return '0.000'; + } + return segment; + } + + + const onInput = function () { + const nodeId = node.id; + const axisWidgetName = w.name[0] + '_axis'; + + let optionsList = currentOptionsDict[nodeId]?.[axisWidgetName] || []; + if (optionsList.length === 0) {return} + + const inputText = w.inputEl.value; + const cursorPosition = w.inputEl.selectionStart; + + let inputSegments = inputText.split('; '); + + const cursorSegmentIndex = inputText.substring(0, cursorPosition).split('; ').length - 1; + const currentSegment = inputSegments[cursorSegmentIndex]; + const currentSegmentLower = currentSegment.replace(/\n/g, '').toLowerCase(); + + const filteredOptionsList = optionsList.filter(option => option.toLowerCase().includes(currentSegmentLower)).map(option => option.replace(/; /g, '')); + + if (filteredOptionsList.length > 0) { + ttN_CreateDropdown(w.inputEl, filteredOptionsList, (selectedOption) => { + const verifiedText = replaceOptionSegments(selectedOption, inputSegments, cursorSegmentIndex, optionsList); + w.inputEl.value = verifiedText; + }); + } + else { + ttN_RemoveDropdown(); + const verifiedText = replaceOptionSegments(null, inputSegments, cursorSegmentIndex, optionsList); + w.inputEl.value = verifiedText; + } + }; + + w.inputEl.removeEventListener('input', onInput); + w.inputEl.addEventListener('input', onInput); + w.inputEl.removeEventListener('mouseup', onInput); + w.inputEl.addEventListener('mouseup', onInput); + } + } +} + +app.registerExtension({ + name: "comfy.ttN.xyPlot", + async beforeRegisterNodeDef(nodeType, nodeData, app) { + if (nodeData.name === "ttN xyPlot") { + plotDict = nodeData.input.hidden.plot_dict[0]; + + for (const key in plotDict) { + const value = plotDict[key]; + if (Array.isArray(value)) { + let updatedValues = []; + for (const v of value) { + updatedValues.push(v + '; '); + } + plotDict[key] = updatedValues; + } else if (typeof(value) === 'object') { + plotDict[key] = generateNumList(value); + } else { + plotDict[key] = value + '; '; + } + } + plotDict["None"] = []; + plotDict["---------------------"] = []; + } + }, + nodeCreated(node) { + if (node.constructor.title === "xyPlot") { + addGetSetters(node); + dropdownCreator(node); + + } + } +}); \ No newline at end of file diff --git a/zavodik/nodes/ComfyUI_tinyterraNodes/js/ttNxyPlotAdv.js b/zavodik/nodes/ComfyUI_tinyterraNodes/js/ttNxyPlotAdv.js new file mode 100644 index 0000000000000000000000000000000000000000..74386b20ef96ffaed81c91afa47d06c1005887be --- /dev/null +++ 
b/zavodik/nodes/ComfyUI_tinyterraNodes/js/ttNxyPlotAdv.js @@ -0,0 +1,814 @@ +import { app } from "../../scripts/app.js"; +import { ttN_CreateDropdown, ttN_RemoveDropdown } from "./ttNdropdown.js"; + +const widgets_to_ignore = ['control_after_generate', 'empty_latent_aspect', 'empty_latent_width', 'empty_latent_height', 'batch_size'] +const valueCompletionRegex = /^\[(\d+):([^=\]]+)=(['"])([^'"]*)$/ +const widgetCompletionRegex = /^\[(\d+):([^=\]]*)$/ +const nodeCompletionRegex = /^\[([^:\]=]*)$/ +const nodeLabelRegex = /^\[(\d+)\]\s-\s(.+)$/ + +function getWidgetsOptions(node) { + const widgetsOptions = {} + const widgets = node.widgets + if (!widgets) return + for (const w of widgets) { + if (!w.type || !w.options) continue + const current_value = w.value + if (widgets_to_ignore.includes(w.name)) continue + //console.log(`WIDGET ${w.name}, ${w.type}, ${w.options}`) + if (w.name === 'seed' || (w.name === 'value' && node.constructor.title.toLowerCase() === 'seed')) { + widgetsOptions[w.name] = {'Random Seed': `${w.options.max}/${w.options.min}/${w.options.step}`} + continue + } + if (w.type === 'ttNhidden') { + if (w.options['max']) { + widgetsOptions[w.name] = {[current_value]: null} + continue + } else if (!w.options['values']) { + widgetsOptions[w.name] = {'string': null} + continue + } + } + if (w.type.startsWith('converted') || w.type === 'button') { + continue + } + if (w.type === 'toggle') { + widgetsOptions[w.name] = {'True': null, 'False': null} + continue + } + if (['customtext', 'text', 'string'].includes(w.type)) { + widgetsOptions[w.name] = {'string': null} + continue + } + if (w.type === 'number') { + widgetsOptions[w.name] = {[current_value]: null} + continue + } + let valueDict = {} + if (w.options.values) { + let vals = w.options.values; + + if (typeof w.options.values === 'function') { + vals = w.options.values() + } + + for (const v of vals) { + valueDict[v] = null + } + } + widgetsOptions[w.name] = valueDict + } + + //console.log('WIDGETS OPTIONS', widgetsOptions) + if (Object.keys(widgetsOptions).length === 0) { + return null + } + return widgetsOptions; +} + +function _addInputIDs(node, inputIDs, IDsToCheck) { + if (node.inputs) { + for (const input of node.inputs) { + if (input.link) { + let originID = node.graph.links[input.link].origin_id + inputIDs.push(originID); + if (!IDsToCheck.includes(originID)) { + IDsToCheck.push(originID); + } + } + } + } +} + +function _recursiveGetInputIDs(node) { + const inputIDs = []; + const IDsToCheck = [node.id]; + + while (IDsToCheck.length > 0) { + const currentID = IDsToCheck.pop(); + const currentNode = node.graph._nodes_by_id[currentID]; + if (currentNode.type === "ttN advanced xyPlot") { + continue + } + _addInputIDs(currentNode, inputIDs, IDsToCheck); + } + + return inputIDs; +} + +function getNodesWidgetsDict(xyNode, plotLines=false) { + const nodeWidgets = {}; + if (plotLines) { + nodeWidgets['Add Plot Line'] = {'Only Values Label': null, 'Title and Values Label': null, 'ID, Title and Values Label': null}; + } + + const xyNodeLinks = xyNode.outputs[0]?.links + if (!xyNodeLinks || xyNodeLinks.length == 0) { + nodeWidgets['Connect to advanced xyPlot for options'] = null + return nodeWidgets + } + + const plotNodeLink = xyNodeLinks[0] + const plotNodeID = xyNode.graph.links[plotNodeLink].target_id + const plotNodeTitle = xyNode.graph._nodes_by_id[plotNodeID].constructor.title + const plotNode = app.graph._nodes_by_id[plotNodeID] + + const options = getWidgetsOptions(plotNode) + if (options) { + nodeWidgets[`[${plotNodeID}] - 
${plotNodeTitle}`] = options + } + + const inputIDS = _recursiveGetInputIDs(plotNode) + for (const iID of inputIDS) { + const iNode = app.graph._nodes_by_id[iID]; + const iNodeTitle = iNode.constructor.title + if (iNodeTitle === 'advanced xyPlot') { + continue + } + const options = getWidgetsOptions(iNode) + if (!options) continue + nodeWidgets[`[${iID}] - ${iNodeTitle}`] = options + } + return nodeWidgets +} + +function getOpenExpressionContext(inputText, cursorPosition) { + const textBeforeCursor = inputText.slice(0, cursorPosition); + const expressionStart = textBeforeCursor.lastIndexOf('['); + + if (expressionStart === -1 || textBeforeCursor.indexOf(']', expressionStart) !== -1) { + return null; + } + + return { + expressionStart, + expressionBeforeCursor: textBeforeCursor.slice(expressionStart), + }; +} + +function getValueCompletionContext(inputText, cursorPosition) { + const expressionContext = getOpenExpressionContext(inputText, cursorPosition); + if (!expressionContext) { + return null; + } + + const expressionBeforeCursor = expressionContext.expressionBeforeCursor; + const match = expressionBeforeCursor.match(valueCompletionRegex); + if (!match) { + return null; + } + + const [, nodeId, rawWidgetName, quoteChar, valueQuery] = match; + const widgetName = rawWidgetName.trim(); + const replaceEndIndex = inputText.indexOf(']', cursorPosition); + + return { + nodeId, + widgetName, + lookupWidgetName: widgetName.replace(/\.append$/, ''), + quoteChar, + valueQuery, + replaceStart: expressionContext.expressionStart, + replaceEnd: replaceEndIndex === -1 ? cursorPosition : replaceEndIndex + 1, + }; +} + +function getWidgetCompletionContext(inputText, cursorPosition) { + const expressionContext = getOpenExpressionContext(inputText, cursorPosition); + if (!expressionContext) { + return null; + } + + const match = expressionContext.expressionBeforeCursor.match(widgetCompletionRegex); + if (!match) { + return null; + } + + const [, nodeId, rawWidgetQuery] = match; + const widgetStart = expressionContext.expressionStart + nodeId.length + 2; + const equalIndex = inputText.indexOf('=', widgetStart); + const bracketIndex = inputText.indexOf(']', widgetStart); + const hasEquals = equalIndex !== -1 && (bracketIndex === -1 || equalIndex < bracketIndex); + const widgetEnd = hasEquals ? equalIndex : (bracketIndex === -1 ? cursorPosition : Math.min(cursorPosition, bracketIndex)); + + return { + nodeId, + widgetQuery: rawWidgetQuery.trim(), + widgetStart, + widgetEnd, + hasEquals, + }; +} + +function getNodeCompletionContext(inputText, cursorPosition) { + const expressionContext = getOpenExpressionContext(inputText, cursorPosition); + if (!expressionContext) { + return null; + } + + const match = expressionContext.expressionBeforeCursor.match(nodeCompletionRegex); + if (!match) { + return null; + } + + const nodeStart = expressionContext.expressionStart + 1; + const colonIndex = inputText.indexOf(':', nodeStart); + const equalIndex = inputText.indexOf('=', nodeStart); + const bracketIndex = inputText.indexOf(']', nodeStart); + + const delimiters = [colonIndex, equalIndex, bracketIndex].filter((index) => index !== -1); + const firstDelimiterIndex = delimiters.length > 0 ? Math.min(...delimiters) : -1; + const hasColon = colonIndex !== -1 && (firstDelimiterIndex === -1 || colonIndex === firstDelimiterIndex); + const nodeEnd = hasColon ? colonIndex : (firstDelimiterIndex === -1 ? 
cursorPosition : Math.min(cursorPosition, firstDelimiterIndex)); + + return { + nodeQuery: match[1].trim(), + nodeStart, + nodeEnd, + hasColon, + }; +} + +function getNodeWidgetOptions(nodeWidgets, nodeId) { + const nodeKey = Object.keys(nodeWidgets).find((key) => key.startsWith(`[${nodeId}] - `)); + if (!nodeKey) { + return null; + } + + const widgetOptions = nodeWidgets[nodeKey]; + if (!widgetOptions || typeof widgetOptions !== 'object') { + return null; + } + + return widgetOptions; +} + +function getNodeWidgetValues(nodeWidgets, nodeId, widgetName, lookupWidgetName) { + const widgetOptions = getNodeWidgetOptions(nodeWidgets, nodeId); + if (!widgetOptions) { + return []; + } + + const valuesDict = widgetOptions[widgetName] ?? widgetOptions[lookupWidgetName]; + if (!valuesDict || typeof valuesDict !== 'object') { + return []; + } + + return Object.keys(valuesDict).filter((value) => value && value !== 'string'); +} + +function getNodeWidgetNames(nodeWidgets, nodeId) { + const widgetOptions = getNodeWidgetOptions(nodeWidgets, nodeId); + if (!widgetOptions) { + return []; + } + + return Object.keys(widgetOptions).filter((widgetName) => widgetName && widgetName !== 'string'); +} + +function getNodeEntries(nodeWidgets) { + return Object.keys(nodeWidgets) + .map((key) => { + const match = key.match(nodeLabelRegex); + if (!match) { + return null; + } + const [, nodeId, nodeTitle] = match; + return { + nodeId, + nodeTitle, + label: `[${nodeId}] - ${nodeTitle}`, + searchText: `${nodeId} ${nodeTitle}`, + }; + }) + .filter(Boolean); +} + +function rankAutocompleteEntries(entries, query, textSelector = (entry) => entry) { + const normalizedQuery = query.toLowerCase().trim(); + const tokens = normalizedQuery.split(/\s+/).filter(Boolean); + + if (tokens.length === 0) { + return entries; + } + + return entries + .map((entry) => { + const normalizedValue = textSelector(entry).toLowerCase(); + if (tokens.some((token) => !normalizedValue.includes(token))) { + return null; + } + + let score = 0; + + if (normalizedValue.includes(normalizedQuery)) { + score += 120; + } + if (normalizedValue.startsWith(normalizedQuery)) { + score += 60; + } + + for (const token of tokens) { + const tokenIndex = normalizedValue.indexOf(token); + if (tokenIndex === 0) { + score += 24; + } + score += Math.max(0, 12 - Math.min(tokenIndex, 12)); + } + + const firstTokenIndex = normalizedValue.indexOf(tokens[0]); + return { + entry, + score, + firstTokenIndex: firstTokenIndex === -1 ? 
Number.MAX_SAFE_INTEGER : firstTokenIndex, + normalizedValue, + }; + }) + .filter(Boolean) + .sort((a, b) => b.score - a.score || a.firstTokenIndex - b.firstTokenIndex || a.normalizedValue.localeCompare(b.normalizedValue)) + .map((item) => item.entry); +} + +function rankWidgetValues(values, query) { + const uniqueValues = [...new Set(values)]; + return rankAutocompleteEntries(uniqueValues, query); +} + +function insertWidgetValue(inputEl, inputText, context, selectedOption) { + const replacement = `[${context.nodeId}:${context.widgetName}=${context.quoteChar}${selectedOption}${context.quoteChar}]`; + const nextValue = inputText.slice(0, context.replaceStart) + replacement + inputText.slice(context.replaceEnd); + inputEl.value = nextValue; + + const cursorIndex = context.replaceStart + replacement.length; + inputEl.setSelectionRange(cursorIndex, cursorIndex); +} + +function insertWidgetName(inputEl, inputText, context, selectedWidgetName) { + const before = inputText.slice(0, context.widgetStart); + const after = inputText.slice(context.widgetEnd); + + let nextValue = before + selectedWidgetName + after; + let cursorIndex = context.widgetStart + selectedWidgetName.length; + + if (!context.hasEquals) { + nextValue = nextValue.slice(0, cursorIndex) + "='" + nextValue.slice(cursorIndex); + cursorIndex += 2; + } + + inputEl.value = nextValue; + inputEl.setSelectionRange(cursorIndex, cursorIndex); +} + +function insertNodeId(inputEl, inputText, context, selectedNodeId) { + const before = inputText.slice(0, context.nodeStart); + const after = inputText.slice(context.nodeEnd); + const separator = context.hasColon ? '' : ':'; + const nextValue = before + selectedNodeId + separator + after; + const cursorIndex = context.nodeStart + selectedNodeId.length + 1; + + inputEl.value = nextValue; + inputEl.setSelectionRange(cursorIndex, cursorIndex); +} + +function showAutocompleteOptions(inputEl, options, onSelect) { + if (options.length === 0) { + ttN_RemoveDropdown(); + return; + } + + ttN_CreateDropdown(inputEl, options, onSelect); +} + +function tryValueCompletion(inputEl, inputText, cursorPosition, nodeWidgets) { + const valueCompletionContext = getValueCompletionContext(inputText, cursorPosition); + if (!valueCompletionContext) { + return false; + } + + const widgetValues = getNodeWidgetValues( + nodeWidgets, + valueCompletionContext.nodeId, + valueCompletionContext.widgetName, + valueCompletionContext.lookupWidgetName, + ); + + const filteredValues = rankWidgetValues(widgetValues, valueCompletionContext.valueQuery); + showAutocompleteOptions(inputEl, filteredValues, (selectedOption) => { + insertWidgetValue(inputEl, inputEl.value, valueCompletionContext, selectedOption); + }); + return true; +} + +function tryWidgetCompletion(inputEl, inputText, cursorPosition, nodeWidgets) { + const widgetCompletionContext = getWidgetCompletionContext(inputText, cursorPosition); + if (!widgetCompletionContext) { + return false; + } + + const widgetNames = getNodeWidgetNames(nodeWidgets, widgetCompletionContext.nodeId); + const filteredWidgetNames = rankAutocompleteEntries(widgetNames, widgetCompletionContext.widgetQuery); + showAutocompleteOptions(inputEl, filteredWidgetNames, (selectedWidgetName) => { + insertWidgetName(inputEl, inputEl.value, widgetCompletionContext, selectedWidgetName); + }); + return true; +} + +function tryNodeCompletion(inputEl, inputText, cursorPosition, nodeWidgets) { + const nodeCompletionContext = getNodeCompletionContext(inputText, cursorPosition); + if (!nodeCompletionContext) { + 
return false; + } + + const nodeEntries = getNodeEntries(nodeWidgets); + const filteredNodeEntries = rankAutocompleteEntries(nodeEntries, nodeCompletionContext.nodeQuery, (nodeEntry) => nodeEntry.searchText); + const nodeIdByLabel = new Map(filteredNodeEntries.map((nodeEntry) => [nodeEntry.label, nodeEntry.nodeId])); + const nodeOptions = filteredNodeEntries.map((nodeEntry) => nodeEntry.label); + + showAutocompleteOptions(inputEl, nodeOptions, (selectedNodeLabel) => { + const selectedNodeId = nodeIdByLabel.get(selectedNodeLabel); + if (!selectedNodeId) { + return; + } + insertNodeId(inputEl, inputEl.value, nodeCompletionContext, selectedNodeId); + }); + return true; +} + +function dropdownCreator(node) { + if (node.widgets) { + const widgets = node.widgets.filter( + (n) => (n.type === "customtext") + ); + + for (const w of widgets) { + + const onInput = function () { + const nodeWidgets = getNodesWidgetsDict(node, true); + const inputText = w.inputEl.value; + const cursorPosition = w.inputEl.selectionStart; + + if (tryValueCompletion(w.inputEl, inputText, cursorPosition, nodeWidgets)) { + return; + } + + if (tryWidgetCompletion(w.inputEl, inputText, cursorPosition, nodeWidgets)) { + return; + } + + if (tryNodeCompletion(w.inputEl, inputText, cursorPosition, nodeWidgets)) { + return; + } + + let lines = inputText.split('\n'); + if (lines.length === 0) return; + + let cursorLineIndex = 0; + let lineStartPosition = 0; + + for (let i = 0; i < lines.length; i++) { + const lineEndPosition = lineStartPosition + lines[i].length; + if (cursorPosition <= lineEndPosition) { + cursorLineIndex = i; + break; + } + lineStartPosition = lineEndPosition + 1; + } + + ttN_CreateDropdown(w.inputEl, nodeWidgets, (selectedOption, fullpath) => { + const data = fullpath.split('###'); + const parts = data[0].split('/'); + let output; + if (parts[0] === 'Add Plot Line') { + const labelType = parts[1]; + let label; + switch (labelType) { + case 'Only Values Label': + label = 'v_label'; + break; + case 'Title and Values Label': + label = 'tv_label'; + break; + case 'ID, Title and Values Label': + label = 'idtv_label'; + break; + } + + let lastOpeningAxisBracket = -1; + let lastClosingAxisBracket = -1; + + let bracketCount = 0; + for (let i = 0; i < inputText.length; i++) { + if (inputText[i] === '[') { + bracketCount++; + } else if (inputText[i] === ']') { + bracketCount--; + } else if (inputText[i] === '<' && bracketCount === 0) { + lastOpeningAxisBracket = i; + } else if (inputText[i] === '>' && bracketCount === 0) { + lastClosingAxisBracket = i; + } + } + + const lastAxisBracket = inputText.substring(lastOpeningAxisBracket + 1, lastClosingAxisBracket).split(':')[0]; + let nextAxisBracketNumber; + + if (inputText.trim() === '') { + w.inputEl.value = `<1:${label}>\n`; + return + } + + if (lastAxisBracket) { + const lastAxisBracketNumber = Number(lastAxisBracket); + if (!isNaN(lastAxisBracketNumber)) { + nextAxisBracketNumber = lastAxisBracketNumber + 1; + output = `<${nextAxisBracketNumber}:${label}>\n`; + if (inputText[inputText.length - 1] === '\n') { + w.inputEl.value = `${inputText}${output}` + } else { + w.inputEl.value = `${inputText}\n${output}` + } + return + } + } + return + } + if (parts[0] === 'Connect to advanced xyPlot for options') { + return + } + + if (selectedOption === 'Random Seed') { + const [max, min, step] = data[1].split('/'); + + const randMax = Math.min(1125899906842624, Number(max)); + const randMin = Math.max(0, Number(min)); + const randomRange = (randMax - Math.max(0, randMin)) / 
(Number(step) / 10); + selectedOption = Math.floor(Math.random() * randomRange) * (Number(step) / 10) + randMin; + } + const nodeID = data[0].split(' - ')[0].replace('[', '').replace(']', ''); + + output = `[${nodeID}:${parts[1]}='${selectedOption}']`; + + if (inputText.trim() === '') { + output = `<1:v_label>\n` + output; + } + + if (lines[cursorLineIndex].trim() === '') { + lines[cursorLineIndex] = output; + } else { + lines.splice(cursorLineIndex + 1, 0, output); + } + + w.inputEl.value = lines.join('\n'); + + }, true); + }; + + w.inputEl.removeEventListener('input', onInput); + w.inputEl.addEventListener('input', onInput); + w.inputEl.removeEventListener('mouseup', onInput); + w.inputEl.addEventListener('mouseup', onInput); + } + } +} + +function findUpstreamXYPlot(targetID) { + const currentNode = app.graph._nodes_by_id[targetID]; + if (!currentNode) { + return + } + if (currentNode.constructor.title === 'advanced xyPlot') { + return currentNode; + } else { + if (!currentNode.outputs) { + return + } + for (const output of currentNode.outputs) { + if (output.links?.length > 0) { + for (const link of output.links) { + const xyPlotNode = findUpstreamXYPlot(app.graph.links[link].target_id) + if (xyPlotNode) { + return xyPlotNode + } + } + } + } + } +} + +function setPlotNodeOptions(currentNode, targetID=null) { + if (!targetID) { + for (const output of currentNode.outputs) { + if (output.links?.length > 0) { + for (const link of output.links) { + targetID = app.graph.links[link].target_id + } + } + } + } + const xyPlotNode = findUpstreamXYPlot(targetID) + if (!xyPlotNode) { + return + } + const widgets_dict = getNodesWidgetsDict(xyPlotNode) + const currentWidget = currentNode.widgets.find(w => w.name === 'node'); + if (currentWidget) { + currentWidget.options.values = Object.keys(widgets_dict) + } +} + +function setPlotWidgetOptions(currentNode, searchType) { + const { value } = currentNode.widgets.find(w => w.name === 'node'); + const nodeIdRegex = /\[(\d+)\]/; + const match = value.match(nodeIdRegex); + const nodeId = match ? 
parseInt(match[1], 10) : null; + if (!nodeId) return; + + const optionNode = app.graph._nodes_by_id[nodeId]; + if (!optionNode || !optionNode.widgets) return; + + const widgetsList = Object.values(optionNode.widgets) + .filter( + function(w) { + if (searchType) { + return searchType.includes(w.type) + } + } + ) + .map((w) => w.name); + + if (widgetsList) { + for (const w of currentNode.widgets) { + if (w.name === 'widget') { + w.options.values = widgetsList + } + } + } + + + const widgetWidget = currentNode.widgets.find(w => w.name === 'widget'); + const widgetWidgetValue = widgetWidget.value; + + if (searchType.includes('number')) { + const int_widgets = [ + 'seed', + 'clip_skip', + 'steps', + 'start_at_step', + 'end_at_step', + 'empty_latent_width', + 'empty_latent_height', + 'noise_seed', + ] + const float_widgets = [ + 'cfg', + 'denoise', + 'strength_model', + 'strength_clip', + 'strength', + 'scale_by', + 'lora_strength' + ] + + const rangeModeWidget = currentNode.widgets.find(w => w.name === 'range_mode'); + const rangeModeWidgetValue = rangeModeWidget.value; + + if (int_widgets.includes(widgetWidgetValue)) { + rangeModeWidget.options.values = ['step_int', 'num_steps_int'] + if (rangeModeWidgetValue === 'num_steps_float') { + rangeModeWidget.value = 'num_steps_int' + } + if (rangeModeWidgetValue === 'step_float') { + rangeModeWidget.value = 'step_int' + } + } else if (float_widgets.includes(widgetWidgetValue)) { + rangeModeWidget.options.values = ['step_float', 'num_steps_float'] + rangeModeWidget.value.replace('int', 'float') + if (rangeModeWidgetValue === 'num_steps_int') { + rangeModeWidget.value = 'num_steps_float' + } + if (rangeModeWidgetValue === 'step_int') { + rangeModeWidget.value = 'step_float' + } + } else { + rangeModeWidget.options.values = ['step_int', 'num_steps_int', 'step_float', 'num_steps_float'] + } + } + if (searchType.includes('combo')) { + const optionsWidget = optionNode.widgets.find(w => w.name === widgetWidgetValue) + if (optionsWidget) { + const values = optionsWidget.options.values + currentNode.widgets.find(w => w.name === 'start_from').options.values = values + currentNode.widgets.find(w => w.name === 'end_with').options.values = values + currentNode.widgets.find(w => w.name === 'select').options.values = values + } + } +} + +const getSetWidgets = [ + "node", + "widget", + "start_from", + "end_with", +] + +function getSetters(node, searchType) { + if (node.widgets) { + const gswidgets = node.widgets.filter(function(widget) { + return getSetWidgets.includes(widget.name); + }); + for (const w of gswidgets) { + setPlotWidgetOptions(node, searchType); + let widgetValue = w.value; + + // Define getters and setters for widget values + Object.defineProperty(w, 'value', { + get() { + return widgetValue; + }, + set(newVal) { + if (newVal !== widgetValue) { + widgetValue = newVal; + setPlotWidgetOptions(node, searchType); + } + } + }); + } + + const selectWidget = node.widgets.find(w => w.name === 'select') + if (selectWidget) { + let widgetValue = selectWidget.value; + let selectedWidget = node.widgets.find(w => w.name === 'selection'); + + Object.defineProperty(selectWidget, 'value', { + get() { + return widgetValue; + }, + set(newVal) { + if (newVal !== widgetValue) { + widgetValue = newVal; + if (selectedWidget.inputEl.value.trim() === '') { + selectedWidget.inputEl.value = newVal; + } else { + selectedWidget.inputEl.value += "\n" + newVal; + } + } + } + }) + } + } + let mouseOver = node.mouseOver; + Object.defineProperty(node, 'mouseOver', { + get() { + 
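// the getter only reports the cached flag; the setter below is the hook
+ // that re-syncs node/widget option lists whenever the cursor enters the node
+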
return mouseOver; + }, + set(newVal) { + if (newVal !== mouseOver) { + mouseOver = newVal; + if (mouseOver) { + setPlotWidgetOptions(node, searchType); + setPlotNodeOptions(node); + } + } + } + }) + +} + + +app.registerExtension({ + name: "comfy.ttN.xyPlotAdv", + beforeRegisterNodeDef(nodeType, nodeData, app) { + + /*if (nodeData.name === "ttN advPlot range") { + const origOnConnectionsChange = nodeType.prototype.onConnectionsChange; + nodeType.prototype.onConnectionsChange = function (type, slotIndex, isConnected, link_info, _ioSlot) { + const r = origOnConnectionsChange ? origOnConnectionsChange.apply(this, arguments) : undefined; + if (link_info && (slotIndex == 0 || slotIndex == 1)) { + const originID = link_info?.origin_id + const targetID = link_info?.target_id + + const currentNode = app.graph._nodes_by_id[originID]; + + setPlotNodeOptions(currentNode, targetID) + } + return r; + }; + }*/ + }, + nodeCreated(node) { + const node_title = node.constructor.title; + + if (node_title === "advanced xyPlot") { + dropdownCreator(node); + } + if (node_title === "advPlot range") { + getSetters(node, ['number',]); + } + if (node_title === "advPlot string") { + getSetters(node, ['text', 'customtext']); + } + if (node_title === "advPlot combo") { + getSetters(node, ['combo',]); + } + }, +}); diff --git a/zavodik/nodes/ComfyUI_tinyterraNodes/js/utils.js b/zavodik/nodes/ComfyUI_tinyterraNodes/js/utils.js new file mode 100644 index 0000000000000000000000000000000000000000..8af4ab8bd4a67e327828d64caf94f0e51f88dd92 --- /dev/null +++ b/zavodik/nodes/ComfyUI_tinyterraNodes/js/utils.js @@ -0,0 +1,261 @@ +import { app } from "../../scripts/app.js"; +import { api } from "../../scripts/api.js"; +import { ComfyDialog, $el } from "../../scripts/ui.js"; + + +export function rebootAPI() { + if (confirm("Are you sure you'd like to reboot the server?")) { + try { + api.fetchApi("/ttN/reboot"); + } + catch(exception) { + console.log("Failed to reboot: " + exception); + } + return true; + } + + return false; +} + +export function wait(ms = 16, value) { + return new Promise((resolve) => { + setTimeout(() => { + resolve(value); + }, ms); + }); +} + + + + + + +const CONVERTED_TYPE = "converted-widget"; +const GET_CONFIG = Symbol(); + +export function getConfig(widgetName, node) { + const { nodeData } = node.constructor; + return nodeData?.input?.required[widgetName] ?? nodeData?.input?.optional?.[widgetName]; +} + +export function hideWidget(node, widget, suffix = "") { + widget.origType = widget.type; + widget.origComputeSize = widget.computeSize; + widget.origSerializeValue = widget.serializeValue; + widget.computeSize = () => [0, -4]; // -4 is due to the gap litegraph adds between widgets automatically + widget.type = CONVERTED_TYPE + suffix; + widget.serializeValue = () => { + // Prevent serializing the widget if we have no input linked + if (!node.inputs) { + return undefined; + } + let node_input = node.inputs.find((i) => i.widget?.name === widget.name); + + if (!node_input || !node_input.link) { + return undefined; + } + return widget.origSerializeValue ? widget.origSerializeValue() : widget.value; + }; + + // Hide any linked widgets, e.g. 
seed+seedControl + if (widget.linkedWidgets) { + for (const w of widget.linkedWidgets) { + hideWidget(node, w, ":" + widget.name); + } + } +} + +export function getWidgetType(config) { + // Special handling for COMBO so we restrict links based on the entries + let type = config[0]; + if (type instanceof Array) { + type = "COMBO"; + } + return { type }; +} + +export function convertToInput(node, widget, config) { + hideWidget(node, widget); + + const { type } = getWidgetType(config); + + // Add input and store widget config for creating on primitive node + const sz = node.size; + node.addInput(widget.name, type, { + widget: { name: widget.name, [GET_CONFIG]: () => config }, + }); + + for (const widget of node.widgets) { + widget.last_y += LiteGraph.NODE_SLOT_HEIGHT; + } + + // Restore original size but grow if needed + node.setSize([Math.max(sz[0], node.size[0]), Math.max(sz[1], node.size[1])]); +} + +export function tinyterraReloadNode(node) { + // Retrieves original values or uses current ones as fallback. Options for creating a new node. + const { title: nodeTitle, color: nodeColor, bgcolor: bgColor } = node.properties.origVals || node; + const options = { + size: [...node.size], + color: nodeColor, + bgcolor: bgColor, + pos: [...node.pos] + }; + + // Store a reference to the old node before it gets replaced. + const oldNode = node + + // Track connections to re-establish later. + const inputConnections = [], outputConnections = []; + if (node.inputs) { + for (const input of node.inputs ?? []) { + if (input.link) { + const input_name = input.name + const input_slot = node.findInputSlot(input_name) + const input_node = node.getInputNode(input_slot) + const input_link = node.getInputLink(input_slot) + + inputConnections.push([input_link.origin_slot, input_node, input_name]) + } + } + } + if (node.outputs) { + for (const output of node.outputs) { + if (output.links) { + const output_name = output.name + + for (const linkID of output.links) { + const output_link = graph.links[linkID] + const output_node = graph._nodes_by_id[output_link.target_id] + outputConnections.push([output_name, output_node, output_link.target_slot]) + } + } + } + } + // Remove old node and create a new one. + app.graph.remove(node) + const newNode = app.graph.add(LiteGraph.createNode(node.constructor.type, nodeTitle, options)); + if (newNode?.constructor?.hasOwnProperty('ttNnodeVersion')) { + newNode.properties.ttNnodeVersion = newNode.constructor.ttNnodeVersion; + } + + // A function to handle reconnection of links to the new node. 
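+ // Widgets that had been converted to inputs are re-converted on the new
+ // node first, so the replayed input/output connections have matching slots.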
+ function handleLinks() { + for (let ow of oldNode.widgets) { + if (ow.type === CONVERTED_TYPE) { + const config = getConfig(ow.name, oldNode) + const WidgetToConvert = newNode.widgets.find((nw) => nw.name === ow.name); + if (WidgetToConvert && !newNode?.inputs?.find((i) => i.name === ow.name)) { + convertToInput(newNode, WidgetToConvert, config); + } + } + } + + // replace input and output links + for (let input of inputConnections) { + const [output_slot, output_node, input_name] = input; + output_node.connect(output_slot, newNode.id, input_name) + } + for (let output of outputConnections) { + const [output_name, input_node, input_slot] = output; + newNode.connect(output_name, input_node, input_slot) + } + } + + // fix widget values + let values = oldNode.widgets_values; + if (!values) { + console.log('NO VALUES') + newNode.widgets.forEach((newWidget, index) => { + let pass = false + while ((index < oldNode.widgets.length) && !pass) { + const oldWidget = oldNode.widgets[index]; + if (newWidget.type === oldWidget.type) { + newWidget.value = oldWidget.value; + pass = true + } + index++; + } + }); + } + else { + let isValid = false + const isIterateForwards = values.length <= newNode.widgets.length; + let valueIndex = isIterateForwards ? 0 : values.length - 1; + + const parseWidgetValue = (value, widget) => { + if (['', null].includes(value) && (widget.type === "button" || widget.type === "converted-widget")) { + return { value, isValid: true }; + } + if (typeof value === "boolean" && widget.options?.on && widget.options?.off) { + return { value, isValid: true }; + } + if (widget.options?.values?.includes(value)) { + return { value, isValid: true }; + } + if (widget.inputEl) { + if (typeof value === "string" || value === widget.value) { + return { value, isValid: true }; + } + } + if (!isNaN(value)) { + value = parseFloat(value); + if (widget.options?.min <= value && value <= widget.options?.max) { + return { value, isValid: true }; + } + } + return { value: widget.value, isValid: false }; + }; + + function updateValue(widgetIndex) { + const oldWidget = oldNode.widgets[widgetIndex]; + let newWidget = newNode.widgets[widgetIndex]; + let newValueIndex = valueIndex + + if (newWidget.name === oldWidget.name && (newWidget.type === oldWidget.type || oldWidget.type === 'ttNhidden' || newWidget.type === 'ttNhidden')) { + + while ((isIterateForwards ? newValueIndex < values.length : newValueIndex >= 0) && !isValid) { + let { value, isValid } = parseWidgetValue(values[newValueIndex], newWidget); + if (isValid && value !== NaN) { + newWidget.value = value; + break; + } + newValueIndex += isIterateForwards ? 
1 : -1; + } + + if (isIterateForwards) { + if (newValueIndex === valueIndex) { + valueIndex++; + } + if (newValueIndex === valueIndex + 1) { + valueIndex++; + valueIndex++; + } + } else { + if (newValueIndex === valueIndex) { + valueIndex--; + } + if (newValueIndex === valueIndex - 1) { + valueIndex--; + valueIndex--; + } + } + //console.log('\n') + } + }; + if (isIterateForwards) { + for (let widgetIndex = 0; widgetIndex < newNode.widgets.length; widgetIndex++) { + updateValue(widgetIndex); + } + } else { + for (let widgetIndex = newNode.widgets.length - 1; widgetIndex >= 0; widgetIndex--) { + updateValue(widgetIndex); + } + } + } + handleLinks(); + + newNode.setSize(options.size) + newNode.onResize([0,0]); +}; \ No newline at end of file diff --git a/zavodik/nodes/ComfyUI_tinyterraNodes/pyproject.toml b/zavodik/nodes/ComfyUI_tinyterraNodes/pyproject.toml new file mode 100644 index 0000000000000000000000000000000000000000..6a19ecdb01781b899c0a62e76c7a034c55f55c54 --- /dev/null +++ b/zavodik/nodes/ComfyUI_tinyterraNodes/pyproject.toml @@ -0,0 +1,14 @@ +[project] +name = "comfyui_tinyterranodes" +description = "Customizable xyPlot, various pipe nodes, fullscreen image viewer based on node history, dynamic widgets, interface customization, and more." +version = "2.0.11" +license = { file = "LICENSE" } + +[project.urls] +Repository = "https://github.com/TinyTerra/ComfyUI_tinyterraNodes" + +[tool.comfy] +PublisherId = "tinyterra" +DisplayName = "tinyterraNodes" +Icon = "images/icon.jpg" +Models = [] diff --git a/zavodik/nodes/ComfyUI_tinyterraNodes/ttNdev.py b/zavodik/nodes/ComfyUI_tinyterraNodes/ttNdev.py new file mode 100644 index 0000000000000000000000000000000000000000..25590d52ed34369bbc2431118d9acc78db9ecda8 --- /dev/null +++ b/zavodik/nodes/ComfyUI_tinyterraNodes/ttNdev.py @@ -0,0 +1,34 @@ +# in_dev - likely broken +class ttN_compareInput: + @classmethod + def INPUT_TYPES(s): + return {"required": {"console_title": ("STRING", {"default": "ttN INPUT COMPARE"}),}, + "optional": {"debug": ("", {"default": None}), + "debug2": ("", {"default": None}),} + } + + RETURN_TYPES = tuple() + RETURN_NAMES = tuple() + FUNCTION = "debug" + CATEGORY = "🌏 tinyterra/dev" + OUTPUT_NODE = True + + def debug(_, **kwargs): + + values = [] + for key, value in kwargs.items(): + if key == "console_title": + print(value) + else: + print(f"{key}: {value}") + values.append(value) + + return tuple() + +NODE_CLASS_MAPPINGS = { + "ttN compareInput": ttN_compareInput, +} + +NODE_DISPLAY_NAME_MAPPINGS = { + "ttN compareInput": "compareInput", +} \ No newline at end of file diff --git a/zavodik/nodes/ComfyUI_tinyterraNodes/ttNpy/__pycache__/adv_encode.cpython-313.pyc b/zavodik/nodes/ComfyUI_tinyterraNodes/ttNpy/__pycache__/adv_encode.cpython-313.pyc new file mode 100644 index 0000000000000000000000000000000000000000..daceb5e2449ab4ac305ae89bb5742f297dfab8dc Binary files /dev/null and b/zavodik/nodes/ComfyUI_tinyterraNodes/ttNpy/__pycache__/adv_encode.cpython-313.pyc differ diff --git a/zavodik/nodes/ComfyUI_tinyterraNodes/ttNpy/__pycache__/tinyterraNodes.cpython-313.pyc b/zavodik/nodes/ComfyUI_tinyterraNodes/ttNpy/__pycache__/tinyterraNodes.cpython-313.pyc new file mode 100644 index 0000000000000000000000000000000000000000..714d5896d9b00d532174f3bc69074c5a1e0a3267 --- /dev/null +++ b/zavodik/nodes/ComfyUI_tinyterraNodes/ttNpy/__pycache__/tinyterraNodes.cpython-313.pyc @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:3a1ac5c4d63d9f61315aff42e35c6c0179487f89409965fc3f8e16c8d1ee779b +size 169885 diff 
--git a/zavodik/nodes/ComfyUI_tinyterraNodes/ttNpy/__pycache__/ttNexecutor.cpython-313.pyc b/zavodik/nodes/ComfyUI_tinyterraNodes/ttNpy/__pycache__/ttNexecutor.cpython-313.pyc new file mode 100644 index 0000000000000000000000000000000000000000..8e416c5bf578277deefa9e7d944a9dbcfdaea366 Binary files /dev/null and b/zavodik/nodes/ComfyUI_tinyterraNodes/ttNpy/__pycache__/ttNexecutor.cpython-313.pyc differ diff --git a/zavodik/nodes/ComfyUI_tinyterraNodes/ttNpy/__pycache__/ttNlegacyNodes.cpython-313.pyc b/zavodik/nodes/ComfyUI_tinyterraNodes/ttNpy/__pycache__/ttNlegacyNodes.cpython-313.pyc new file mode 100644 index 0000000000000000000000000000000000000000..778801f89081799f6af563f58a76a57e62ee20d5 Binary files /dev/null and b/zavodik/nodes/ComfyUI_tinyterraNodes/ttNpy/__pycache__/ttNlegacyNodes.cpython-313.pyc differ diff --git a/zavodik/nodes/ComfyUI_tinyterraNodes/ttNpy/__pycache__/ttNserver.cpython-313.pyc b/zavodik/nodes/ComfyUI_tinyterraNodes/ttNpy/__pycache__/ttNserver.cpython-313.pyc new file mode 100644 index 0000000000000000000000000000000000000000..2ee9406b4944f090ff89a3bd6ca150af9fe06947 Binary files /dev/null and b/zavodik/nodes/ComfyUI_tinyterraNodes/ttNpy/__pycache__/ttNserver.cpython-313.pyc differ diff --git a/zavodik/nodes/ComfyUI_tinyterraNodes/ttNpy/__pycache__/utils.cpython-313.pyc b/zavodik/nodes/ComfyUI_tinyterraNodes/ttNpy/__pycache__/utils.cpython-313.pyc new file mode 100644 index 0000000000000000000000000000000000000000..7ca65bac8fce05bdf6753b4e2cd942e053f017fc Binary files /dev/null and b/zavodik/nodes/ComfyUI_tinyterraNodes/ttNpy/__pycache__/utils.cpython-313.pyc differ diff --git a/zavodik/nodes/ComfyUI_tinyterraNodes/ttNpy/adv_encode.py b/zavodik/nodes/ComfyUI_tinyterraNodes/ttNpy/adv_encode.py new file mode 100644 index 0000000000000000000000000000000000000000..9e7299abdebb4ec003d46d66b902380fb3aa95ad --- /dev/null +++ b/zavodik/nodes/ComfyUI_tinyterraNodes/ttNpy/adv_encode.py @@ -0,0 +1,401 @@ +import torch +import numpy as np +import itertools +from math import gcd + +from comfy import model_management +from comfy.sdxl_clip import SDXLClipModel, SDXLRefinerClipModel, SDXLClipG, StableCascadeClipModel +try: + from comfy.text_encoders.sd3_clip import SD3ClipModel, T5XXLModel +except ImportError: + from comfy.sd3_clip import SD3ClipModel, T5XXLModel + +try: + from comfy.text_encoders.flux import FluxClipModel +except: + FluxClipModel = None + pass + +def _grouper(n, iterable): + it = iter(iterable) + while True: + chunk = list(itertools.islice(it, n)) + if not chunk: + return + yield chunk + +def _norm_mag(w, n): + d = w - 1 + return 1 + np.sign(d) * np.sqrt(np.abs(d) ** 2 / n) + # return np.sign(w) * np.sqrt(np.abs(w)**2 / n) + +def divide_length(word_ids, weights): + sums = dict(zip(*np.unique(word_ids, return_counts=True))) + sums[0] = 1 + weights = [[_norm_mag(w, sums[id]) if id != 0 else 1.0 + for w, id in zip(x, y)] for x, y in zip(weights, word_ids)] + return weights + +def shift_mean_weight(word_ids, weights): + delta = 1 - np.mean([w for x, y in zip(weights, word_ids) for w, id in zip(x, y) if id != 0]) + weights = [[w if id == 0 else w + delta + for w, id in zip(x, y)] for x, y in zip(weights, word_ids)] + return weights + +def scale_to_norm(weights, word_ids, w_max): + top = np.max(weights) + w_max = min(top, w_max) + weights = [[w_max if id == 0 else (w / top) * w_max + for w, id in zip(x, y)] for x, y in zip(weights, word_ids)] + return weights + +def from_zero(weights, base_emb): + weight_tensor = torch.tensor(weights, dtype=base_emb.dtype, 
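+ # per-token weights; reshaped to (1, tokens, 1) below and broadcast
+ # across the embedding channels so each token row is scaled uniformly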
device=base_emb.device) + weight_tensor = weight_tensor.reshape(1, -1, 1).expand(base_emb.shape) + return base_emb * weight_tensor + +def mask_word_id(tokens, word_ids, target_id, mask_token): + new_tokens = [[mask_token if wid == target_id else t + for t, wid in zip(x, y)] for x, y in zip(tokens, word_ids)] + mask = np.array(word_ids) == target_id + return (new_tokens, mask) + +def batched_clip_encode(tokens, length, encode_func, num_chunks): + embs = [] + for e in _grouper(32, tokens): + enc, pooled = encode_func(e) + try: + enc = enc.reshape((len(e), length, -1)) + except: + raise Exception("Down_Weight and Comfy++ weight interpretations are not currently supported with this model.") + embs.append(enc) + embs = torch.cat(embs) + embs = embs.reshape((len(tokens) // num_chunks, length * num_chunks, -1)) + return embs + +def from_masked(tokens, weights, word_ids, base_emb, length, encode_func, m_token=266): + pooled_base = base_emb[0, length - 1:length, :] + wids, inds = np.unique(np.array(word_ids).reshape(-1), return_index=True) + weight_dict = dict((id, w) + for id, w in zip(wids, np.array(weights).reshape(-1)[inds]) + if w != 1.0) + + if len(weight_dict) == 0: + return torch.zeros_like(base_emb), base_emb[0, length - 1:length, :] + + weight_tensor = torch.tensor(weights, dtype=base_emb.dtype, device=base_emb.device) + weight_tensor = weight_tensor.reshape(1, -1, 1).expand(base_emb.shape) + + # m_token = (clip.tokenizer.end_token, 1.0) if clip.tokenizer.pad_with_end else (0,1.0) + # TODO: find most suitable masking token here + m_token = (m_token, 1.0) + + ws = [] + masked_tokens = [] + masks = [] + + # create prompts + for id, w in weight_dict.items(): + masked, m = mask_word_id(tokens, word_ids, id, m_token) + masked_tokens.extend(masked) + + m = torch.tensor(m, dtype=base_emb.dtype, device=base_emb.device) + m = m.reshape(1, -1, 1).expand(base_emb.shape) + masks.append(m) + + ws.append(w) + + # batch process prompts + embs = batched_clip_encode(masked_tokens, length, encode_func, len(tokens)) + masks = torch.cat(masks) + + embs = (base_emb.expand(embs.shape) - embs) + pooled = embs[0, length - 1:length, :] + + embs *= masks + embs = embs.sum(axis=0, keepdim=True) + + pooled_start = pooled_base.expand(len(ws), -1) + ws = torch.tensor(ws).reshape(-1, 1).expand(pooled_start.shape) + pooled = (pooled - pooled_start) * (ws - 1) + pooled = pooled.mean(axis=0, keepdim=True) + + return ((weight_tensor - 1) * embs), pooled_base + pooled + +def mask_inds(tokens, inds, mask_token): + clip_len = len(tokens[0]) + inds_set = set(inds) + new_tokens = [[mask_token if i * clip_len + j in inds_set else t + for j, t in enumerate(x)] for i, x in enumerate(tokens)] + return new_tokens + +def down_weight(tokens, weights, word_ids, base_emb, length, encode_func, m_token=266): + w, w_inv = np.unique(weights, return_inverse=True) + + if np.sum(w < 1) == 0: + return base_emb, tokens, base_emb[0, length - 1:length, :] + # m_token = (clip.tokenizer.end_token, 1.0) if clip.tokenizer.pad_with_end else (0,1.0) + # using the comma token as a masking token seems to work better than aos tokens for SD 1.x + m_token = (m_token, 1.0) + + masked_tokens = [] + + masked_current = tokens + for i in range(len(w)): + if w[i] >= 1: + continue + masked_current = mask_inds(masked_current, np.where(w_inv == i)[0], m_token) + masked_tokens.extend(masked_current) + + embs = batched_clip_encode(masked_tokens, length, encode_func, len(tokens)) + embs = torch.cat([base_emb, embs]) + w = w[w <= 1.0] + w_mix = np.diff([0] + w.tolist()) 
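+ # A sketch of the mix coefficients (illustrative numbers, not from the source): np.diff over [0] followed by the ascending unique weights produces telescoping coefficients. + # e.g. weights [0.4, 0.7, 1.0] give w_mix = [0.4, 0.3, 0.3]: the unmasked base encoding takes 0.4 and each progressively-more-masked encoding takes one increment, + # so a token weighted 0.4 only contributes through encodings totalling 0.4, while full-weight tokens appear in every encoding and sum to 1.0.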
+ w_mix = torch.tensor(w_mix, dtype=embs.dtype, device=embs.device).reshape((-1, 1, 1)) + + weighted_emb = (w_mix * embs).sum(axis=0, keepdim=True) + return weighted_emb, masked_current, weighted_emb[0, length - 1:length, :] + +def scale_emb_to_mag(base_emb, weighted_emb): + norm_base = torch.linalg.norm(base_emb) + norm_weighted = torch.linalg.norm(weighted_emb) + embeddings_final = (norm_base / norm_weighted) * weighted_emb + return embeddings_final + +def recover_dist(base_emb, weighted_emb): + fixed_std = (base_emb.std() / weighted_emb.std()) * (weighted_emb - weighted_emb.mean()) + embeddings_final = fixed_std + (base_emb.mean() - fixed_std.mean()) + return embeddings_final + +def A1111_renorm(base_emb, weighted_emb): + embeddings_final = (base_emb.mean() / weighted_emb.mean()) * weighted_emb + return embeddings_final + +def advanced_encode_from_tokens(tokenized, token_normalization, weight_interpretation, encode_func, m_token=266, + length=77, w_max=1.0, return_pooled=False, apply_to_pooled=False): + tokens = [[t for t, _, _ in x] for x in tokenized] + weights = [[w for _, w, _ in x] for x in tokenized] + word_ids = [[wid for _, _, wid in x] for x in tokenized] + + # weight normalization + # ==================== + + # distribute down/up weights over word lengths + if token_normalization.startswith("length"): + weights = divide_length(word_ids, weights) + + # make mean of word tokens 1 + if token_normalization.endswith("mean"): + weights = shift_mean_weight(word_ids, weights) + + # weight interpretation + # ===================== + pooled = None + + if weight_interpretation == "comfy": + weighted_tokens = [[(t, w) for t, w in zip(x, y)] for x, y in zip(tokens, weights)] + weighted_emb, pooled_base = encode_func(weighted_tokens) + pooled = pooled_base + else: + unweighted_tokens = [[(t, 1.0) for t, _, _ in x] for x in tokenized] + base_emb, pooled_base = encode_func(unweighted_tokens) + + if weight_interpretation == "A1111": + weighted_emb = from_zero(weights, base_emb) + weighted_emb = A1111_renorm(base_emb, weighted_emb) + pooled = pooled_base + + if weight_interpretation == "compel": + pos_tokens = [[(t, w) if w >= 1.0 else (t, 1.0) for t, w in zip(x, y)] for x, y in zip(tokens, weights)] + weighted_emb, _ = encode_func(pos_tokens) + weighted_emb, _, pooled = down_weight(pos_tokens, weights, word_ids, weighted_emb, length, encode_func) + + if weight_interpretation == "comfy++": + weighted_emb, tokens_down, _ = down_weight(unweighted_tokens, weights, word_ids, base_emb, length, encode_func) + weights = [[w if w > 1.0 else 1.0 for w in x] for x in weights] + # unweighted_tokens = [[(t,1.0) for t, _, _ in x] for x in tokens_down] + embs, pooled = from_masked(unweighted_tokens, weights, word_ids, base_emb, length, encode_func) + weighted_emb += embs + + if weight_interpretation == "down_weight": + weights = scale_to_norm(weights, word_ids, w_max) + weighted_emb, _, pooled = down_weight(unweighted_tokens, weights, word_ids, base_emb, length, encode_func) + + if return_pooled: + if apply_to_pooled: + return weighted_emb, pooled + else: + return weighted_emb, pooled_base + return weighted_emb, None + +def encode_token_weights_g(model, token_weight_pairs): + return model.clip_g.encode_token_weights(token_weight_pairs) + +def encode_token_weights_l(model, token_weight_pairs): + return model.clip_l.encode_token_weights(token_weight_pairs) + +def encode_token_weights_t5(model, token_weight_pairs): + return model.t5xxl.encode_token_weights(token_weight_pairs) + +def encode_token_weights(model, 
token_weight_pairs, encode_func): + if model.layer_idx is not None: + model.cond_stage_model.set_clip_options({"layer": model.layer_idx}) + + model_management.load_model_gpu(model.patcher) + return encode_func(model.cond_stage_model, token_weight_pairs) + +def prepareXL(embs_l, embs_g, pooled, clip_balance): + l_w = 1 - max(0, clip_balance - .5) * 2 + g_w = 1 - max(0, .5 - clip_balance) * 2 + if embs_l is not None: + return torch.cat([embs_l * l_w, embs_g * g_w], dim=-1), pooled + else: + return embs_g, pooled + +def prepareSD3(out, pooled, clip_balance): + lg_w = 1 - max(0, clip_balance - .5) * 2 + t5_w = 1 - max(0, .5 - clip_balance) * 2 + if out.shape[0] > 1: + return torch.cat([out[0] * lg_w, out[1] * t5_w], dim=-1), pooled + else: + return out, pooled + +def advanced_encode(clip, text, token_normalization, weight_interpretation, w_max=1.0, clip_balance=.5, apply_to_pooled=True): + tokenized = clip.tokenize(text, return_word_ids=True) + + if SD3ClipModel and isinstance(clip.cond_stage_model, SD3ClipModel): + lg_out = None + pooled = None + out = None + + if len(tokenized['l']) > 0 or len(tokenized['g']) > 0: + if 'l' in tokenized: + lg_out, l_pooled = advanced_encode_from_tokens(tokenized['l'], + token_normalization, + weight_interpretation, + lambda x: encode_token_weights(clip, x, encode_token_weights_l), + w_max=w_max, return_pooled=True,) + else: + l_pooled = torch.zeros((1, 768), device=model_management.intermediate_device()) + + if 'g' in tokenized: + g_out, g_pooled = advanced_encode_from_tokens(tokenized['g'], + token_normalization, + weight_interpretation, + lambda x: encode_token_weights(clip, x, encode_token_weights_g), + w_max=w_max, return_pooled=True) + if lg_out is not None: + lg_out = torch.cat([lg_out, g_out], dim=-1) + else: + lg_out = torch.nn.functional.pad(g_out, (768, 0)) + else: + g_out = None + g_pooled = torch.zeros((1, 1280), device=model_management.intermediate_device()) + + if lg_out is not None: + lg_out = torch.nn.functional.pad(lg_out, (0, 4096 - lg_out.shape[-1])) + out = lg_out + pooled = torch.cat((l_pooled, g_pooled), dim=-1) + + # t5xxl + if 't5xxl' in tokenized and clip.cond_stage_model.t5xxl is not None: + t5_out, t5_pooled = advanced_encode_from_tokens(tokenized['t5xxl'], + token_normalization, + weight_interpretation, + lambda x: encode_token_weights(clip, x, encode_token_weights_t5), + w_max=w_max, return_pooled=True) + if lg_out is not None: + out = torch.cat([lg_out, t5_out], dim=-2) + else: + out = t5_out + + if out is None: + out = torch.zeros((1, 77, 4096), device=model_management.intermediate_device()) + + if pooled is None: + pooled = torch.zeros((1, 768 + 1280), device=model_management.intermediate_device()) + + return prepareSD3(out, pooled, clip_balance) + + elif FluxClipModel and isinstance(clip.cond_stage_model, FluxClipModel): + if 't5xxl' in tokenized and clip.cond_stage_model.t5xxl is not None: + t5_out, t5_pooled = advanced_encode_from_tokens(tokenized['t5xxl'], + token_normalization, + weight_interpretation, + lambda x: encode_token_weights(clip, x, encode_token_weights_t5), + w_max=w_max, return_pooled=True,) + + if len(tokenized['l']) > 0: + if 'l' in tokenized: + l_out, l_pooled = advanced_encode_from_tokens(tokenized['l'], + token_normalization, + weight_interpretation, + lambda x: encode_token_weights(clip, x, encode_token_weights_l), + w_max=w_max, return_pooled=True,) + else: + l_pooled = torch.zeros((1, 768), device=model_management.intermediate_device()) + + return t5_out, l_pooled + + elif 
isinstance(clip.cond_stage_model, (SDXLClipModel, SDXLRefinerClipModel, SDXLClipG)): + embs_l = None + embs_g = None + pooled = None + if 'l' in tokenized and isinstance(clip.cond_stage_model, SDXLClipModel): + embs_l, _ = advanced_encode_from_tokens(tokenized['l'], + token_normalization, + weight_interpretation, + lambda x: encode_token_weights(clip, x, encode_token_weights_l), + w_max=w_max, + return_pooled=False) + if 'g' in tokenized: + embs_g, pooled = advanced_encode_from_tokens(tokenized['g'], + token_normalization, + weight_interpretation, + lambda x: encode_token_weights(clip, x, encode_token_weights_g), + w_max=w_max, + return_pooled=True, + apply_to_pooled=apply_to_pooled) + return prepareXL(embs_l, embs_g, pooled, clip_balance) + + elif isinstance(clip.cond_stage_model, StableCascadeClipModel): + return advanced_encode_from_tokens( + tokenized['g'], + token_normalization, + weight_interpretation, + lambda x: encode_token_weights(clip, x, encode_token_weights_g), + w_max=w_max, + return_pooled=True, + apply_to_pooled=apply_to_pooled + ) + else: + return advanced_encode_from_tokens(tokenized['l'], + token_normalization, + weight_interpretation, + lambda x: (clip.encode_from_tokens({'l': x}), None), + w_max=w_max) + +def advanced_encode_XL(clip, text1, text2, token_normalization, weight_interpretation, w_max=1.0, clip_balance=.5, apply_to_pooled=True): + tokenized1 = clip.tokenize(text1, return_word_ids=True) + tokenized2 = clip.tokenize(text2, return_word_ids=True) + + embs_l, _ = advanced_encode_from_tokens(tokenized1['l'], + token_normalization, + weight_interpretation, + lambda x: encode_token_weights(clip, x, encode_token_weights_l), + w_max=w_max, + return_pooled=False) + + embs_g, pooled = advanced_encode_from_tokens(tokenized2['g'], + token_normalization, + weight_interpretation, + lambda x: encode_token_weights(clip, x, encode_token_weights_g), + w_max=w_max, + return_pooled=True, + apply_to_pooled=apply_to_pooled) + + gcd_num = gcd(embs_l.shape[1], embs_g.shape[1]) + repeat_l = int((embs_g.shape[1] / gcd_num) * embs_l.shape[1]) + repeat_g = int((embs_l.shape[1] / gcd_num) * embs_g.shape[1]) + + return prepareXL(embs_l.expand((-1,repeat_l,-1)), embs_g.expand((-1,repeat_g,-1)), pooled, clip_balance) \ No newline at end of file diff --git a/zavodik/nodes/ComfyUI_tinyterraNodes/ttNpy/tinyterraNodes.py b/zavodik/nodes/ComfyUI_tinyterraNodes/ttNpy/tinyterraNodes.py new file mode 100644 index 0000000000000000000000000000000000000000..c06227911e77ed4435dba20fb9428aaf6c848db9 --- /dev/null +++ b/zavodik/nodes/ComfyUI_tinyterraNodes/ttNpy/tinyterraNodes.py @@ -0,0 +1,3824 @@ +""" +@author: tinyterra +@title: tinyterraNodes +@nickname: 🌏 +@description: This extension offers extensive xyPlot, various pipe nodes, fullscreen image viewer based on node history, dynamic widgets, interface customization, and more. +""" + +#---------------------------------------------------------------------------------------------------------------------------------------------------# +# tinyterraNodes developed in 2023 by tinyterra https://github.com/TinyTerra # +# for ComfyUI https://github.com/comfyanonymous/ComfyUI # +# Like the pack and want to support me? 
https://www.buymeacoffee.com/tinyterra # +#---------------------------------------------------------------------------------------------------------------------------------------------------# + +ttN_version = '2.0.9' + +import asyncio +import os +import re +import json +import copy +import random +import datetime +from pathlib import Path +from urllib.request import urlopen +from collections import OrderedDict +from typing import Dict, List, Optional, Tuple, Union, Any +import uuid + +import numpy as np +import torch +import hashlib +from PIL import Image, ImageDraw, ImageFont +from PIL.PngImagePlugin import PngInfo + +import nodes +import comfy.sd +import execution +import comfy.utils +import folder_paths +import comfy.samplers +import latent_preview +import comfy.controlnet +import comfy.model_management +import comfy.supported_models +import comfy.supported_models_base +from comfy.model_base import BaseModel +import comfy_extras.nodes_upscale_model +import comfy_extras.nodes_model_advanced +from comfy.sd import CLIP, VAE +from spandrel import ModelLoader, ImageModelDescriptor +from .adv_encode import advanced_encode +from comfy.model_patcher import ModelPatcher +from nodes import MAX_RESOLUTION, ControlNetApplyAdvanced, ConditioningZeroOut +from nodes import NODE_CLASS_MAPPINGS as COMFY_CLASS_MAPPINGS + +from .utils import CC, ttNl, ttNpaths, AnyType +from .ttNexecutor import xyExecutor + +OUTPUT_FILETYPES = ["png", "jpg", "jpeg", "tiff", "tif", "webp", "bmp"] +UPSCALE_METHODS = ["None", + "[latent] nearest-exact", "[latent] bilinear", "[latent] area", "[latent] bicubic", "[latent] lanczos", "[latent] bislerp", + "[hiresFix] nearest-exact", "[hiresFix] bilinear", "[hiresFix] area", "[hiresFix] bicubic", "[hiresFix] lanczos", "[hiresFix] bislerp"] +UPSCALE_MODELS = folder_paths.get_filename_list("upscale_models") + ["None"] +CROP_METHODS = ["disabled", "center"] +CUSTOM_SCHEDULERS = ["AYS SD1", "AYS SDXL", "AYS SVD", "GITS SD1"] + +class ttNloader: + def __init__(self): + self.loraDict = {lora.split('\\')[-1]: lora for lora in folder_paths.get_filename_list("loras")} + self.loader_cache = {} + + @staticmethod + def nsp_parse(text, seed=0, noodle_key='__', nspterminology=None, pantry_path=None, title=None, my_unique_id=None): + if "__" not in text: + return text + + if nspterminology is None: + # Fetch the NSP Pantry + if pantry_path is None: + pantry_path = os.path.join(ttNpaths.tinyterraNodes, 'nsp_pantry.json') + if not os.path.exists(pantry_path): + response = urlopen('https://raw.githubusercontent.com/WASasquatch/noodle-soup-prompts/main/nsp_pantry.json') + tmp_pantry = json.loads(response.read()) + # Dump JSON locally + pantry_serialized = json.dumps(tmp_pantry, indent=4) + with open(pantry_path, "w") as f: + f.write(pantry_serialized) + del response, tmp_pantry + + # Load local pantry + with open(pantry_path, 'r') as f: + nspterminology = json.load(f) + + if seed > 0 or seed < 0: + random.seed(seed) + + # Parse Text + new_text = text + for term in nspterminology: + # Target Noodle + tkey = f'{noodle_key}{term}{noodle_key}' + # How many occurrences? 
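+ # Illustrative example (assumed, not from the source): with the default noodle_key '__', the pantry term 'color' becomes the target noodle '__color__'. + # Each occurrence counted below is swapped, one at a time, for a random choice from nspterminology['color'], reseeding after every pick so repeated noodles in one prompt can resolve differently.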
+ tcount = new_text.count(tkey) + + if tcount > 0: + nsp_parsed = True + + # Apply random results for each noodle counted + for _ in range(tcount): + new_text = new_text.replace( + tkey, random.choice(nspterminology[term]), 1) + seed += 1 + random.seed(seed) + + ttNl(new_text).t(f'{title}[{my_unique_id}]').p() + + return new_text + + @staticmethod + def clean_values(values: str): + original_values = values.split("; ") + cleaned_values = [] + + for value in original_values: + cleaned_value = value.strip(';').strip() + if cleaned_value: + try: + cleaned_value = int(cleaned_value) + except ValueError: + try: + cleaned_value = float(cleaned_value) + except ValueError: + pass + + cleaned_values.append(cleaned_value) + return cleaned_values + + @staticmethod + def string_to_seed(s): + h = hashlib.sha256(s.encode()).digest() + return (int.from_bytes(h, byteorder='big') & 0xffffffffffffffff) + + def clear_cache(self, prompt, full=False): + loader_ids = [f'loader{key}' for key, value in prompt.items() if value['class_type'] in ['ttN pipeLoader_v2', 'ttN pipeLoaderSDXL_v2']] + + if full is True: + self.loader_cache = {} + else: + for key in list(self.loader_cache.keys()): + if key not in loader_ids: + self.loader_cache.pop(key) + + def load_checkpoint(self, ckpt_name, config_name=None, clip_skip=0, output_vae=True, output_clip=True): + ckpt_path = folder_paths.get_full_path("checkpoints", ckpt_name) + if config_name not in [None, "Default"]: + config_path = folder_paths.get_full_path("configs", config_name) + loaded_ckpt = comfy.sd.load_checkpoint(config_path, ckpt_path, output_vae=output_vae, output_clip=output_clip, embedding_directory=folder_paths.get_folder_paths("embeddings")) + else: + loaded_ckpt = comfy.sd.load_checkpoint_guess_config(ckpt_path, output_vae=output_vae, output_clip=output_clip, embedding_directory=folder_paths.get_folder_paths("embeddings")) + + clip = loaded_ckpt[1].clone() if loaded_ckpt[1] is not None else None + if clip_skip != 0 and clip is not None: + if sampler.get_model_type(loaded_ckpt[0]) in ['FLUX', 'FLOW']: + raise Exception('FLOW and FLUX do not support clip_skip. 
Set clip_skip to 0.') + clip.clip_layer(clip_skip) + + # model, clip, vae + return loaded_ckpt[0], clip, loaded_ckpt[2] + + def load_unclip(self, ckpt_name, output_vae=True, output_clip=True): + ckpt_path = folder_paths.get_full_path("checkpoints", ckpt_name) + out = comfy.sd.load_checkpoint_guess_config(ckpt_path, output_vae=True, output_clip=True, output_clipvision=True, embedding_directory=folder_paths.get_folder_paths("embeddings")) + return out + + def load_vae(self, vae_name): + vae_path = folder_paths.get_full_path("vae", vae_name) + sd = comfy.utils.load_torch_file(vae_path) + loaded_vae = comfy.sd.VAE(sd=sd) + + return loaded_vae + + def load_controlNet(self, positive, negative, controlnet_name, image, strength, start_percent, end_percent): + if type(controlnet_name) == str: + controlnet_path = folder_paths.get_full_path("controlnet", controlnet_name) + controlnet = comfy.controlnet.load_controlnet(controlnet_path) + else: + controlnet = controlnet_name + + controlnet_conditioning = ControlNetApplyAdvanced().apply_controlnet(positive, negative, controlnet, image, strength, start_percent, end_percent) + base_positive, base_negative = controlnet_conditioning[0], controlnet_conditioning[1] + return base_positive, base_negative + + def load_lora(self, lora_name, model, clip, strength_model, strength_clip): + if strength_model == 0 and strength_clip == 0: + return (model, clip) + + lora_path = folder_paths.get_full_path("loras", lora_name) + if lora_path is None or not os.path.exists(lora_path): + ttNl(f'{lora_path}').t("Skipping missing lora").error().p() + return (model, clip) + + lora = comfy.utils.load_torch_file(lora_path, safe_load=True) + model_lora, clip_lora = comfy.sd.load_lora_for_models(model, clip, lora, strength_model, strength_clip) + + return model_lora, clip_lora + + def validate_lora_format(self, lora_string): + if lora_string is None: + return None + if not re.match(r'^<lora:[^:<>]+(?::-?\d*\.?\d*){0,2}>$', lora_string): # matches <lora:name>, <lora:name:weight> or <lora:name:weight:clip_weight> + ttNl(f'{lora_string}').t("Skipping invalid lora format").error().p() + return None + + return lora_string + + def parse_lora_string(self, lora_string): + # Remove '<lora:' from the start and '>' from the end, then split by ':' + parts = lora_string[6:-1].split(':') # 6 is the length of '<lora:' + lora_name = parts[0] if len(parts) > 0 else None + weight1 = float(parts[1]) if len(parts) > 1 else None + weight2 = float(parts[2]) if len(parts) > 2 else weight1 + return lora_name, weight1, weight2 + + def load_lora_text(self, loras, model, clip): + # Extract potential <lora:...> patterns + pattern = r'<lora:[^<>]+>' + matches = re.findall(pattern, loras) + + # Validate each extracted pattern + for match in matches: + match = self.validate_lora_format(match) + if match is not None: + lora_name, weight1, weight2 = self.parse_lora_string(match) + + if lora_name not in self.loraDict: + ttNl(f'{lora_name}').t("Skipping unknown lora").error().p() + continue + + lora_name = self.loraDict.get(lora_name, lora_name) + model, clip = self.load_lora(lora_name, model, clip, weight1, weight2) + + return model, clip + + def embedding_encode(self, text, token_normalization, weight_interpretation, clip, seed=None, title=None, my_unique_id=None, prepend_text=None, zero_out=False): + text = f'{prepend_text} {text}' if prepend_text is not None else text + if seed is None: + seed = self.string_to_seed(text) + + text = self.nsp_parse(text, seed, title=title, my_unique_id=my_unique_id) + + embedding, pooled = advanced_encode(clip, text, token_normalization, weight_interpretation, w_max=1.0, apply_to_pooled='enable') + conditioning = [[embedding, {"pooled_output": pooled}]] + + if zero_out is True and
text.strip() == '': + return ConditioningZeroOut().zero_out(conditioning)[0] + else: + return conditioning + + def embedding_encodeXL(self, text, clip, seed=0, title=None, my_unique_id=None, prepend_text=None, text2=None, prepend_text2=None, width=None, height=None, crop_width=0, crop_height=0, target_width=None, target_height=None, refiner_clip=None, ascore=None): + text = f'{prepend_text} {text}' if prepend_text is not None else text + text = self.nsp_parse(text, seed, title=title, my_unique_id=my_unique_id) + + target_width = target_width if target_width is not None else width + target_height = target_height if target_height is not None else height + + if text2 is not None and refiner_clip is not None: + text2 = f'{prepend_text2} {text2}' if prepend_text2 is not None else text2 + text2 = self.nsp_parse(text2, seed, title=title, my_unique_id=my_unique_id) + + tokens_refiner = refiner_clip.tokenize(text2) + cond_refiner, pooled_refiner = refiner_clip.encode_from_tokens(tokens_refiner, return_pooled=True) + refiner_conditioning = [[cond_refiner, {"pooled_output": pooled_refiner, "aesthetic_score": ascore, "width": width,"height": height}]] + else: + refiner_conditioning = None + + if text2 is None or text2.strip() == '': + text2 = text + + tokens = clip.tokenize(text) + tokens["l"] = clip.tokenize(text2)["l"] + if len(tokens["l"]) != len(tokens["g"]): + empty = clip.tokenize("") + while len(tokens["l"]) < len(tokens["g"]): + tokens["l"] += empty["l"] + while len(tokens["l"]) > len(tokens["g"]): + tokens["g"] += empty["g"] + cond, pooled = clip.encode_from_tokens(tokens, return_pooled=True) + conditioning = [[cond, {"pooled_output": pooled, "width": width, "height": height, "crop_w": crop_width, "crop_h": crop_height, "target_width": target_width, "target_height": target_height}]] + + return conditioning, refiner_conditioning + + def load_main3(self, ckpt_name, config_name, vae_name, loras, clip_skip, model_override=None, clip_override=None, optional_lora_stack=None, unique_id=None): + cache = self.loader_cache.get(f'loader{unique_id}', None) + + model = "override" if model_override is not None else None + clip = "override" if clip_override is not None else None + vae = None + + if cache is not None and cache[0] == ckpt_name and cache[1] == config_name and cache[2] == vae_name and model is None and clip is None: + # Load from cache if it's the same + model = cache[3] + clip = cache[4] + vae = cache[5] + elif model is None or clip is None: + self.loader_cache.pop(f'loader{unique_id}', None) + + # Load normally + output_vae, output_clip = True, True + + if vae_name != "Baked VAE": + output_vae = False + if clip not in [None, "None", "override"]: + output_clip = False + + model, clip, vae = self.load_checkpoint(ckpt_name, config_name, clip_skip, output_vae, output_clip) + + if vae is None: + if vae_name != "Baked VAE": + vae = self.load_vae(vae_name) + else: + _, _, vae = self.load_checkpoint(ckpt_name, config_name, clip_skip, output_vae=True, output_clip=False) + + if unique_id is not None and model != "override" and clip != "override": + self.loader_cache[f'loader{unique_id}'] = [ckpt_name, config_name, vae_name, model, clip, vae] + + if model_override is not None: + self.loader_cache.pop(f'loader{unique_id}', None) + model = model_override + del model_override + + if clip_override is not None: + clip = clip_override.clone() + + if clip_skip != 0: + if sampler.get_model_type(model) in ['FLUX', 'FLOW']: + raise Exception('FLOW and FLUX do not support clip_skip. 
Set clip_skip to 0.') + clip.clip_layer(clip_skip) + del clip_override + + if optional_lora_stack is not None: + for lora in optional_lora_stack: + model, clip = self.load_lora(lora[0], model, clip, lora[1], lora[2]) + + if loras not in [None, "None"]: + model, clip = self.load_lora_text(loras, model, clip) + + if not clip: + raise Exception("No CLIP found") + + return model, clip, vae + +class ttNsampler: + def __init__(self): + self.last_helds: dict[str, list] = { + "results": [], + "pipe_line": [], + } + self.device = comfy.model_management.intermediate_device() + + @staticmethod + def tensor2pil(image: torch.Tensor) -> Image.Image: + """Convert a torch tensor to a PIL image.""" + return Image.fromarray(np.clip(255. * image.cpu().numpy().squeeze(), 0, 255).astype(np.uint8)) + + @staticmethod + def pil2tensor(image: Image.Image) -> torch.Tensor: + """Convert a PIL image to a torch tensor.""" + return torch.from_numpy(np.array(image).astype(np.float32) / 255.0).unsqueeze(0) + + @staticmethod + def enforce_mul_of_64(d): + d = int(d) + if d<=7: + d = 8 + leftover = d % 8 # 8 is the number of pixels per byte + if leftover != 0: # if the number of pixels is not a multiple of 8 + if (leftover < 4): # if the number of pixels is less than 4 + d -= leftover # remove the leftover pixels + else: # if the number of pixels is more than 4 + d += 8 - leftover # add the leftover pixels + + return int(d) + + @staticmethod + def safe_split(to_split: str, delimiter: str) -> List[str]: + """Split the input string and return a list of non-empty parts.""" + parts = to_split.split(delimiter) + parts = [part for part in parts if part not in ('', ' ', ' ')] + + while len(parts) < 2: + parts.append('None') + return parts + + @staticmethod + def get_model_type(model): + base: BaseModel = model.model + return str(base.model_type).split('.')[1].strip() + + def emptyLatent(self, empty_latent_aspect: str, batch_size:int, width:int = None, height:int = None, sd3: bool = False) -> torch.Tensor: + if empty_latent_aspect and empty_latent_aspect != "width x height [custom]": + width, height = empty_latent_aspect.replace(' ', '').split('[')[0].split('x') + + if sd3: + latent = torch.ones([batch_size, 16, int(height) // 8, int(width) // 8], device=self.device) * 0.0609 + else: + latent = torch.zeros([batch_size, 4, int(height) // 8, int(width) // 8], device=self.device) + + return latent + + def common_ksampler(self, model, seed, steps, cfg, sampler_name, scheduler, positive, negative, latent, denoise=1.0, disable_noise=False, start_step=None, last_step=None, force_full_denoise=False, preview_latent=True, disable_pbar=False): + latent_image = latent["samples"] + + if disable_noise: + noise = torch.zeros(latent_image.size(), dtype=latent_image.dtype, layout=latent_image.layout, device="cpu") + else: + batch_inds = latent["batch_index"] if "batch_index" in latent else None + noise = comfy.sample.prepare_noise(latent_image, seed, batch_inds) + + noise_mask = None + if "noise_mask" in latent: + noise_mask = latent["noise_mask"] + + if preview_latent: + callback = latent_preview.prepare_callback(model, steps) + else: + callback = None + + disable_pbar = not comfy.utils.PROGRESS_BAR_ENABLED + + if scheduler not in CUSTOM_SCHEDULERS: + samples = comfy.sample.sample(model, noise, steps, cfg, sampler_name, scheduler, positive, negative, latent_image, + denoise=denoise, disable_noise=disable_noise, start_step=start_step, last_step=last_step, + force_full_denoise=force_full_denoise, noise_mask=noise_mask, callback=callback, 
disable_pbar=disable_pbar, seed=seed) + else: + sampler = comfy.samplers.sampler_object(sampler_name) + + if scheduler.startswith("AYS"): + from comfy_extras.nodes_align_your_steps import AlignYourStepsScheduler + + model_type = scheduler.split(' ')[1] + sigmas = AlignYourStepsScheduler().get_sigmas(model_type, steps, denoise)[0] + elif scheduler.startswith("GITS"): + from comfy_extras.nodes_gits import GITSScheduler + + sigmas = GITSScheduler().get_sigmas(1.2, steps, denoise)[0] + + samples = comfy.sample.sample_custom(model, noise, cfg, sampler, sigmas, positive, negative, latent_image, noise_mask=noise_mask, callback=callback, disable_pbar=disable_pbar, seed=seed) + + out = latent.copy() + out["samples"] = samples + return out + + def upscale(self, samples, upscale_method, scale_by, crop): + s = samples.copy() + width = self.enforce_mul_of_64(round(samples["samples"].shape[3] * scale_by)) + height = self.enforce_mul_of_64(round(samples["samples"].shape[2] * scale_by)) + + if (width > MAX_RESOLUTION): + width = MAX_RESOLUTION + if (height > MAX_RESOLUTION): + height = MAX_RESOLUTION + + s["samples"] = comfy.utils.common_upscale(samples["samples"], width, height, upscale_method, crop) + return (s,) + + def handle_upscale(self, samples: dict, upscale_method: str, factor: float, crop: bool, + upscale_model_name: str=None, vae: VAE=None, images: np.ndarray=None, rescale: str=None, percent: float=None, width: int=None, height: int=None, longer_side: int=None) -> dict: + """Upscale the samples if the upscale_method is not set to 'None'.""" + upscale_method = upscale_method.split(' ', 1) + + # Upscale samples if enabled + if upscale_method[0] == "[latent]": + if upscale_method[1] != "None": + samples = self.upscale(samples, upscale_method[1], factor, crop)[0] + + if upscale_method[0] == "[hiresFix]": + if (images is None): + images = vae.decode(samples["samples"]) + hiresfix = ttN_modelScale() + if upscale_model_name == "None": + raise ValueError("Unable to model upscale. 
Please install an upscale model and try again.") + samples = hiresfix.upscale(upscale_model_name, vae, images, True if rescale != 'None' else False, upscale_method[1], rescale, percent, width, height, longer_side, crop, "return latent", None, True) + + return samples + + def get_output(self, pipe: dict) -> Tuple: + """Return a tuple of various elements fetched from the input pipe dictionary.""" + return ( + pipe, + pipe.get("model"), + pipe.get("positive"), + pipe.get("negative"), + pipe.get("samples"), + pipe.get("vae"), + pipe.get("clip"), + pipe.get("images"), + pipe.get("seed") + ) + + def get_output_sdxl(self, sdxl_pipe: dict, pipe: dict) -> Tuple: + """Return a tuple of various elements fetched from the input sdxl_pipe dictionary.""" + return ( + sdxl_pipe, + pipe, + sdxl_pipe.get("model"), + sdxl_pipe.get("positive"), + sdxl_pipe.get("negative"), + sdxl_pipe.get("refiner_model"), + sdxl_pipe.get("refiner_positive"), + sdxl_pipe.get("refiner_negative"), + sdxl_pipe.get("samples"), + sdxl_pipe.get("vae"), + sdxl_pipe.get("clip"), + sdxl_pipe.get("images"), + sdxl_pipe.get("seed") + ) + +class ttNadv_xyPlot: + def __init__(self, adv_xyPlot, unique_id, prompt, extra_pnginfo, save_prefix, image_output, executor): + self.executor = executor + self.unique_id = str(unique_id) + self.prompt = prompt + self.extra_pnginfo = extra_pnginfo + self.save_prefix = save_prefix + self.image_output = image_output + + self.latent_list = [] + self.image_list = [] + self.ui_list = [] + + self.adv_xyPlot = adv_xyPlot + self.x_points = adv_xyPlot.get("x_plot", None) + self.y_points = adv_xyPlot.get("y_plot", None) + self.z_points = adv_xyPlot.get("z_plot", None) + self.save_individuals = adv_xyPlot.get("save_individuals", False) + self.image_output = prompt[str(unique_id)]["inputs"]["image_output"] + self.invert_bg = adv_xyPlot.get("invert_bg", False) + self.x_labels = [] + self.y_labels = [] + self.z_labels = [] + + self.grid_spacing = adv_xyPlot["grid_spacing"] + self.max_width, self.max_height = 0, 0 + self.num_cols = len(self.x_points) if self.x_points else 1 + self.num_rows = len(self.y_points) if self.y_points else 1 + + self.num = 0 + self.total = (self.num_cols if self.num_cols > 0 else 1) * (self.num_rows if self.num_rows > 0 else 1) + + def reset(self): + self.executor.reset() + self.executor = None + self.clear_caches() + + def clear_caches(self): + self.latent_list = [] + self.image_list = [] + self.ui_list = [] + self.num = 0 + + @staticmethod + def get_font(font_size): + font = None + if os.path.exists(ttNpaths.font_path): + try: + font = ImageFont.truetype(str(Path(ttNpaths.font_path)), font_size) + except: + pass + + if font is None: + font = ImageFont.load_default(font_size) + + return font + + @staticmethod + def rearrange_tensors(latent, num_cols, num_rows): + new_latent = [] + for i in range(num_rows): + for j in range(num_cols): + index = j * num_rows + i + new_latent.append(latent[index]) + return new_latent + + @staticmethod + def _get_nodes_to_keep(nodeID, prompt): + nodes_to_keep = OrderedDict([(nodeID, None)]) + + toCheck = [nodeID] + + while toCheck: + current_node_id = toCheck.pop() + current_node = prompt[current_node_id] + + for input_key in current_node["inputs"]: + value = current_node["inputs"][input_key] + + if isinstance(value, list) and len(value) == 2: + input_node_id = value[0] + + if input_node_id not in nodes_to_keep: + nodes_to_keep[input_node_id] = None + toCheck.append(input_node_id) + + return list(reversed(list(nodes_to_keep.keys()))) + + def create_label(self, 
img, text, initial_font_size, is_x_label=True, max_font_size=70, min_font_size=21): + label_width = img.width if is_x_label else img.height + + font_size = self.adjust_font_size(text, initial_font_size, label_width) + font_size = min(max_font_size, font_size) + font_size = max(min_font_size, font_size) + + if self.invert_bg: + fill_color = 'white' + else: + fill_color = 'black' + + label_bg = Image.new('RGBA', (label_width, 0), color=(0, 0, 0, 0)) # Temporary height + d = ImageDraw.Draw(label_bg) + + font = self.get_font(font_size) + + def split_text_into_lines(text, font, label_width): + words = text.split() + if words == []: + return ['None'] + lines = [] + current_line = words[0] + for word in words[1:]: + try: + if d.textsize(f"{current_line} {word}", font=font)[0] <= label_width: + current_line += " " + word + else: + lines.append(current_line) + current_line = word + except: + if d.textlength(f"{current_line} {word}", font=font) <= label_width: + current_line += " " + word + else: + lines.append(current_line) + current_line = word + lines.append(current_line) + return lines + + lines = split_text_into_lines(text, font, label_width) + + line_height = int(font_size * 1.2) # Increased line height for spacing + label_height = len(lines) * line_height + + label_bg = Image.new('RGBA', (label_width, label_height), color=(0, 0, 0, 0)) + d = ImageDraw.Draw(label_bg) + + current_y = 0 + for line in lines: + try: + text_width, _ = d.textsize(line, font=font) + except: + text_width = d.textlength(line, font=font) + text_x = (label_width - text_width) // 2 + text_y = current_y + current_y += line_height + d.text((text_x, text_y), line, fill=fill_color, font=font) + + return label_bg + + def calculate_background_dimensions(self): + border_size = int((self.max_width//8)*1.5) if self.y_points is not None or self.x_points is not None else 0 + bg_width = self.num_cols * (self.max_width + self.grid_spacing) - self.grid_spacing + border_size * (self.y_points != None) + bg_height = self.num_rows * (self.max_height + self.grid_spacing) - self.grid_spacing + border_size * (self.x_points != None) + border_size * (self.z_points["1"]["label"] != None) + + x_offset_initial = border_size if self.y_points is not None else 0 + y_offset = border_size if self.x_points is not None else 0 + + return bg_width, bg_height, x_offset_initial, y_offset + + def get_relevant_prompt(self): + nodes_to_keep = self._get_nodes_to_keep(self.unique_id, self.prompt) + new_prompt = {node_id: self.prompt[node_id] for node_id in nodes_to_keep} + + if self.save_individuals == True: + if self.image_output in ["Hide", "Hide/Save"]: + new_prompt[self.unique_id]["inputs"]["image_output"] = "Hide/Save" + else: + new_prompt[self.unique_id]["inputs"]["image_output"] = "Save" + elif self.image_output in ["Preview", "Save"]: + new_prompt[self.unique_id]["inputs"]["image_output"] = "Preview" + else: + new_prompt[self.unique_id]["inputs"]["image_output"] = "Hide" + + return new_prompt + + def plot_images(self, z_label): + bg_width, bg_height, x_offset_initial, y_offset = self.calculate_background_dimensions() + + if self.invert_bg: + bg_color = (0, 0, 0, 255) + else: + bg_color = (255, 255, 255, 255) + + background = Image.new('RGBA', (int(bg_width), int(bg_height)), color=bg_color) + + for row_index in range(self.num_rows): + x_offset = x_offset_initial + + for col_index in range(self.num_cols): + index = col_index * self.num_rows + row_index + img = self.image_list[index] + background.paste(img, (x_offset, y_offset)) + + # Handle X label + if 
row_index == 0 and self.x_points is not None: + label_bg = self.create_label(img, self.x_labels[col_index], int(48 * img.width / 512)) + label_y = (y_offset - label_bg.height) // 2 + background.alpha_composite(label_bg, (x_offset, label_y)) + + # Handle Y label + if col_index == 0 and self.y_points is not None: + label_bg = self.create_label(img, self.y_labels[row_index], int(48 * img.height / 512), False) + label_bg = label_bg.rotate(90, expand=True) + + label_x = (x_offset - label_bg.width) // 2 + label_y = y_offset + (img.height - label_bg.height) // 2 + background.alpha_composite(label_bg, (label_x, label_y)) + + # Handle Z label + if z_label is not None: + label_bg = self.create_label(background, z_label, int(48 * img.height / 512)) + label_y = background.height - label_bg.height - (label_bg.height) // 2 + background.alpha_composite(label_bg, (0, label_y)) + + x_offset += img.width + self.grid_spacing + + y_offset += img.height + self.grid_spacing + + return sampler.pil2tensor(background) + + def adjust_font_size(self, text, initial_font_size, label_width): + font = self.get_font(initial_font_size) + left, top, right, bottom = font.getbbox(text) + text_width = right - left + + scaling_factor = 0.9 + if text_width > (label_width * scaling_factor): + return int(initial_font_size * (label_width / text_width) * scaling_factor) + else: + return initial_font_size + + def execute_prompt(self, prompt, extra_data, x_label, y_label, z_label): + prompt_id = uuid.uuid4() + + # Try to get the current event loop + try: + loop = asyncio.get_event_loop() + except RuntimeError: + loop = asyncio.new_event_loop() + asyncio.set_event_loop(loop) + + if loop.is_running(): + # Already inside an event loop (e.g. some backends or async-enabled ComfyUI) + import threading + + result_container = {} + + def run_coroutine(): + coro = execution.validate_prompt(prompt_id, prompt, None) + result_container["result"] = asyncio.run(coro) + + thread = threading.Thread(target=run_coroutine) + thread.start() + thread.join() + + valid = result_container["result"] + else: + # Safe to run directly + valid = loop.run_until_complete(execution.validate_prompt(prompt_id, prompt, None)) + + if valid[0]: + ttNl(f'{CC.GREY}X: {x_label}, Y: {y_label} Z: {z_label}').t(f'Plot Values {self.num}/{self.total} ->').p() + + self.executor.execute(prompt, self.num, extra_data, valid[2]) + + if len(self.executor.outputs.get(self.unique_id, [])) > 2: + self.latent_list.append(self.executor.outputs[self.unique_id][-6][0]["samples"]) + + image = self.executor.outputs[self.unique_id][-3][0] + else: + current_node = prompt[self.unique_id] + input_link = current_node["inputs"]["image"] + + image = self.executor.outputs[input_link[0]][input_link[1]][0] + + pil_image = ttNsampler.tensor2pil(image) + self.image_list.append(pil_image) + + self.max_width = max(self.max_width, pil_image.width) + self.max_height = max(self.max_height, pil_image.height) + else: + raise Exception(valid[1]) + + @staticmethod + def _parse_value(input_name, value, node_inputs, input_types, regex): + # append mode + if '.append' in input_name: + input_name = input_name.replace('.append', '') + value = node_inputs[input_name] + ' ' + value + + # Search and Replace + matches = regex.findall(value) + if matches: + value = node_inputs[input_name] + for search, replace in matches: + pattern = re.compile(re.escape(search), re.IGNORECASE) + value = pattern.sub(replace, value) + + # set value to correct type + for itype in ['required', 'optional']: + for iname in input_types.get(itype) 
or []: + if iname == input_name: + ivalues = input_types[itype][iname] + if ivalues[0] == 'INT': + value = int(float(value)) + elif ivalues[0] == 'FLOAT': + value = float(value) + elif ivalues[0] in ['BOOL', 'BOOLEAN']: + if value.lower() == 'true': + value = True + elif value.lower() == 'false': + value = False + value = bool(value) + elif type(ivalues[0]) == list: + if value not in ivalues[0]: + raise KeyError(f'"{value}" not a valid value for input "{iname}" in xyplot') + + return input_name, value + + def xy_plot_process(self): + if self.x_points is None and self.y_points is None: + return None, None, None, + + regex = re.compile(r'%(.*?);(.*?)%') + + x_label, y_label, z_label = None, None, None + base_prompt = self.get_relevant_prompt() + + if self.z_points is None: + self.z_points = {'1': {'label': None}} + + plot_images = [] + pil_images = [] + images = [] + latents = [] + + def update_prompt(prompt, nodes): + for node_id, inputs in nodes.items(): + if node_id == 'label': + continue + try: + node_inputs = prompt[node_id]["inputs"] + except KeyError: + raise KeyError(f'Node with ID: [{node_id}] not found in prompt for xyPlot') + class_type = prompt[node_id]["class_type"] + class_def = COMFY_CLASS_MAPPINGS[class_type] + input_types = class_def.INPUT_TYPES() + + for input_name, value in inputs.items(): + input_name, value = self._parse_value(input_name, value, node_inputs, input_types, regex) + node_inputs[input_name] = value + + return prompt + + def execute_y_plot(prompt, x_label, z_label): + for _, nodes in self.y_points.items(): + y_label = nodes["label"] + self.y_labels.append(y_label) + y_prompt = copy.deepcopy(prompt) + y_prompt = update_prompt(y_prompt, nodes) + + self.num += 1 + self.execute_prompt(y_prompt, self.extra_pnginfo, x_label, y_label, z_label) + + for _, nodes in self.z_points.items(): + z_label = nodes["label"] + z_prompt = copy.deepcopy(base_prompt) + z_prompt = update_prompt(z_prompt, nodes) + + if self.x_points: + for _, nodes in self.x_points.items(): + x_label = nodes["label"] + self.x_labels.append(x_label) + x_prompt = copy.deepcopy(z_prompt) + x_prompt = update_prompt(x_prompt, nodes) + + if self.y_points: + execute_y_plot(x_prompt, x_label, z_label) + else: + self.num += 1 + self.execute_prompt(x_prompt, self.extra_pnginfo, x_label, y_label, z_label) + + elif self.y_points: + execute_y_plot(z_prompt, None, z_label) + + # Rearrange latent array to match preview image grid + if len(self.latent_list) > 0: + latents.extend(self.rearrange_tensors(self.latent_list, self.num_cols, self.num_rows)) + + # Plot images + plot_images.append(self.plot_images(z_label)) + + # Rearrange images for outputs + pil_images.extend(self.rearrange_tensors(self.image_list, self.num_cols, self.num_rows)) + + self.clear_caches() + + # Concatenate the tensors along the first dimension (dim=0) + if len(latents) > 0: + latents = torch.cat(latents, dim=0) + + for image in pil_images: + images.append(sampler.pil2tensor(image)) + + plot_out = torch.cat(plot_images, dim=0) + images_out = torch.cat(images, dim=0) + samples = {"samples": latents} + + return plot_out, images_out, samples + +class ttNsave: + def __init__(self, my_unique_id=0, prompt=None, extra_pnginfo=None, number_padding=5, overwrite_existing=False, output_dir=folder_paths.get_temp_directory()): + self.number_padding = int(number_padding) if number_padding not in [None, "None", 0] else None + self.overwrite_existing = overwrite_existing + self.my_unique_id = my_unique_id + self.prompt = prompt + self.extra_pnginfo = 
extra_pnginfo + self.type = 'temp' + self.output_dir = output_dir + if self.output_dir != folder_paths.get_temp_directory(): + self.output_dir = self.folder_parser(self.output_dir, self.prompt, self.my_unique_id) + if not os.path.exists(self.output_dir): + self._create_directory(self.output_dir) + + @staticmethod + def _create_directory(folder: str): + """Try to create the directory and log the status.""" + ttNl(f"Folder {folder} does not exist. Attempting to create...").warn().p() + if not os.path.exists(folder): + try: + os.makedirs(folder) + ttNl(f"{folder} Created Successfully").success().p() + except OSError: + ttNl(f"Failed to create folder {folder}").error().p() + pass + + @staticmethod + def _map_filename(filename: str, filename_prefix: str) -> Tuple[int, str, Optional[int]]: + """Utility function to map filename to its parts.""" + + # Get the prefix length and extract the prefix + prefix_len = len(os.path.basename(filename_prefix)) + prefix = filename[:prefix_len] + + # Search for the primary digits + digits = re.search(r'(\d+)', filename[prefix_len:]) + + # Search for the number in brackets after the primary digits + group_id = re.search(r'\((\d+)\)', filename[prefix_len:]) + + return (int(digits.group()) if digits else 0, prefix, int(group_id.group(1)) if group_id else 0) + + @staticmethod + def _format_date(text: str, date: datetime.datetime) -> str: + """Format the date according to specific patterns.""" + date_formats = { + 'd': lambda d: d.day, + 'dd': lambda d: '{:02d}'.format(d.day), + 'M': lambda d: d.month, + 'MM': lambda d: '{:02d}'.format(d.month), + 'h': lambda d: d.hour, + 'hh': lambda d: '{:02d}'.format(d.hour), + 'm': lambda d: d.minute, + 'mm': lambda d: '{:02d}'.format(d.minute), + 's': lambda d: d.second, + 'ss': lambda d: '{:02d}'.format(d.second), + 'y': lambda d: d.year, + 'yy': lambda d: str(d.year)[2:], + 'yyy': lambda d: str(d.year)[1:], + 'yyyy': lambda d: d.year, + } + + # We need to sort the keys in reverse order to ensure we match the longest formats first + for format_str in sorted(date_formats.keys(), key=len, reverse=True): + if format_str in text: + text = text.replace(format_str, str(date_formats[format_str](date))) + return text + + @staticmethod + def _gather_all_inputs(prompt: Dict[str, dict], unique_id: str, linkInput: str = '', collected_inputs: Optional[Dict[str, Union[str, List[str]]]] = None) -> Dict[str, Union[str, List[str]]]: + """Recursively gather all inputs from the prompt dictionary.""" + if prompt == None: + return None + + collected_inputs = collected_inputs or {} + prompt_inputs = prompt[str(unique_id)]["inputs"] + + for p_input, p_input_value in prompt_inputs.items(): + a_input = f"{linkInput}>{p_input}" if linkInput else p_input + + if isinstance(p_input_value, list): + ttNsave._gather_all_inputs(prompt, p_input_value[0], a_input, collected_inputs) + else: + existing_value = collected_inputs.get(a_input) + if existing_value is None: + collected_inputs[a_input] = p_input_value + elif p_input_value not in existing_value: + collected_inputs[a_input] = existing_value + "; " + p_input_value + + return collected_inputs + + @staticmethod + def _get_filename_with_padding(output_dir, filename, number_padding, group_id, ext): + """Return filename with proper padding.""" + try: + filtered = list(filter(lambda a: a[1] == filename, map(lambda x: ttNsave._map_filename(x, filename), os.listdir(output_dir)))) + last = max(filtered)[0] + + for f in filtered: + if f[0] == last: + if f[2] == 0 or f[2] == group_id: + last += 1 + counter = last + 
except (ValueError, FileNotFoundError): + os.makedirs(output_dir, exist_ok=True) + counter = 1 + + if group_id == 0: + return f"{filename}.{ext}" if number_padding is None else f"{filename}_{counter:0{number_padding}}.{ext}" + else: + return f"{filename}_({group_id}).{ext}" if number_padding is None else f"{filename}_{counter:0{number_padding}}_({group_id}).{ext}" + + @staticmethod + def filename_parser(output_dir: str, filename_prefix: str, prompt: Dict[str, dict], my_unique_id: str, number_padding: int, group_id: int, ext: str) -> str: + """Parse the filename using provided patterns and replace them with actual values.""" + filename = re.sub(r'%date:(.*?)%', lambda m: ttNsave._format_date(m.group(1), datetime.datetime.now()), filename_prefix) + all_inputs = ttNsave._gather_all_inputs(prompt, my_unique_id) + + #filename = re.sub(r'%(.*?)\s*(?::(\d+))?%', lambda m: re.sub(r'[^a-zA-Z0-9_\-\. ]', '', str(all_inputs.get(m.group(1), ''))[:int(m.group(2)) if m.group(2) else len(str(all_inputs.get(m.group(1), '')))]), filename) + + filename = re.sub(r'%(.*?)%', lambda m: re.sub(r'[^a-zA-Z0-9_\-\. ]', '', str(all_inputs.get(m.group(1), ''))), filename) + + subfolder = os.path.dirname(os.path.normpath(filename)) + filename = os.path.basename(os.path.normpath(filename)) + + output_dir = os.path.join(output_dir, subfolder) + + filename = re.sub(r'[^a-zA-Z0-9_\-\. ]', '', filename)[:240-len(ext)] + filename = ttNsave._get_filename_with_padding(output_dir, filename, number_padding, group_id, ext) + + return filename, subfolder + + @staticmethod + def folder_parser(output_dir: str, prompt: Dict[str, dict], my_unique_id: str): + output_dir = re.sub(r'%date:(.*?)%', lambda m: ttNsave._format_date(m.group(1), datetime.datetime.now()), output_dir) + all_inputs = ttNsave._gather_all_inputs(prompt, my_unique_id) + + return re.sub(r'%(.*?)%', lambda m: re.sub(r'[^a-zA-Z0-9_\-\. ]', '', str(all_inputs.get(m.group(1), ''))), output_dir) + #return re.sub(r'%(.*?)\s*(?::(\d+))?%', lambda m: re.sub(r'[^a-zA-Z0-9_\-\. ]', '', str(all_inputs.get(m.group(1), ''))[:int(m.group(2)) if m.group(2) else len(str(all_inputs.get(m.group(1), '')))]), output_dir) + + def images(self, images, filename_prefix, output_type, embed_workflow=True, ext="png", group_id=0): + FORMAT_MAP = { + "png": "PNG", + "jpg": "JPEG", + "jpeg": "JPEG", + "bmp": "BMP", + "tif": "TIFF", + "tiff": "TIFF", + "webp": "WEBP", + } + + if ext not in FORMAT_MAP: + raise ValueError(f"Unsupported file extension {ext}") + + if output_type in ("Hide", "Disabled"): + return list() + if output_type in ("Save", "Hide/Save"): + output_dir = self.output_dir if self.output_dir != folder_paths.get_temp_directory() else folder_paths.get_output_directory() + self.type = "output" + if output_type == "Preview": + output_dir = folder_paths.get_temp_directory() + filename_prefix = 'ttNpreview' + ext = "png" + + results=list() + for image in images: + img = Image.fromarray(np.clip(255. 
* image.cpu().numpy(), 0, 255).astype(np.uint8)) + + filename = filename_prefix.replace("%width%", str(img.size[0])).replace("%height%", str(img.size[1])) + + filename, subfolder = ttNsave.filename_parser(output_dir, filename, self.prompt, self.my_unique_id, self.number_padding, group_id, ext) + + file_path = os.path.join(output_dir, subfolder, filename) + + if (embed_workflow in (True, "True")) and (ext in ("png", "webp")): + if ext == "png": + metadata = PngInfo() + if self.prompt is not None: + metadata.add_text("prompt", json.dumps(self.prompt)) + + if self.extra_pnginfo is not None: + for x in self.extra_pnginfo: + metadata.add_text(x, json.dumps(self.extra_pnginfo[x])) + + if self.overwrite_existing or not os.path.isfile(file_path): + img.save(file_path, pnginfo=metadata, format=FORMAT_MAP[ext]) + else: + ttNl(f"File {file_path} already exists... Skipping").error().p() + + if ext == "webp": + img_exif = img.getexif() + workflow_metadata = '' + prompt_str = '' + if self.prompt is not None: + prompt_str = json.dumps(self.prompt) + img_exif[0x010f] = "Prompt:" + prompt_str + + if self.extra_pnginfo is not None: + for x in self.extra_pnginfo: + workflow_metadata += json.dumps(self.extra_pnginfo[x]) + + img_exif[0x010e] = "Workflow:" + workflow_metadata + exif_data = img_exif.tobytes() + + if self.overwrite_existing or not os.path.isfile(file_path): + img.save(file_path, exif=exif_data, format=FORMAT_MAP[ext]) + else: + ttNl(f"File {file_path} already exists... Skipping").error().p() + else: + if self.overwrite_existing or not os.path.isfile(file_path): + img.save(file_path, format=FORMAT_MAP[ext]) + else: + ttNl(f"File {file_path} already exists... Skipping").error().p() + + results.append({ + "filename": file_path, + "subfolder": subfolder, + "type": self.type + }) + + return results + + def textfile(self, text, filename_prefix, ext='txt'): + output_dir = self.output_dir if self.output_dir != folder_paths.get_temp_directory() else folder_paths.get_output_directory() + + filename, subfolder = ttNsave.filename_parser(output_dir, filename_prefix, self.prompt, self.my_unique_id, self.number_padding, 0, ext) + + file_path = os.path.join(output_dir, subfolder, filename) + + if self.overwrite_existing or not os.path.isfile(file_path): + with open(file_path, 'w') as f: + f.write(text) + else: + ttNl(f"File {file_path} already exists... 
Skipping").error().p() + +loader = ttNloader() +sampler = ttNsampler() + +#---------------------------------------------------------------ttN/pipe START----------------------------------------------------------------------# +class ttN_pipeLoader_v2: + version = '2.1.0' + @classmethod + def INPUT_TYPES(cls): + aspect_ratios = ["width x height [custom]", + "512 x 512 [S] 1:1", + "768 x 768 [S] 1:1", + "910 x 910 [S] 1:1", + + "512 x 682 [P] 3:4", + "512 x 768 [P] 2:3", + "512 x 910 [P] 9:16", + + "682 x 512 [L] 4:3", + "768 x 512 [L] 3:2", + "910 x 512 [L] 16:9", + + "512 x 1024 [P] 1:2", + "1024 x 512 [L] 2:1", + "1024 x 1024 [S] 1:1", + ] + + return {"required": { + "ckpt_name": (folder_paths.get_filename_list("checkpoints"), ), + "config_name": (["Default",] + folder_paths.get_filename_list("configs"), {"default": "Default"} ), + "vae_name": (["Baked VAE"] + folder_paths.get_filename_list("vae"),), + "clip_skip": ("INT", {"default": -1, "min": -24, "max": 0, "step": 1}), + + "loras": ("STRING", {"placeholder": "", "multiline": True}), + + "positive": ("STRING", {"default": "Positive","multiline": True, "dynamicPrompts": True}), + "positive_token_normalization": (["none", "mean", "length", "length+mean"],), + "positive_weight_interpretation": (["comfy", "A1111", "compel", "comfy++", "down_weight"],), + + "negative": ("STRING", {"default": "Negative", "multiline": True, "dynamicPrompts": True}), + "negative_token_normalization": (["none", "mean", "length", "length+mean"],), + "negative_weight_interpretation": (["comfy", "A1111", "compel", "comfy++", "down_weight"],), + + "empty_latent_aspect": (aspect_ratios, {"default":"512 x 512 [S] 1:1"}), + "empty_latent_width": ("INT", {"default": 512, "min": 64, "max": MAX_RESOLUTION, "step": 8}), + "empty_latent_height": ("INT", {"default": 512, "min": 64, "max": MAX_RESOLUTION, "step": 8}), + "batch_size": ("INT", {"default": 1, "min": 1, "max": 64}), + "seed": ("INT", {"default": 0, "min": 0, "max": 0xffffffffffffffff}), + }, + "optional": { + "model_override": ("MODEL",), + "clip_override": ("CLIP",), + "optional_lora_stack": ("LORA_STACK",), + "optional_controlnet_stack": ("CONTROL_NET_STACK",), + "prepend_positive": ("STRING", {"forceInput": True}), + "prepend_negative": ("STRING", {"forceInput": True}), + }, + "hidden": {"prompt": "PROMPT", "ttNnodeVersion": ttN_pipeLoader_v2.version, "my_unique_id": "UNIQUE_ID",} + } + + RETURN_TYPES = ("PIPE_LINE" ,"MODEL", "CONDITIONING", "CONDITIONING", "LATENT", "VAE", "CLIP", "INT", "INT", "INT", "STRING", "STRING") + RETURN_NAMES = ("pipe","model", "positive", "negative", "latent", "vae", "clip", "seed", "width", "height", "pos_string", "neg_string") + + FUNCTION = "adv_pipeloader" + CATEGORY = "🌏 tinyterra/pipe" + + def adv_pipeloader(self, ckpt_name, config_name, vae_name, clip_skip, + loras, + positive, positive_token_normalization, positive_weight_interpretation, + negative, negative_token_normalization, negative_weight_interpretation, + empty_latent_aspect, empty_latent_width, empty_latent_height, batch_size, seed, + model_override=None, clip_override=None, optional_lora_stack=None, optional_controlnet_stack=None, prepend_positive=None, prepend_negative=None, + prompt=None, my_unique_id=None): + + model: ModelPatcher | None = None + clip: CLIP | None = None + vae: VAE | None = None + + loader.clear_cache(prompt) + model, clip, vae = loader.load_main3(ckpt_name, config_name, vae_name, loras, clip_skip, model_override, clip_override, optional_lora_stack, my_unique_id) + + # Create Empty Latent + sd3 = 
True if sampler.get_model_type(model) in ['FLUX', 'FLOW'] else False + latent = sampler.emptyLatent(empty_latent_aspect, batch_size, empty_latent_width, empty_latent_height, sd3) + samples = {"samples":latent} + + positive_embedding = loader.embedding_encode(positive, positive_token_normalization, positive_weight_interpretation, clip, seed=seed, title='pipeLoader Positive', my_unique_id=my_unique_id, prepend_text=prepend_positive) + negative_embedding = loader.embedding_encode(negative, negative_token_normalization, negative_weight_interpretation, clip, seed=seed, title='pipeLoader Negative', my_unique_id=my_unique_id, prepend_text=prepend_negative) + + if optional_controlnet_stack is not None and len(optional_controlnet_stack) > 0: + for cnt in optional_controlnet_stack: + positive_embedding, negative_embedding = loader.load_controlNet(positive_embedding, negative_embedding, cnt[0], cnt[1], cnt[2], cnt[3], cnt[4]) + + image = None + + pipe = {"model": model, + "positive": positive_embedding, + "negative": negative_embedding, + "vae": vae, + "clip": clip, + + "samples": samples, + "images": image, + "seed": seed, + + "loader_settings": None, + } + + final_positive = (prepend_positive + ' ' if prepend_positive else '') + (positive + ' ' if positive else '') + final_negative = (prepend_negative + ' ' if prepend_negative else '') + (negative + ' ' if negative else '') + + return (pipe, model, positive_embedding, negative_embedding, samples, vae, clip, seed, empty_latent_width, empty_latent_height, final_positive, final_negative) + +class ttN_pipeKSampler_v2: + version = '2.3.1' + def __init__(self): + pass + + @classmethod + def INPUT_TYPES(cls): + return {"required": + {"pipe": ("PIPE_LINE",), + + "lora_name": (["None"] + folder_paths.get_filename_list("loras"),), + "lora_strength": ("FLOAT", {"default": 1.0, "min": -10.0, "max": 10.0, "step": 0.01}), + + "upscale_method": (UPSCALE_METHODS, {"default": "None"}), + "upscale_model_name": (UPSCALE_MODELS,), + "factor": ("FLOAT", {"default": 2, "min": 0.0, "max": 10.0, "step": 0.25}), + "rescale": (["by percentage", "to Width/Height", 'to longer side - maintain aspect', 'None'],), + "percent": ("INT", {"default": 50, "min": 0, "max": 1000, "step": 1}), + "width": ("INT", {"default": 1024, "min": 64, "max": MAX_RESOLUTION, "step": 8}), + "height": ("INT", {"default": 1024, "min": 64, "max": MAX_RESOLUTION, "step": 8}), + "longer_side": ("INT", {"default": 1024, "min": 64, "max": MAX_RESOLUTION, "step": 8}), + "crop": (CROP_METHODS,), + + "steps": ("INT", {"default": 20, "min": 1, "max": 10000}), + "cfg": ("FLOAT", {"default": 8.0, "min": 0.0, "max": 100.0}), + "sampler_name": (comfy.samplers.KSampler.SAMPLERS,), + "scheduler": (comfy.samplers.KSampler.SCHEDULERS + CUSTOM_SCHEDULERS,), + "denoise": ("FLOAT", {"default": 1.0, "min": 0.0, "max": 1.0, "step": 0.01}), + "image_output": (["Hide", "Preview", "Save", "Hide/Save", "Disabled"],), + "save_prefix": ("STRING", {"default": "ComfyUI"}), + "file_type": (OUTPUT_FILETYPES,{"default": "png"}), + "embed_workflow": ("BOOLEAN", {"default": True}), + }, + "optional": + {"seed": ("INT", {"default": 0, "min": 0, "max": 0xffffffffffffffff}), + "optional_model": ("MODEL",), + "optional_positive": ("CONDITIONING",), + "optional_negative": ("CONDITIONING",), + "optional_latent": ("LATENT",), + "optional_vae": ("VAE",), + "optional_clip": ("CLIP",), + "input_image_override": ("IMAGE",), + "adv_xyPlot": ("ADV_XYPLOT",), + }, + "hidden": + {"prompt": "PROMPT", "extra_pnginfo": "EXTRA_PNGINFO", "my_unique_id": 
"UNIQUE_ID", + "ttNnodeVersion": ttN_pipeKSampler_v2.version}, + } + + RETURN_TYPES = ("PIPE_LINE", "MODEL", "CONDITIONING", "CONDITIONING", "LATENT", "VAE", "CLIP", "IMAGE", "INT", "IMAGE") + RETURN_NAMES = ("pipe", "model", "positive", "negative", "latent","vae", "clip", "images", "seed", "plot_image") + OUTPUT_NODE = True + FUNCTION = "sample" + CATEGORY = "🌏 tinyterra/pipe" + + def sample(self, pipe, + lora_name, lora_strength, + steps, cfg, sampler_name, scheduler, image_output, save_prefix, file_type, embed_workflow, denoise=1.0, + optional_model=None, optional_positive=None, optional_negative=None, optional_latent=None, optional_vae=None, optional_clip=None, input_image_override=None, + seed=None, adv_xyPlot=None, upscale_model_name=None, upscale_method=None, factor=None, rescale=None, percent=None, width=None, height=None, longer_side=None, crop=None, + prompt=None, extra_pnginfo=None, my_unique_id=None, start_step=None, last_step=None, force_full_denoise=False, disable_noise=False): + + my_unique_id = int(my_unique_id) + + ttN_save = ttNsave(my_unique_id, prompt, extra_pnginfo) + + samp_model = optional_model if optional_model is not None else pipe["model"] + samp_positive = optional_positive if optional_positive is not None else pipe["positive"] + samp_negative = optional_negative if optional_negative is not None else pipe["negative"] + samp_samples = optional_latent if optional_latent is not None else pipe["samples"] + samp_images = input_image_override if input_image_override is not None else pipe["images"] + samp_vae = optional_vae if optional_vae is not None else pipe["vae"] + samp_clip = optional_clip if optional_clip is not None else pipe["clip"] + + if seed in (None, 'undefined'): + samp_seed = pipe["seed"] + else: + samp_seed = seed + + del pipe + + def process_sample_state(samp_model, samp_images, samp_clip, samp_samples, samp_vae, samp_seed, samp_positive, samp_negative, lora_name, lora_model_strength, lora_clip_strength, + upscale_model_name, upscale_method, factor, rescale, percent, width, height, longer_side, crop, + steps, cfg, sampler_name, scheduler, denoise, + image_output, save_prefix, file_type, embed_workflow, prompt, extra_pnginfo, my_unique_id, preview_latent, start_step=start_step, last_step=last_step, force_full_denoise=force_full_denoise, disable_noise=disable_noise): + # Load Lora + if lora_name not in (None, "None"): + samp_model, samp_clip = loader.load_lora(lora_name, samp_model, samp_clip, lora_model_strength, lora_clip_strength) + + # Upscale samples if enabled + if upscale_method != "None": + samp_samples = sampler.handle_upscale(samp_samples, upscale_method, factor, crop, upscale_model_name, samp_vae, samp_images, rescale, percent, width, height, longer_side) + + samp_samples = sampler.common_ksampler(samp_model, samp_seed, steps, cfg, sampler_name, scheduler, samp_positive, samp_negative, samp_samples, denoise=denoise, preview_latent=preview_latent, start_step=start_step, last_step=last_step, force_full_denoise=force_full_denoise, disable_noise=disable_noise) + + results = list() + if (image_output != "Disabled"): + # Save images + latent = samp_samples["samples"] + samp_images = samp_vae.decode(latent) + + results = ttN_save.images(samp_images, save_prefix, image_output, embed_workflow, file_type) + + new_pipe = { + "model": samp_model, + "positive": samp_positive, + "negative": samp_negative, + "vae": samp_vae, + "clip": samp_clip, + + "samples": samp_samples, + "images": samp_images, + "seed": samp_seed, + + "loader_settings": None, + } + + if 
image_output in ("Hide", "Hide/Save", "Disabled"): + return (*sampler.get_output(new_pipe), None) + + return {"ui": {"images": results}, + "result": (*sampler.get_output(new_pipe), None)} + + def process_xyPlot(samp_model, samp_clip, samp_samples, samp_vae, samp_seed, samp_positive, samp_negative, lora_name, lora_model_strength, lora_clip_strength, + steps, cfg, sampler_name, scheduler, denoise, + image_output, save_prefix, file_type, embed_workflow, prompt, extra_pnginfo, my_unique_id, preview_latent, adv_xyPlot): + + random.seed(seed) + + executor = xyExecutor() + plotter = ttNadv_xyPlot(adv_xyPlot, my_unique_id, prompt, extra_pnginfo, save_prefix, image_output, executor) + plot_image, images, samples = plotter.xy_plot_process() + plotter.reset() + del executor, plotter + + if samples is None and images is None: + return process_sample_state(samp_model, samp_images, samp_clip, samp_samples, samp_vae, samp_seed, samp_positive, samp_negative, lora_name, lora_model_strength, lora_clip_strength, + upscale_model_name, upscale_method, factor, rescale, percent, width, height, longer_side, crop, + steps, cfg, sampler_name, scheduler, denoise, + image_output, save_prefix, file_type, embed_workflow, prompt, extra_pnginfo, my_unique_id, preview_latent, start_step=start_step, last_step=last_step, force_full_denoise=force_full_denoise, disable_noise=disable_noise) + + + plot_result = ttN_save.images(plot_image, save_prefix, image_output, embed_workflow, file_type) + #plot_result.extend(ui_results) + + new_pipe = { + "model": samp_model, + "positive": samp_positive, + "negative": samp_negative, + "vae": samp_vae, + "clip": samp_clip, + + "samples": samples, + "images": images, + "seed": samp_seed, + + "loader_settings": None, + } + + if image_output in ("Hide", "Hide/Save"): + return (*sampler.get_output(new_pipe), plot_image) + + return {"ui": {"images": plot_result}, "result": (*sampler.get_output(new_pipe), plot_image)} + + preview_latent = True + if image_output in ("Hide", "Hide/Save", "Disabled"): + preview_latent = False + + if adv_xyPlot is None: + return process_sample_state(samp_model, samp_images, samp_clip, samp_samples, samp_vae, samp_seed, samp_positive, samp_negative, lora_name, lora_strength, lora_strength, + upscale_model_name, upscale_method, factor, rescale, percent, width, height, longer_side, crop, + steps, cfg, sampler_name, scheduler, denoise, image_output, save_prefix, file_type, embed_workflow, prompt, extra_pnginfo, my_unique_id, preview_latent) + else: + return process_xyPlot(samp_model, samp_clip, samp_samples, samp_vae, samp_seed, samp_positive, samp_negative, lora_name, lora_strength, lora_strength, steps, cfg, sampler_name, + scheduler, denoise, image_output, save_prefix, file_type, embed_workflow, prompt, extra_pnginfo, my_unique_id, preview_latent, adv_xyPlot) + +class ttN_pipeKSamplerAdvanced_v2: + version = '2.3.0' + def __init__(self): + pass + + @classmethod + def INPUT_TYPES(cls): + return { + "required": { + "pipe": ("PIPE_LINE",), + + "lora_name": (["None"] + folder_paths.get_filename_list("loras"),), + "lora_strength": ("FLOAT", {"default": 1.0, "min": -10.0, "max": 10.0, "step": 0.01}), + + "upscale_method": (UPSCALE_METHODS, {"default": "None"}), + "upscale_model_name": (UPSCALE_MODELS,), + "factor": ("FLOAT", {"default": 2, "min": 0.0, "max": 10.0, "step": 0.25}), + "rescale": (["by percentage", "to Width/Height", 'to longer side - maintain aspect', 'None'],), + "percent": ("INT", {"default": 50, "min": 0, "max": 1000, "step": 1}), + "width": ("INT", 
{"default": 1024, "min": 64, "max": MAX_RESOLUTION, "step": 8}), + "height": ("INT", {"default": 1024, "min": 64, "max": MAX_RESOLUTION, "step": 8}), + "longer_side": ("INT", {"default": 1024, "min": 64, "max": MAX_RESOLUTION, "step": 8}), + "crop": (CROP_METHODS,), + + "add_noise": (["enable", "disable"], ), + "noise": ("FLOAT", {"default": 1.0, "min": 0.0, "max": 1.0, "step": 0.01}), + + + "steps": ("INT", {"default": 20, "min": 1, "max": 10000}), + "start_at_step": ("INT", {"default": 0, "min": 0, "max": 10000}), + "end_at_step": ("INT", {"default": 10000, "min": 0, "max": 10000}), + "cfg": ("FLOAT", {"default": 8.0, "min": 0.0, "max": 100.0}), + "sampler_name": (comfy.samplers.KSampler.SAMPLERS,), + "scheduler": (comfy.samplers.KSampler.SCHEDULERS + CUSTOM_SCHEDULERS,), + "return_with_leftover_noise": (["disable", "enable"], ), + "image_output": (["Hide", "Preview", "Save", "Hide/Save", "Disabled"],), + "save_prefix": ("STRING", {"default": "ComfyUI"}), + "file_type": (OUTPUT_FILETYPES,{"default": "png"}), + "embed_workflow": ("BOOLEAN", {"default": True}), + }, + "optional": { + "noise_seed": ("INT", {"default": 0, "min": 0, "max": 0xffffffffffffffff}), + "optional_model": ("MODEL",), + "optional_positive": ("CONDITIONING",), + "optional_negative": ("CONDITIONING",), + "optional_latent": ("LATENT",), + "optional_vae": ("VAE",), + "optional_clip": ("CLIP",), + "input_image_override": ("IMAGE",), + "adv_xyPlot": ("ADV_XYPLOT",), + }, + "hidden": { + "prompt": "PROMPT", + "extra_pnginfo": "EXTRA_PNGINFO", + "my_unique_id": "UNIQUE_ID", + "ttNnodeVersion": ttN_pipeKSamplerAdvanced_v2.version + }, + } + RETURN_TYPES = ("PIPE_LINE", "MODEL", "CONDITIONING", "CONDITIONING", "LATENT", "VAE", "CLIP", "IMAGE", "INT", "IMAGE") + RETURN_NAMES = ("pipe", "model", "positive", "negative", "latent","vae", "clip", "images", "seed", "plot_image") + OUTPUT_NODE = True + FUNCTION = "adv_sample" + CATEGORY = "🌏 tinyterra/pipe" + + def adv_sample(self, pipe, + lora_name, lora_strength, + add_noise, steps, cfg, sampler_name, scheduler, image_output, save_prefix, file_type, embed_workflow, noise, + noise_seed=None, optional_model=None, optional_positive=None, optional_negative=None, optional_latent=None, optional_vae=None, optional_clip=None, input_image_override=None, adv_xyPlot=None, upscale_method=None, upscale_model_name=None, factor=None, rescale=None, percent=None, width=None, height=None, longer_side=None, crop=None, prompt=None, extra_pnginfo=None, my_unique_id=None, start_at_step=None, end_at_step=None, return_with_leftover_noise=False): + + force_full_denoise = True + if return_with_leftover_noise == "enable": + force_full_denoise = False + + disable_noise = False + if add_noise == "disable": + disable_noise = True + + return ttN_pipeKSampler_v2.sample(self, pipe, lora_name, lora_strength, steps, cfg, sampler_name, scheduler, image_output, save_prefix, file_type, embed_workflow, noise, + optional_model, optional_positive, optional_negative, optional_latent, optional_vae, optional_clip, input_image_override, noise_seed, adv_xyPlot, upscale_model_name, upscale_method, factor, rescale, percent, width, height, longer_side, crop, prompt, extra_pnginfo, my_unique_id, start_at_step, end_at_step, force_full_denoise, disable_noise) + +class ttN_pipeLoaderSDXL_v2: + version = '2.1.0' + @classmethod + def INPUT_TYPES(cls): + aspect_ratios = ["width x height [custom]", + "1024 x 1024 [S] 1:1", + + "640 x 1536 [P] 9:21", + "704 x 1472 [P] 9:19", + "768 x 1344 [P] 9:16", + "768 x 1216 [P] 5:8", + "832 x 1216 [P] 
2:3", + "896 x 1152 [P] 3:4", + + "1536 x 640 [L] 21:9", + "1472 x 704 [L] 19:9", + "1344 x 768 [L] 16:9", + "1216 x 768 [L] 8:5", + "1216 x 832 [L] 3:2", + "1152 x 896 [L] 4:3", + ] + relative_ratios = ["width x height [custom]", + "1x Empty Latent Aspect", + "2x Empty Latent Aspect", + "3x Empty Latent Aspect", + "4x Empty Latent Aspect", + "5x Empty Latent Aspect", + "6x Empty Latent ASpect", + "7x Empty Latent Aspect", + "8x Empty Latent Aspect", + ] + + return {"required": { + "ckpt_name": (folder_paths.get_filename_list("checkpoints"), ), + "config_name": (["Default",] + folder_paths.get_filename_list("configs"), {"default": "Default"} ), + "vae_name": (["Baked VAE"] + folder_paths.get_filename_list("vae"),), + "clip_skip": ("INT", {"default": -2, "min": -24, "max": 0, "step": 1}), + + "loras": ("STRING", {"placeholder": "Loras - ", "multiline": True}), + + "refiner_ckpt_name": (["None"] + folder_paths.get_filename_list("checkpoints"), ), + "refiner_config_name": (["Default",] + folder_paths.get_filename_list("configs"), {"default": "Default"} ), + + "positive_g": ("STRING", {"placeholder": "Linguistic Positive (positive_g)","multiline": True, "dynamicPrompts": True}), + "positive_l": ("STRING", {"placeholder": "Supporting Terms (positive_l)", "multiline": True, "dynamicPrompts": True}), + "negative_g": ("STRING", {"placeholder": "negative_g", "multiline": True, "dynamicPrompts": True}), + "negative_l": ("STRING", {"placeholder": "negative_l", "multiline": True, "dynamicPrompts": True}), + + "conditioning_aspect": (relative_ratios, {"default": "1x Empty Latent Aspect"}), + "conditioning_width": ("INT", {"default": 2048.0, "min": 64, "max": MAX_RESOLUTION, "step": 8}), + "conditioning_height": ("INT", {"default": 2048.0, "min": 64, "max": MAX_RESOLUTION, "step": 8}), + + "crop_width": ("INT", {"default": 0, "min": 0, "max": MAX_RESOLUTION}), + "crop_height": ("INT", {"default": 0, "min": 0, "max": MAX_RESOLUTION}), + + "target_aspect": (relative_ratios, {"default": "1x Empty Latent Aspect"}), + "target_width": ("INT", {"default": 1024.0, "min": 0, "max": MAX_RESOLUTION}), + "target_height": ("INT", {"default": 1024.0, "min": 0, "max": MAX_RESOLUTION}), + + "positive_ascore": ("INT", {"default": 6.0, "min": 0, "step": 0.1}), + "negative_ascore": ("INT", {"default": 2.0, "min": 0, "step": 0.1}), + + "empty_latent_aspect": (aspect_ratios, {"default": "1024 x 1024 [S] 1:1"}), + "empty_latent_width": ("INT", {"default": 1024, "min": 64, "max": MAX_RESOLUTION, "step": 8}), + "empty_latent_height": ("INT", {"default": 1024, "min": 64, "max": MAX_RESOLUTION, "step": 8}), + "batch_size": ("INT", {"default": 1, "min": 1, "max": 64}), + "seed": ("INT", {"default": 0, "min": 0, "max": 0xffffffffffffffff}), + }, + "optional": { + "model_override": ("MODEL",), + "clip_override": ("CLIP",), + "optional_lora_stack": ("LORA_STACK",), + "optional_controlnet_stack": ("CONTROL_NET_STACK",), + "refiner_model_override": ("MODEL",), + "refiner_clip_override": ("CLIP",), + "prepend_positive_g": ("STRING", {"forceInput": True}), + "prepend_positive_l": ("STRING", {"forceInput": True}), + "prepend_negative_g": ("STRING", {"forceInput": True}), + "prepend_negative_l": ("STRING", {"forceInput": True}), + }, + "hidden": {"prompt": "PROMPT", "ttNnodeVersion": ttN_pipeLoaderSDXL_v2.version, "my_unique_id": "UNIQUE_ID",} + } + + RETURN_TYPES = ("PIPE_LINE_SDXL" ,"MODEL", "CONDITIONING", "CONDITIONING", "VAE", "CLIP", "MODEL", "CONDITIONING", "CONDITIONING", "CLIP", "LATENT", "INT", "INT", "INT", "STRING", "STRING") 
+ RETURN_NAMES = ("sdxl_pipe","model", "positive", "negative", "vae", "clip", "refiner_model", "refiner_positive", "refiner_negative", "refiner_clip", "latent", "seed", "width", "height", "pos_string", "neg_string") + + + FUNCTION = "sdxl_pipeloader" + CATEGORY = "🌏 tinyterra/pipe" + + def sdxl_pipeloader(self, ckpt_name, config_name, vae_name, clip_skip, loras, + refiner_ckpt_name, refiner_config_name, + conditioning_aspect, conditioning_width, conditioning_height, crop_width, crop_height, target_aspect, target_width, target_height, + positive_g, positive_l, negative_g, negative_l, + positive_ascore, negative_ascore, + empty_latent_aspect, empty_latent_width, empty_latent_height, batch_size, seed, + model_override=None, clip_override=None, optional_lora_stack=None, optional_controlnet_stack=None, + refiner_model_override=None, refiner_clip_override=None, + prepend_positive_g=None, prepend_positive_l=None, prepend_negative_g=None, prepend_negative_l=None, + prompt=None, my_unique_id=None): + + model: ModelPatcher | None = None + clip: CLIP | None = None + vae: VAE | None = None + + loader.clear_cache(prompt) + model, clip, vae = loader.load_main3(ckpt_name, config_name, vae_name, loras, clip_skip, model_override, clip_override, optional_lora_stack, my_unique_id) + + # Create Empty Latent + sd3 = True if sampler.get_model_type(model) in ['FLUX', 'FLOW'] else False + latent = sampler.emptyLatent(empty_latent_aspect, batch_size, empty_latent_width, empty_latent_height, sd3) + samples = {"samples":latent} + + if refiner_ckpt_name not in ["None", None]: + refiner_model, refiner_clip, refiner_vae = loader.load_main3(refiner_ckpt_name, refiner_config_name, vae_name, None, clip_skip, refiner_model_override, refiner_clip_override) + else: + refiner_model, refiner_clip, refiner_vae = None, None, None + + if empty_latent_aspect and empty_latent_aspect != "width x height [custom]": + empty_latent_width, empty_latent_height = empty_latent_aspect.replace(' ', '').split('[')[0].split('x') + + if conditioning_aspect and conditioning_aspect != "width x height [custom]": + conditioning_factor = conditioning_aspect.split('x')[0] + conditioning_width = int(conditioning_factor) * int(empty_latent_width) + conditioning_height = int(conditioning_factor) * int(empty_latent_height) + + if target_aspect and target_aspect != "width x height [custom]": + target_factor = target_aspect.split('x')[0] + target_width = int(target_factor) * int(empty_latent_width) + target_height = int(target_factor) * int(empty_latent_height) + + + positive_embedding, refiner_positive_embedding = loader.embedding_encodeXL(positive_g, clip, seed=seed, title='pipeLoaderSDXL Positive', my_unique_id=my_unique_id, prepend_text=prepend_positive_g, text2=positive_l, prepend_text2=prepend_positive_l, width=conditioning_width, height=conditioning_height, crop_width=crop_width, crop_height=crop_height, target_width=target_width, target_height=target_height, refiner_clip=refiner_clip, ascore=positive_ascore) + negative_embedding, refiner_negative_embedding = loader.embedding_encodeXL(negative_g, clip, seed=seed, title='pipeLoaderSDXL Negative', my_unique_id=my_unique_id, prepend_text=prepend_negative_g, text2=negative_l, prepend_text2=prepend_negative_l, width=conditioning_width, height=conditioning_height, crop_width=crop_width, crop_height=crop_height, target_width=target_width, target_height=target_height, refiner_clip=refiner_clip, ascore=negative_ascore) + + + if optional_controlnet_stack is not None: + for cnt in optional_controlnet_stack: + 
positive_embedding, negative_embedding = loader.load_controlNet(positive_embedding, negative_embedding, cnt[0], cnt[1], cnt[2], cnt[3], cnt[4]) + + image = None + + sdxl_pipe = {"model": model, + "positive": positive_embedding, + "negative": negative_embedding, + "vae": vae, + "clip": clip, + + "refiner_model": refiner_model, + "refiner_positive": refiner_positive_embedding, + "refiner_negative": refiner_negative_embedding, + "refiner_clip": refiner_clip, + + "samples": samples, + "images": image, + "seed": seed, + + "loader_settings": None + } + + final_positive = (prepend_positive_g + ' ' if prepend_positive_g else '') + (positive_g + ' ' if positive_g else '') + (prepend_positive_l + ' ' if prepend_positive_l else '') + (positive_l + ' ' if positive_l else '') + final_negative = (prepend_negative_g + ' ' if prepend_negative_g else '') + (negative_g + ' ' if negative_g else '') + (prepend_negative_l + ' ' if prepend_negative_l else '') + (negative_l + ' ' if negative_l else '') + + return (sdxl_pipe, model, positive_embedding, negative_embedding, vae, clip, refiner_model, refiner_positive_embedding, refiner_negative_embedding, refiner_clip, samples, seed, empty_latent_width, empty_latent_height, final_positive, final_negative) + +class ttN_pipeKSamplerSDXL_v2: + version = '2.3.1' + def __init__(self): + pass + + @classmethod + def INPUT_TYPES(cls): + return {"required": + {"sdxl_pipe": ("PIPE_LINE_SDXL",), + + "lora_name": (["None"] + folder_paths.get_filename_list("loras"),), + "lora_strength": ("FLOAT", {"default": 1.0, "min": -10.0, "max": 10.0, "step": 0.01}), + + "upscale_method": (UPSCALE_METHODS, {"default": "None"}), + "upscale_model_name": (UPSCALE_MODELS,), + "factor": ("FLOAT", {"default": 2, "min": 0.0, "max": 10.0, "step": 0.25}), + "rescale": (["by percentage", "to Width/Height", 'to longer side - maintain aspect', 'None'],), + "percent": ("INT", {"default": 50, "min": 0, "max": 1000, "step": 1}), + "width": ("INT", {"default": 1024, "min": 64, "max": MAX_RESOLUTION, "step": 8}), + "height": ("INT", {"default": 1024, "min": 64, "max": MAX_RESOLUTION, "step": 8}), + "longer_side": ("INT", {"default": 1024, "min": 64, "max": MAX_RESOLUTION, "step": 8}), + "crop": (CROP_METHODS,), + + "base_steps": ("INT", {"default": 20, "min": 1, "max": 10000}), + "cfg": ("FLOAT", {"default": 8.0, "min": 0.0, "max": 100.0}), + "denoise": ("FLOAT", {"default": 1.0, "min": 0.0, "max": 1.0, "step": 0.01}), + "refiner_steps": ("INT", {"default": 20, "min": 0, "max": 10000}), + "refiner_cfg": ("FLOAT", {"default": 8.0, "min": 0.0, "max": 100.0}), + "refiner_denoise": ("FLOAT", {"default": 1.0, "min": 0.0, "max": 1.0, "step": 0.01}), + "sampler_name": (comfy.samplers.KSampler.SAMPLERS,), + "scheduler": (comfy.samplers.KSampler.SCHEDULERS + CUSTOM_SCHEDULERS,), + "image_output": (["Hide", "Preview", "Save", "Hide/Save", "Disabled"],), + "save_prefix": ("STRING", {"default": "ComfyUI"}), + "file_type": (OUTPUT_FILETYPES, {"default": "png"}), + "embed_workflow": ("BOOLEAN", {"default": True}), + }, + "optional": + {"seed": ("INT", {"default": 0, "min": 0, "max": 0xffffffffffffffff}), + "optional_model": ("MODEL",), + "optional_positive": ("CONDITIONING",), + "optional_negative": ("CONDITIONING",), + "optional_latent": ("LATENT",), + "optional_vae": ("VAE",), + "optional_refiner_model": ("MODEL",), + "optional_refiner_positive": ("CONDITIONING",), + "optional_refiner_negative": ("CONDITIONING",), + "optional_latent": ("LATENT",), + "optional_clip": ("CLIP",), + "input_image_override": ("IMAGE",), + 
"adv_xyPlot": ("ADV_XYPLOT",), + }, + "hidden": + {"prompt": "PROMPT", "extra_pnginfo": "EXTRA_PNGINFO", "my_unique_id": "UNIQUE_ID", + "ttNnodeVersion": ttN_pipeKSamplerSDXL_v2.version}, + } + + RETURN_TYPES = ("PIPE_LINE_SDXL", "PIPE_LINE", "MODEL", "CONDITIONING", "CONDITIONING", "MODEL", "CONDITIONING", "CONDITIONING", "LATENT", "VAE", "CLIP", "IMAGE", "INT", "IMAGE") + RETURN_NAMES = ("sdxl_pipe", "pipe","model", "positive", "negative" , "refiner_model", "refiner_positive", "refiner_negative", "latent", "vae", "clip", "images", "seed", "plot_image") + OUTPUT_NODE = True + FUNCTION = "sample" + CATEGORY = "🌏 tinyterra/pipe" + + def sample(self, sdxl_pipe, + lora_name, lora_strength, + base_steps, refiner_steps, cfg, denoise, refiner_cfg, refiner_denoise, sampler_name, scheduler, image_output, save_prefix, file_type, embed_workflow, + optional_model=None, optional_positive=None, optional_negative=None, optional_latent=None, optional_vae=None, optional_clip=None, input_image_override=None, adv_xyPlot=None, + seed=None, upscale_model_name=None, upscale_method=None, factor=None, rescale=None, percent=None, width=None, height=None, longer_side=None, crop=None, + prompt=None, extra_pnginfo=None, my_unique_id=None, force_full_denoise=False, disable_noise=False, + optional_refiner_model=None, optional_refiner_positive=None, optional_refiner_negative=None): + + my_unique_id = int(my_unique_id) + + ttN_save = ttNsave(my_unique_id, prompt, extra_pnginfo) + + sdxl_model = optional_model if optional_model is not None else sdxl_pipe["model"] + sdxl_positive = optional_positive if optional_positive is not None else sdxl_pipe["positive"] + sdxl_negative = optional_negative if optional_negative is not None else sdxl_pipe["negative"] + sdxl_samples = optional_latent if optional_latent is not None else sdxl_pipe["samples"] + sdxl_images = input_image_override if input_image_override is not None else sdxl_pipe["images"] + sdxl_vae = optional_vae if optional_vae is not None else sdxl_pipe["vae"] + sdxl_clip = optional_clip if optional_clip is not None else sdxl_pipe["clip"] + + sdxl_refiner_model = optional_refiner_model if optional_refiner_model is not None else sdxl_pipe["refiner_model"] + sdxl_refiner_positive = optional_refiner_positive if optional_refiner_positive is not None else sdxl_pipe["refiner_positive"] + #sdxl_refiner_positive = sdxl_positive if sdxl_refiner_positive is None else sdxl_refiner_positive + sdxl_refiner_negative = optional_refiner_negative if optional_refiner_negative is not None else sdxl_pipe["refiner_negative"] + #sdxl_refiner_negative = sdxl_negative if sdxl_refiner_negative is None else sdxl_refiner_negative + sdxl_refiner_clip = sdxl_pipe["refiner_clip"] + + if seed in (None, 'undefined'): + sdxl_seed = sdxl_pipe["seed"] + else: + sdxl_seed = seed + + del sdxl_pipe + + def process_sample_state(sdxl_model, sdxl_images, sdxl_clip, sdxl_samples, sdxl_vae, sdxl_seed, sdxl_positive, sdxl_negative, lora_name, lora_model_strength, lora_clip_strength, + sdxl_refiner_model, sdxl_refiner_positive, sdxl_refiner_negative, sdxl_refiner_clip, + upscale_model_name, upscale_method, factor, rescale, percent, width, height, longer_side, crop, + base_steps, refiner_steps, cfg, sampler_name, scheduler, denoise, refiner_denoise, + image_output, save_prefix, file_type, embed_workflow, prompt, my_unique_id, preview_latent, force_full_denoise=force_full_denoise, disable_noise=disable_noise): + + # Load Lora + if lora_name not in (None, "None"): + sdxl_model, sdxl_clip = loader.load_lora(lora_name, 
sdxl_model, sdxl_clip, lora_model_strength, lora_clip_strength) + + total_steps = base_steps + refiner_steps + + # Upscale samples if enabled + if upscale_method != "None": + sdxl_samples = sampler.handle_upscale(sdxl_samples, upscale_method, factor, crop, upscale_model_name, sdxl_vae, sdxl_images, rescale, percent, width, height, longer_side,) + + if (refiner_steps > 0) and (sdxl_refiner_model not in [None, "None"]): + # Base Sample + sdxl_samples = sampler.common_ksampler(sdxl_model, sdxl_seed, total_steps, cfg, sampler_name, scheduler, sdxl_positive, sdxl_negative, sdxl_samples, + denoise=denoise, preview_latent=preview_latent, start_step=0, last_step=base_steps, force_full_denoise=force_full_denoise, disable_noise=disable_noise) + + # Refiner Sample + sdxl_samples = sampler.common_ksampler(sdxl_refiner_model, sdxl_seed, total_steps, refiner_cfg, sampler_name, scheduler, sdxl_refiner_positive, sdxl_refiner_negative, sdxl_samples, + denoise=refiner_denoise, preview_latent=preview_latent, start_step=base_steps, last_step=10000, force_full_denoise=True, disable_noise=True) + else: + sdxl_samples = sampler.common_ksampler(sdxl_model, sdxl_seed, base_steps, cfg, sampler_name, scheduler, sdxl_positive, sdxl_negative, sdxl_samples, + denoise=denoise, preview_latent=preview_latent, start_step=0, last_step=base_steps, force_full_denoise=True, disable_noise=disable_noise) + + results = list() + if (image_output != "Disabled"): + latent = sdxl_samples["samples"] + sdxl_images = sdxl_vae.decode(latent) + + results = ttN_save.images(sdxl_images, save_prefix, image_output, embed_workflow, file_type) + + new_sdxl_pipe = { + "model": sdxl_model, + "positive": sdxl_positive, + "negative": sdxl_negative, + "vae": sdxl_vae, + "clip": sdxl_clip, + + "refiner_model": sdxl_refiner_model, + "refiner_positive": sdxl_refiner_positive, + "refiner_negative": sdxl_refiner_negative, + "refiner_clip": sdxl_refiner_clip, + + "samples": sdxl_samples, + "images": sdxl_images, + "seed": sdxl_seed, + + "loader_settings": None, + } + + pipe = {"model": sdxl_model, + "positive": sdxl_positive, + "negative": sdxl_negative, + "vae": sdxl_vae, + "clip": sdxl_clip, + + "samples": sdxl_samples, + "images": sdxl_images, + "seed": sdxl_seed, + + "loader_settings": None, + } + + if image_output in ("Hide", "Hide/Save", "Disabled"): + return (*sampler.get_output_sdxl(new_sdxl_pipe, pipe), None) + + return {"ui": {"images": results}, + "result": (*sampler.get_output_sdxl(new_sdxl_pipe, pipe), None)} + + def process_xyPlot(sdxl_model, sdxl_clip, sdxl_samples, sdxl_vae, sdxl_seed, sdxl_positive, sdxl_negative, lora_name, lora_model_strength, lora_clip_strength, + base_steps, refiner_steps, cfg, sampler_name, scheduler, denoise, + image_output, save_prefix, file_type, embed_workflow, prompt, extra_pnginfo, my_unique_id, preview_latent, adv_xyPlot): + + random.seed(seed) + + executor = xyExecutor() + plotter = ttNadv_xyPlot(adv_xyPlot, my_unique_id, prompt, extra_pnginfo, save_prefix, image_output, executor) + plot_image, images, samples = plotter.xy_plot_process() + plotter.reset() + del executor, plotter + + if samples is None and images is None: + return process_sample_state(sdxl_model, sdxl_images, sdxl_clip, sdxl_samples, sdxl_vae, sdxl_seed, sdxl_positive, sdxl_negative, lora_name, lora_model_strength, lora_clip_strength, + sdxl_refiner_model, sdxl_refiner_positive, sdxl_refiner_negative, sdxl_refiner_clip, + upscale_model_name, upscale_method, factor, rescale, percent, width, height, longer_side, crop, + base_steps, 
refiner_steps, cfg, sampler_name, scheduler, denoise, refiner_denoise,
+                                            image_output, save_prefix, file_type, embed_workflow, prompt, my_unique_id, preview_latent, force_full_denoise=force_full_denoise, disable_noise=disable_noise)
+
+            plot_result = ttN_save.images(plot_image, save_prefix, image_output, embed_workflow, file_type)
+            #plot_result.extend(ui_results)
+
+            new_sdxl_pipe = {
+                "model": sdxl_model,
+                "positive": sdxl_positive,
+                "negative": sdxl_negative,
+                "vae": sdxl_vae,
+                "clip": sdxl_clip,
+
+                "refiner_model": sdxl_refiner_model,
+                "refiner_positive": sdxl_refiner_positive,
+                "refiner_negative": sdxl_refiner_negative,
+                "refiner_clip": sdxl_refiner_clip,
+
+                "samples": samples,
+                "images": images,
+                "seed": sdxl_seed,
+
+                "loader_settings": None,
+            }
+
+            pipe = {"model": sdxl_model,
+                "positive": sdxl_positive,
+                "negative": sdxl_negative,
+                "vae": sdxl_vae,
+                "clip": sdxl_clip,
+
+                "samples": samples,
+                "images": images,
+                "seed": sdxl_seed,
+
+                "loader_settings": None,
+            }
+
+            if image_output in ("Hide", "Hide/Save", "Disabled"):
+                return (*sampler.get_output_sdxl(new_sdxl_pipe, pipe), plot_image)
+
+            return {"ui": {"images": plot_result},
+                    "result": (*sampler.get_output_sdxl(new_sdxl_pipe, pipe), plot_image)}
+
+        preview_latent = True
+        if image_output in ("Hide", "Hide/Save", "Disabled"):
+            preview_latent = False
+
+        if adv_xyPlot is None:
+            return process_sample_state(sdxl_model, sdxl_images, sdxl_clip, sdxl_samples, sdxl_vae, sdxl_seed, sdxl_positive, sdxl_negative,
+                                        lora_name, lora_strength, lora_strength,
+                                        sdxl_refiner_model, sdxl_refiner_positive, sdxl_refiner_negative, sdxl_refiner_clip,
+                                        upscale_model_name, upscale_method, factor, rescale, percent, width, height, longer_side, crop,
+                                        base_steps, refiner_steps, cfg, sampler_name, scheduler, denoise, refiner_denoise, image_output, save_prefix, file_type, embed_workflow, prompt, my_unique_id, preview_latent)
+        else:
+            return process_xyPlot(sdxl_model, sdxl_clip, sdxl_samples, sdxl_vae, sdxl_seed, sdxl_positive, sdxl_negative, lora_name, lora_strength, lora_strength,
+                                  base_steps, refiner_steps, cfg, sampler_name, scheduler, denoise,
+                                  image_output, save_prefix, file_type, embed_workflow, prompt, extra_pnginfo, my_unique_id, preview_latent, adv_xyPlot)
+
+class ttN_pipe_EDIT:
+    version = '1.1.1'
+    def __init__(self):
+        pass
+
+    @classmethod
+    def INPUT_TYPES(s):
+        return {"required": {},
+                "optional": {
+                    "pipe": ("PIPE_LINE",),
+                    "model": ("MODEL",),
+                    "pos": ("CONDITIONING",),
+                    "neg": ("CONDITIONING",),
+                    "latent": ("LATENT",),
+                    "vae": ("VAE",),
+                    "clip": ("CLIP",),
+                    "image": ("IMAGE",),
+                    "seed": ("INT", {"default": 0, "min": 0, "max": 0xffffffffffffffff, "forceInput": True}),
+                },
+                "hidden": {"ttNnodeVersion": ttN_pipe_EDIT.version, "my_unique_id": "UNIQUE_ID"},
+        }
+
+    RETURN_TYPES = ("PIPE_LINE", "MODEL", "CONDITIONING", "CONDITIONING", "LATENT", "VAE", "CLIP", "IMAGE", "INT")
+    RETURN_NAMES = ("pipe", "model", "pos", "neg", "latent", "vae", "clip", "image", "seed")
+    FUNCTION = "flush"
+
+    CATEGORY = "🌏 tinyterra/pipe"
+
+    def flush(self, pipe=None, model=None, pos=None, neg=None, latent=None, vae=None, clip=None, image=None, seed=None, my_unique_id=None):
+
+        model = model or pipe.get("model")
+        if model is None:
+            ttNl("Model missing from pipeLine").t(f'pipeEdit[{my_unique_id}]').warn().p()
+        pos = pos or pipe.get("positive")
+        if pos is None:
+            ttNl("Positive conditioning missing from pipeLine").t(f'pipeEdit[{my_unique_id}]').warn().p()
+        neg = neg or pipe.get("negative")
+        if neg is None:
+            ttNl("Negative conditioning 
missing from pipeLine").t(f'pipeEdit[{my_unique_id}]').warn().p() + samples = latent or pipe.get("samples") + if samples is None: + ttNl("Latent missing from pipeLine").t(f'pipeEdit[{my_unique_id}]').warn().p() + vae = vae or pipe.get("vae") + if vae is None: + ttNl("VAE missing from pipeLine").t(f'pipeEdit[{my_unique_id}]').warn().p() + clip = clip or pipe.get("clip") + if clip is None: + ttNl("Clip missing from pipeLine").t(f'pipeEdit[{my_unique_id}]').warn().p() + image = image or pipe.get("images") + if image is None: + ttNl("Image missing from pipeLine").t(f'pipeEdit[{my_unique_id}]').warn().p() + seed = seed or pipe.get("seed") + if seed is None: + ttNl("Seed missing from pipeLine").t(f'pipeEdit[{my_unique_id}]').warn().p() + + new_pipe = { + "model": model, + "positive": pos, + "negative": neg, + "vae": vae, + "clip": clip, + + "samples": samples, + "images": image, + "seed": seed, + + "loader_settings": pipe["loader_settings"], + } + del pipe + + return (new_pipe, model, pos, neg, latent, vae, clip, image, seed) + +class ttN_pipe_2BASIC: + version = '1.1.0' + def __init__(self): + pass + + @classmethod + def INPUT_TYPES(s): + return { + "required": { + "pipe": ("PIPE_LINE",), + }, + "hidden": {"ttNnodeVersion": ttN_pipe_2BASIC.version}, + } + + RETURN_TYPES = ("BASIC_PIPE", "PIPE_LINE",) + RETURN_NAMES = ("basic_pipe", "pipe",) + FUNCTION = "flush" + + CATEGORY = "🌏 tinyterra/pipe" + + def flush(self, pipe): + basic_pipe = (pipe.get('model'), pipe.get('clip'), pipe.get('vae'), pipe.get('positive'), pipe.get('negative')) + return (basic_pipe, pipe, ) + +class ttN_pipe_2DETAILER: + version = '1.2.0' + @classmethod + def INPUT_TYPES(s): + return {"required": {"pipe": ("PIPE_LINE",), + "bbox_detector": ("BBOX_DETECTOR", ), + "wildcard": ("STRING", {"multiline": True, "placeholder": "wildcard spec: if kept empty, this option will be ignored"}), + }, + "optional": {"sam_model_opt": ("SAM_MODEL", ), + "segm_detector_opt": ("SEGM_DETECTOR",), + "detailer_hook": ("DETAILER_HOOK",), + }, + "hidden": {"ttNnodeVersion": ttN_pipe_2DETAILER.version}, + } + + RETURN_TYPES = ("DETAILER_PIPE", "PIPE_LINE" ) + RETURN_NAMES = ("detailer_pipe", "pipe") + FUNCTION = "flush" + + CATEGORY = "🌏 tinyterra/pipe" + + def flush(self, pipe, bbox_detector, wildcard, sam_model_opt=None, segm_detector_opt=None, detailer_hook=None): + detailer_pipe = (pipe.get('model'), pipe.get('clip'), pipe.get('vae'), pipe.get('positive'), pipe.get('negative'), wildcard, + bbox_detector, segm_detector_opt, sam_model_opt, detailer_hook, None, None, None, None) + return (detailer_pipe, pipe, ) + +class ttN_pipeEncodeConcat: + version = '1.0.2' + @classmethod + def INPUT_TYPES(s): + return {"required": { + "pipe": ("PIPE_LINE",), + "toggle": ([True, False],), + }, + "optional": { + "positive": ("STRING", {"default": "Positive","multiline": True}), + "positive_token_normalization": (["none", "mean", "length", "length+mean"],), + "positive_weight_interpretation": (["comfy", "A1111", "compel", "comfy++", "down_weight"],), + "negative": ("STRING", {"default": "Negative","multiline": True}), + "negative_token_normalization": (["none", "mean", "length", "length+mean"],), + "negative_weight_interpretation": (["comfy", "A1111", "compel", "comfy++", "down_weight"],), + "optional_positive_from": ("CONDITIONING",), + "optional_negative_from": ("CONDITIONING",), + "optional_clip": ("CLIP",), + }, + "hidden": { + "ttNnodeVersion": ttN_pipeEncodeConcat.version, "my_unique_id": "UNIQUE_ID" + }, + } + + OUTPUT_NODE = True + RETURN_TYPES = 
("PIPE_LINE", "CONDITIONING", "CONDITIONING", "CLIP") + RETURN_NAMES = ("pipe", "positive", "negative", "clip") + FUNCTION = "concat" + + CATEGORY = "🌏 tinyterra/pipe" + + def concat(self, toggle, positive_token_normalization, positive_weight_interpretation, + negative_token_normalization, negative_weight_interpretation, + pipe=None, positive='', negative='', seed=None, my_unique_id=None, optional_positive_from=None, optional_negative_from=None, optional_clip=None): + + if toggle == False: + return (pipe, pipe["positive"], pipe["negative"], pipe["clip"]) + + positive_from = optional_positive_from if optional_positive_from is not None else pipe["positive"] + negative_from = optional_negative_from if optional_negative_from is not None else pipe["negative"] + samp_clip = optional_clip if optional_clip is not None else pipe["clip"] + + new_text = '' + + def enConcatConditioning(text, token_normalization, weight_interpretation, conditioning_from, new_text): + out = [] + if "__" in text: + text = loader.nsp_parse(text, pipe["seed"], title="encodeConcat", my_unique_id=my_unique_id) + new_text += text + + conditioning_to, pooled = advanced_encode(samp_clip, text, token_normalization, weight_interpretation, w_max=1.0, apply_to_pooled='enable') + conditioning_to = [[conditioning_to, {"pooled_output": pooled}]] + + if len(conditioning_from) > 1: + ttNl("encode and concat conditioning_from contains more than 1 cond, only the first one will actually be applied to conditioning_to").t(f'pipeEncodeConcat[{my_unique_id}]').warn().p() + + cond_from = conditioning_from[0][0] + + for i in range(len(conditioning_to)): + t1 = conditioning_to[i][0] + tw = torch.cat((t1, cond_from),1) + n = [tw, conditioning_to[i][1].copy()] + out.append(n) + + return out + + pos, neg = None, None + if positive not in ['', None, ' ']: + pos = enConcatConditioning(positive, positive_token_normalization, positive_weight_interpretation, positive_from, new_text) + if negative not in ['', None, ' ']: + neg = enConcatConditioning(negative, negative_token_normalization, negative_weight_interpretation, negative_from, new_text) + + pos = pos if pos is not None else pipe["positive"] + neg = neg if neg is not None else pipe["negative"] + + new_pipe = { + "model": pipe["model"], + "positive": pos, + "negative": neg, + "vae": pipe["vae"], + "clip": samp_clip, + + "samples": pipe["samples"], + "images": pipe["images"], + "seed": pipe["seed"], + + "loader_settings": pipe["loader_settings"], + } + del pipe + + return (new_pipe, new_pipe["positive"], new_pipe["negative"], samp_clip, { "ui": { "string": new_text } } ) + +class ttN_pipeLoraStack: + version = '1.1.1' + def __init__(self): + pass + + @classmethod + def INPUT_TYPES(s): + inputs = { + "required": { + "toggle": ([True, False],), + "mode": (["simple", "advanced"],), + "num_loras": ("INT", {"default": 1, "min": 0, "max": 20}), + }, + "optional": { + "optional_pipe": ("PIPE_LINE", {"default": None}), + "model_override": ("MODEL",), + "clip_override": ("CLIP",), + "optional_lora_stack": ("LORA_STACK",), + }, + "hidden": { + "ttNnodeVersion": (ttN_pipeLoraStack.version), + }, + } + + for i in range(1, 21): + inputs["optional"][f"lora_{i}_name"] = (["None"] + folder_paths.get_filename_list("loras"),{"default": "None"}) + inputs["optional"][f"lora_{i}_strength"] = ("FLOAT", {"default": 1.0, "min": -10.0, "max": 10.0, "step": 0.01}) + inputs["optional"][f"lora_{i}_model_strength"] = ("FLOAT", {"default": 1.0, "min": -10.0, "max": 10.0, "step": 0.01}) + 
inputs["optional"][f"lora_{i}_clip_strength"] = ("FLOAT", {"default": 1.0, "min": -10.0, "max": 10.0, "step": 0.01}) + + return inputs + + + RETURN_TYPES = ("PIPE_LINE", "LORA_STACK",) + RETURN_NAMES = ("optional_pipe","lora_stack",) + FUNCTION = "stack" + + CATEGORY = "🌏 tinyterra/pipe" + + def stack(self, toggle, mode, num_loras, optional_pipe=None, lora_stack=None, model_override=None, clip_override=None, **kwargs): + if (toggle in [False, None, "False"]) or not kwargs: + return optional_pipe, None + + loras = [] + + # Import Stack values + if lora_stack is not None: + loras.extend([l for l in lora_stack if l[0] != "None"]) + + # Import Lora values + for i in range(1, num_loras + 1): + lora_name = kwargs.get(f"lora_{i}_name") + + if not lora_name or lora_name == "None": + continue + + if mode == "simple": + lora_strength = float(kwargs.get(f"lora_{i}_strength")) + loras.append((lora_name, lora_strength, lora_strength)) + elif mode == "advanced": + model_strength = float(kwargs.get(f"lora_{i}_model_strength")) + clip_strength = float(kwargs.get(f"lora_{i}_clip_strength")) + loras.append((lora_name, model_strength, clip_strength)) + + if not loras: + return optional_pipe, None + + if loras and not optional_pipe: + return optional_pipe, loras + + # Load Loras + model = model_override or optional_pipe.get("model") + clip = clip_override or optional_pipe.get("clip") + + if not model or not clip: + return optional_pipe, loras + + for lora in loras: + model, clip = loader.load_lora(lora[0], model, clip, lora[1], lora[2]) + + new_pipe = { + "model": model, + "positive": optional_pipe["positive"], + "negative": optional_pipe["negative"], + "vae": optional_pipe["vae"], + "clip": clip, + + "samples": optional_pipe["samples"], + "images": optional_pipe["images"], + "seed": optional_pipe["seed"], + + "loader_settings": optional_pipe["loader_settings"], + } + + del optional_pipe + + return new_pipe, loras + +#---------------------------------------------------------------ttN/pipe END------------------------------------------------------------------------# + + +#--------------------------------------------------------------ttN/base START-----------------------------------------------------------------------# +class ttN_tinyLoader: + version = '1.1.0' + @classmethod + def INPUT_TYPES(cls): + aspect_ratios = ["width x height [custom]", + "512 x 512 [S] 1:1", + "768 x 768 [S] 1:1", + "910 x 910 [S] 1:1", + + "512 x 682 [P] 3:4", + "512 x 768 [P] 2:3", + "512 x 910 [P] 9:16", + + "682 x 512 [L] 4:3", + "768 x 512 [L] 3:2", + "910 x 512 [L] 16:9", + + "1024 x 1024 [S] 1:1", + "512 x 1024 [P] 1:2", + "1024 x 512 [L] 2:1", + + "640 x 1536 [P] 9:21", + "704 x 1472 [P] 9:19", + "768 x 1344 [P] 9:16", + "768 x 1216 [P] 5:8", + "832 x 1216 [P] 2:3", + "896 x 1152 [P] 3:4", + + "1536 x 640 [L] 21:9", + "1472 x 704 [L] 19:9", + "1344 x 768 [L] 16:9", + "1216 x 768 [L] 8:5", + "1216 x 832 [L] 3:2", + "1152 x 896 [L] 4:3", + ] + + return {"required": { + "ckpt_name": (folder_paths.get_filename_list("checkpoints"), ), + "config_name": (["Default",] + folder_paths.get_filename_list("configs"), {"default": "Default"} ), + "sampling": (["Default", "eps", "v_prediction", "lcm", "x0"], {"default": "Default"}), + "zsnr": ("BOOLEAN", {"default": False}), + "cfg_rescale_mult": ("FLOAT", {"default": 0.0, "min": 0.0, "max": 1.0, "step": 0.01}), + + "vae_name": (["Baked VAE"] + folder_paths.get_filename_list("vae"),), + "clip_skip": ("INT", {"default": -1, "min": -24, "max": 0, "step": 1}), + + "empty_latent_aspect": 
(aspect_ratios, {"default":"512 x 512 [S] 1:1"}), + "empty_latent_width": ("INT", {"default": 512, "min": 64, "max": MAX_RESOLUTION, "step": 8}), + "empty_latent_height": ("INT", {"default": 512, "min": 64, "max": MAX_RESOLUTION, "step": 8}), + "batch_size": ("INT", {"default": 1, "min": 1, "max": 64}), + }, + "hidden": {"prompt": "PROMPT", "ttNnodeVersion": ttN_tinyLoader.version, "my_unique_id": "UNIQUE_ID",} + } + + RETURN_TYPES = ("MODEL", "LATENT", "VAE", "CLIP", "INT", "INT",) + RETURN_NAMES = ("model", "latent", "vae", "clip", "width", "height",) + + FUNCTION = "miniloader" + CATEGORY = "🌏 tinyterra/base" + + def miniloader(self, ckpt_name, config_name, sampling, zsnr, cfg_rescale_mult, vae_name, clip_skip, + empty_latent_aspect, empty_latent_width, empty_latent_height, batch_size, + prompt=None, my_unique_id=None): + + model: ModelPatcher | None = None + clip: CLIP | None = None + vae: VAE | None = None + + model, clip, vae = loader.load_checkpoint(ckpt_name, config_name, clip_skip) + + # Create Empty Latent + sd3 = True if sampler.get_model_type(model) in ['FLUX', 'FLOW'] else False + latent = sampler.emptyLatent(empty_latent_aspect, batch_size, empty_latent_width, empty_latent_height, sd3) + samples = {"samples": latent} + + if vae_name != "Baked VAE": + vae = loader.load_vae(vae_name) + + if sampling != "Default": + MSD = comfy_extras.nodes_model_advanced.ModelSamplingDiscrete() + model = MSD.patch(model, sampling, zsnr)[0] + + if cfg_rescale_mult > 0: + CFGR = comfy_extras.nodes_model_advanced.RescaleCFG() + model = CFGR.patch(model, cfg_rescale_mult)[0] + + return (model, samples, vae, clip, empty_latent_width, empty_latent_height) + +class ttN_conditioning: + version = '1.0.2' + @classmethod + def INPUT_TYPES(cls): + return {"required": { + "model": ("MODEL",), + "clip": ("CLIP",), + + "loras": ("STRING", {"placeholder": "", "multiline": True}), + + "positive": ("STRING", {"default": "Positive","multiline": True, "dynamicPrompts": True}), + "positive_token_normalization": (["none", "mean", "length", "length+mean"],), + "positive_weight_interpretation": (["comfy", "A1111", "compel", "comfy++", "down_weight"],), + + "negative": ("STRING", {"default": "Negative", "multiline": True, "dynamicPrompts": True}), + "negative_token_normalization": (["none", "mean", "length", "length+mean"],), + "negative_weight_interpretation": (["comfy", "A1111", "compel", "comfy++", "down_weight"],), + "zero_out_empty": ("BOOLEAN", {"default": False}), + }, + "optional": { + "optional_lora_stack": ("LORA_STACK",), + "prepend_positive": ("STRING", {"forceInput": True}), + "prepend_negative": ("STRING", {"forceInput": True}), + }, + "hidden": {"ttNnodeVersion": ttN_conditioning.version, "my_unique_id": "UNIQUE_ID"},} + + RETURN_TYPES = ("MODEL", "CONDITIONING", "CONDITIONING", "CLIP", "STRING", "STRING") + RETURN_NAMES = ("model", "positive", "negative", "clip", "pos_string", "neg_string") + + FUNCTION = "condition" + CATEGORY = "🌏 tinyterra/base" + + def condition(self, model, clip, loras, + positive, positive_token_normalization, positive_weight_interpretation, + negative, negative_token_normalization, negative_weight_interpretation, zero_out_empty, + optional_lora_stack=None, prepend_positive=None, prepend_negative=None, + my_unique_id=None): + + if optional_lora_stack is not None: + for lora in optional_lora_stack: + model, clip = loader.load_lora(lora[0], model, clip, lora[1], lora[2]) + + if loras not in [None, "None"]: + model, clip = loader.load_lora_text(loras, model, clip) + + 
positive_embedding = loader.embedding_encode(positive, positive_token_normalization, positive_weight_interpretation, clip, title='ttN Conditioning Positive', + my_unique_id=my_unique_id, prepend_text=prepend_positive, zero_out=zero_out_empty) + negative_embedding = loader.embedding_encode(negative, negative_token_normalization, negative_weight_interpretation, clip, title='ttN Conditioning Negative', + my_unique_id=my_unique_id, prepend_text=prepend_negative, zero_out=zero_out_empty) + + final_positive = (prepend_positive + ' ' if prepend_positive else '') + (positive if positive else '') + final_negative = (prepend_negative + ' ' if prepend_negative else '') + (negative if negative else '') + + return (model, positive_embedding, negative_embedding, clip, final_positive, final_negative) + +class ttN_KSampler_v2: + version = '2.3.1' + def __init__(self): + pass + + @classmethod + def INPUT_TYPES(cls): + return {"required": { + "model": ("MODEL",), + "positive": ("CONDITIONING",), + "negative": ("CONDITIONING",), + "latent": ("LATENT",), + "vae": ("VAE",), + + "lora_name": (["None"] + folder_paths.get_filename_list("loras"),), + "lora_strength": ("FLOAT", {"default": 1.0, "min": -10.0, "max": 10.0, "step": 0.01}), + + "upscale_method": (UPSCALE_METHODS, {"default": "None"}), + "upscale_model_name": (UPSCALE_MODELS,), + "factor": ("FLOAT", {"default": 2, "min": 0.0, "max": 10.0, "step": 0.25}), + "rescale": (["by percentage", "to Width/Height", 'to longer side - maintain aspect', 'None'],), + "percent": ("INT", {"default": 50, "min": 0, "max": 1000, "step": 1}), + "width": ("INT", {"default": 1024, "min": 64, "max": MAX_RESOLUTION, "step": 8}), + "height": ("INT", {"default": 1024, "min": 64, "max": MAX_RESOLUTION, "step": 8}), + "longer_side": ("INT", {"default": 1024, "min": 64, "max": MAX_RESOLUTION, "step": 8}), + "crop": (CROP_METHODS,), + + "steps": ("INT", {"default": 20, "min": 1, "max": 10000}), + "cfg": ("FLOAT", {"default": 8.0, "min": 0.0, "max": 100.0}), + "sampler_name": (comfy.samplers.KSampler.SAMPLERS,), + "scheduler": (comfy.samplers.KSampler.SCHEDULERS + CUSTOM_SCHEDULERS,), + "denoise": ("FLOAT", {"default": 1.0, "min": 0.0, "max": 1.0, "step": 0.01}), + "image_output": (["Hide", "Preview", "Save", "Hide/Save", "Disabled"],), + "save_prefix": ("STRING", {"default": "ComfyUI"}), + "file_type": (OUTPUT_FILETYPES,{"default": "png"}), + "embed_workflow": ("BOOLEAN", {"default": True}), + }, + "optional": { + "clip": ("CLIP",), + "seed": ("INT", {"default": 0, "min": 0, "max": 0xffffffffffffffff}), + "input_image_override": ("IMAGE",), + "adv_xyPlot": ("ADV_XYPLOT",), + }, + "hidden": { + "prompt": "PROMPT", "extra_pnginfo": "EXTRA_PNGINFO", "my_unique_id": "UNIQUE_ID", + "ttNnodeVersion": ttN_KSampler_v2.version + }, + } + + RETURN_TYPES = ("MODEL", "CONDITIONING", "CONDITIONING", "LATENT", "VAE", "CLIP", "IMAGE", "INT", "IMAGE") + RETURN_NAMES = ("model", "positive", "negative", "latent","vae", "clip", "images", "seed", "plot_image") + OUTPUT_NODE = True + FUNCTION = "sample" + CATEGORY = "🌏 tinyterra/base" + + def sample(self, model, positive, negative, latent, vae, + lora_name, lora_strength, + steps, cfg, sampler_name, scheduler, image_output, save_prefix, file_type, embed_workflow, denoise=1.0, + input_image_override=None, + clip=None, seed=None, adv_xyPlot=None, upscale_model_name=None, upscale_method=None, factor=None, rescale=None, percent=None, width=None, height=None, longer_side=None, crop=None, + prompt=None, extra_pnginfo=None, my_unique_id=None, start_step=None, 
last_step=None, force_full_denoise=False, disable_noise=False): + + my_unique_id = int(my_unique_id) + + ttN_save = ttNsave(my_unique_id, prompt, extra_pnginfo) + + def process_sample_state(model, images, clip, samples, vae, seed, positive, negative, lora_name, lora_model_strength, lora_clip_strength, + upscale_model_name, upscale_method, factor, rescale, percent, width, height, longer_side, crop, + steps, cfg, sampler_name, scheduler, denoise, + image_output, save_prefix, file_type, embed_workflow, prompt, extra_pnginfo, my_unique_id, preview_latent, start_step=start_step, last_step=last_step, force_full_denoise=force_full_denoise, disable_noise=disable_noise): + # Load Lora + if lora_name not in (None, "None"): + if clip == None: + raise ValueError(f"tinyKSampler [{my_unique_id}] - Lora requires CLIP model") + model, clip = loader.load_lora(lora_name, model, clip, lora_model_strength, lora_clip_strength) + + # Upscale samples if enabled + if upscale_method != "None": + samples = sampler.handle_upscale(samples, upscale_method, factor, crop, upscale_model_name, vae, images, rescale, percent, width, height, longer_side) + + samples = sampler.common_ksampler(model, seed, steps, cfg, sampler_name, scheduler, positive, negative, samples, denoise=denoise, preview_latent=preview_latent, start_step=start_step, last_step=last_step, force_full_denoise=force_full_denoise, disable_noise=disable_noise) + + results = list() + if (image_output != "Disabled"): + # Save images + latent = samples["samples"] + images = vae.decode(latent) + + results = ttN_save.images(images, save_prefix, image_output, embed_workflow, file_type) + + if image_output in ("Hide", "Hide/Save", "Disabled"): + return (model, positive, negative, samples, vae, clip, images, seed, None) + + return {"ui": {"images": results}, + "result": (model, positive, negative, samples, vae, clip, images, seed, None)} + + def process_xyPlot(model, clip, samp_samples, vae, seed, positive, negative, lora_name, lora_model_strength, lora_clip_strength, + steps, cfg, sampler_name, scheduler, denoise, + image_output, save_prefix, file_type, embed_workflow, prompt, extra_pnginfo, my_unique_id, preview_latent, adv_xyPlot): + + random.seed(seed) + + executor = xyExecutor() + plotter = ttNadv_xyPlot(adv_xyPlot, my_unique_id, prompt, extra_pnginfo, save_prefix, image_output, executor) + plot_image, images, samples = plotter.xy_plot_process() + plotter.reset() + del executor, plotter + + if samples is None and images is None: + return process_sample_state(model, images, clip, samp_samples, vae, seed, positive, negative, lora_name, lora_model_strength, lora_clip_strength, + upscale_model_name, upscale_method, factor, rescale, percent, width, height, longer_side, crop, + steps, cfg, sampler_name, scheduler, denoise, + image_output, save_prefix, file_type, embed_workflow, prompt, extra_pnginfo, my_unique_id, preview_latent, start_step=start_step, last_step=last_step, force_full_denoise=force_full_denoise, disable_noise=disable_noise) + + + plot_result = ttN_save.images(plot_image, save_prefix, image_output, embed_workflow, file_type) + #plot_result.extend(ui_results) + + if image_output in ("Hide", "Hide/Save"): + return (model, positive, negative, samples, vae, clip, images, seed, plot_image) + + return {"ui": {"images": plot_result}, "result": (model, positive, negative, samples, vae, clip, images, seed, plot_image)} + + preview_latent = True + if image_output in ("Hide", "Hide/Save", "Disabled"): + preview_latent = False + + if adv_xyPlot is None: + return 
process_sample_state(model, input_image_override, clip, latent, vae, seed, positive, negative, lora_name, lora_strength, lora_strength, + upscale_model_name, upscale_method, factor, rescale, percent, width, height, longer_side, crop, + steps, cfg, sampler_name, scheduler, denoise, image_output, save_prefix, file_type, embed_workflow, prompt, extra_pnginfo, my_unique_id, preview_latent) + else: + return process_xyPlot(model, clip, latent, vae, seed, positive, negative, lora_name, lora_strength, lora_strength, steps, cfg, sampler_name, + scheduler, denoise, image_output, save_prefix, file_type, embed_workflow, prompt, extra_pnginfo, my_unique_id, preview_latent, adv_xyPlot) + +#---------------------------------------------------------------ttN/base END------------------------------------------------------------------------# + + +#-------------------------------------------------------------ttN/xyPlot START----------------------------------------------------------------------# +class ttN_advanced_XYPlot: + version = '1.2.1' + plotPlaceholder = "_PLOT\nExample:\n\n\n[node_ID:widget_Name='value']\n\n\n[node_ID:widget_Name='value2']\n[node_ID:widget2_Name='value']\n[node_ID2:widget_Name='value']\n\netc..." + + def get_plot_points(plot_data, unique_id, plot_Line): + if plot_data is None or plot_data.strip() == '': + return None + else: + try: + axis_dict = {} + lines = plot_data.split('<') + new_lines = [] + temp_line = '' + + for line in lines: + if line.startswith('lora'): + temp_line += '<' + line + new_lines[-1] = temp_line + else: + new_lines.append(line) + temp_line = line + + for line in new_lines: + if line: + values_label = [] + line = line.split('>', 1) + num, label = line[0].split(':', 1) + axis_dict[num] = {"label": label} + for point in line[1].split("']"): + if point.strip() == '': + continue + + node_id = point.split(':', 1)[0].split('[')[1] + axis_dict[num].setdefault(node_id, {}) + input_name = point.split(':', 1)[1].split('=', 1)[0] + value = point.split("'", 1 )[1] + values_label.append((value, input_name, node_id)) + + axis_dict[num][node_id][input_name] = value + + if label in ['v_label', 'tv_label', 'idtv_label']: + new_label = [] + for value, input_name, node_id in values_label: + if label == 'v_label': + new_label.append(value) + elif label == 'tv_label': + new_label.append(f'{input_name}: {value}') + elif label == 'idtv_label': + new_label.append(f'[{node_id}] {input_name}: {value}') + axis_dict[num]['label'] = ', '.join(new_label) + + except ValueError: + ttNl('Invalid Plot - defaulting to None...').t(f'advanced_XYPlot[{unique_id}] {plot_Line} Axis').warn().p() + return None + return axis_dict + + def __init__(self): + pass + + @classmethod + def INPUT_TYPES(s): + return { + "required": { + "grid_spacing": ("INT",{"min": 0, "max": 500, "step": 5, "default": 0,}), + "save_individuals": ("BOOLEAN", {"default": False}), + "flip_xy": ("BOOLEAN", {"default": False}), + + "x_plot": ("STRING",{"default": '', "multiline": True, "placeholder": 'X' + ttN_advanced_XYPlot.plotPlaceholder, "pysssss.autocomplete": False}), + "y_plot": ("STRING",{"default": '', "multiline": True, "placeholder": 'Y' + ttN_advanced_XYPlot.plotPlaceholder, "pysssss.autocomplete": False}), + "z_plot": ("STRING",{"default": '', "multiline": True, "placeholder": 'Z' + ttN_advanced_XYPlot.plotPlaceholder, "pysssss.autocomplete": False}), + "invert_background": ("BOOLEAN", {"default": False}), + }, + "hidden": { + "my_unique_id": "UNIQUE_ID", + "ttNnodeVersion": ttN_advanced_XYPlot.version, + }, + } + + 
RETURN_TYPES = ("ADV_XYPLOT", ) + RETURN_NAMES = ("adv_xyPlot", ) + FUNCTION = "plot" + + CATEGORY = "🌏 tinyterra/xyPlot" + + def plot(self, grid_spacing, save_individuals, flip_xy, x_plot=None, y_plot=None, z_plot=None, my_unique_id=None, invert_background=False): + x_plot = ttN_advanced_XYPlot.get_plot_points(x_plot, my_unique_id, 'X') + y_plot = ttN_advanced_XYPlot.get_plot_points(y_plot, my_unique_id, 'Y') + z_plot = ttN_advanced_XYPlot.get_plot_points(z_plot, my_unique_id, 'Z') + + if x_plot == {}: + x_plot = None + if y_plot == {}: + y_plot = None + + if flip_xy == True: + x_plot, y_plot = y_plot, x_plot + + xy_plot = {"x_plot": x_plot, + "y_plot": y_plot, + "z_plot": z_plot, + "grid_spacing": grid_spacing, + "save_individuals": save_individuals, + "invert_bg": invert_background} + + return (xy_plot, ) + +class ttN_Plotting(ttN_advanced_XYPlot): + def plot(self, **args): + xy_plot = None + return (xy_plot, ) + +class ttN_advPlot_images: + version = '1.0.0' + def __init__(self): + pass + + @classmethod + def INPUT_TYPES(s): + return { + "required": { + "enabled": ('BOOLEAN',{'default': True}), + "image": ('IMAGE',{}), + "image_output": (["Hide", "Preview", "Save", "Hide/Save", "Disabled"],), + "save_prefix": ("STRING", {"default": "ComfyUI"}), + "file_type": (OUTPUT_FILETYPES,{"default": "png"}), + "embed_workflow": ("BOOLEAN", {"default": True}), + }, + "optional": { + "adv_xyPlot": ("ADV_XYPLOT",), + }, + "hidden": { + "prompt": "PROMPT", "extra_pnginfo": "EXTRA_PNGINFO", "my_unique_id": "UNIQUE_ID", + "ttNnodeVersion": ttN_advPlot_images.version, + } + } + + RETURN_TYPES = ("IMAGE", "IMAGE") + RETURN_NAMES = ("images", "plot_image") + FUNCTION = "plot" + OUTPUT_NODE = True + + CATEGORY = "🌏 tinyterra/xyPlot" + + def plot(self, enabled, image, adv_xyPlot, image_output, save_prefix, file_type, embed_workflow, prompt=None, extra_pnginfo=None, my_unique_id=None): + if enabled == False or adv_xyPlot is None: + return (image, None) + + my_unique_id = int(my_unique_id) + ttN_save = ttNsave(my_unique_id, prompt, extra_pnginfo) + + #random.seed(seed) + + executor = xyExecutor() + plotter = ttNadv_xyPlot(adv_xyPlot, my_unique_id, prompt, extra_pnginfo, save_prefix, image_output, executor) + plot_image, images, samples = plotter.xy_plot_process() + plotter.reset() + del executor, plotter + + plot_result = ttN_save.images(plot_image, save_prefix, image_output, embed_workflow, file_type) + #plot_result.extend(ui_results) + + if image_output in ("Hide", "Hide/Save"): + return (images, plot_image) + + return {"ui": {"images": plot_result}, "result": (images, plot_image)} + +class ttN_advPlot_range: + version = '1.1.0' + def __init__(self): + pass + + @classmethod + def INPUT_TYPES(s): + return { + "required": { + "node": ([AnyType("Connect to xyPlot for options"),],{}), + "widget": ([AnyType("Select node for options"),],{}), + + "range_mode": (['step_int','num_steps_int','step_float','num_steps_float'],{}), + "start": ("FLOAT", {"min": -0xffffffffffffffff, "max": 0xffffffffffffffff, "step": 0.01, "default": 1,}), + "step": ("FLOAT", {"min": -0xffffffffffffffff, "max": 0xffffffffffffffff, "step": 0.01, "default": 1,}), + "stop": ("FLOAT", {"min": -0xffffffffffffffff, "max": 0xffffffffffffffff, "step": 0.01, "default": 5,}), + "include_stop": ("BOOLEAN",{"default": True}), + "num_steps": ("INT", {"min": 1, "max": 1000, "step": 1, "default": 5,}), + + "label_type": (['Values', 'Title and Values', 'ID, Title and Values'],{"default": "Values"}), + + }, + "hidden": { + "ttNnodeVersion": 
ttN_advPlot_range.version,
+            }
+        }
+
+    RETURN_TYPES = ("STRING",)
+    RETURN_NAMES = ("plot_text",)
+    FUNCTION = "plot"
+    OUTPUT_NODE = True
+
+    CATEGORY = "🌏 tinyterra/xyPlot"
+
+    def plot(self, node, widget, range_mode, start, step, stop, include_stop, num_steps, label_type):
+        if '[' in node and ']' in node:
+            nodeid = node.split('[', 1)[1].split(']', 1)[0]
+        else:
+            return {"ui": {"text": ''}, "result": ('',)}
+
+        label_map = {
+            'Values': 'v_label',
+            'Title and Values': 'tv_label',
+            'ID, Title and Values': 'idtv_label',
+        }
+        label = label_map[label_type]
+
+        plot_text = []
+        vals = []
+
+        # 'step_*' modes generate num_steps values spaced by step from start;
+        # 'num_steps_*' modes spread num_steps values evenly between start and stop.
+        if range_mode.startswith('step_'):
+            for num in range(1, num_steps + 1):
+                vals.append(start + step * (num - 1))
+        if range_mode.startswith('num_steps'):
+            vals = np.linspace(start, stop, num_steps, endpoint=include_stop).tolist()
+
+        for i, val in enumerate(vals):
+            if range_mode.endswith('int'):
+                val = int(round(val, 0))
+            else:
+                val = round(val, 2)
+            line = f"[{nodeid}:{widget}='{val}']"
+            plot_text.append(f"<{i+1}:{label}>")
+            plot_text.append(line)
+
+        out = '\n'.join(plot_text)
+
+        return {"ui": {"text": out}, "result": (out,)}
+
+class ttN_advPlot_string:
+    version = '1.1.0'
+    def __init__(self):
+        pass
+
+    @classmethod
+    def INPUT_TYPES(s):
+        return {
+            "required": {
+                "node": ([AnyType("Connect to xyPlot for options"),],{}),
+                "widget": ([AnyType("Select node for options"),],{}),
+
+                "replace_mode": ("BOOLEAN",{"default": False}),
+                "search_string": ("STRING",{"default":""}),
+                "text": ("STRING", {"default":"","multiline": True}),
+                "delimiter": ("STRING", {"default":"\\n","multiline": False}),
+                "label_type": (['Values', 'Title and Values', 'ID, Title and Values'],{"default": "Values"}),
+            },
+            "hidden": {
+                "ttNnodeVersion": ttN_advPlot_string.version,
+            }
+        }
+
+    RETURN_TYPES = ("STRING",)
+    RETURN_NAMES = ("plot_text",)
+    FUNCTION = "plot"
+    OUTPUT_NODE = True
+
+    CATEGORY = "🌏 tinyterra/xyPlot"
+
+    def plot(self, node, widget, replace_mode, search_string, text, delimiter, label_type):
+        if '[' in node and ']' in node:
+            nodeid = node.split('[', 1)[1].split(']', 1)[0]
+        else:
+            return {"ui": {"text": ''}, "result": ('',)}
+
+        label_map = {
+            'Values': 'v_label',
+            'Title and Values': 'tv_label',
+            'ID, Title and Values': 'idtv_label',
+        }
+        label = label_map[label_type]
+
+        plot_text = []
+
+        delimiter = delimiter.replace('\\n', '\n')
+        vals = text.split(delimiter)
+
+        for i, val in enumerate(vals):
+            if val.strip() == '':
+                continue
+            if replace_mode:
+                line = f"[{nodeid}:{widget}='%{search_string};{val}%']"
+            else:
+                line = f"[{nodeid}:{widget}='{val}']"
+            plot_text.append(f"<{i+1}:{label}>")
+            plot_text.append(line)
+
+        out = '\n'.join(plot_text)
+
+        return {"ui": {"text": out}, "result": (out,)}
+
+class ttN_advPlot_combo:
+    version = '1.0.0'
+    def __init__(self):
+        pass
+
+    @classmethod
+    def INPUT_TYPES(s):
+        return {
+            "required": {
+                "node": ([AnyType("Connect to xyPlot for options"),],{}),
+                "widget": ([AnyType("Select node for options"),],{}),
+
+                "mode": (['all', 'range', 'select'],),
+                "start_from": ([AnyType("Select widget for options"),],),
+                "end_with": ([AnyType("Select widget for options"),],),
+
+                "select": ([AnyType("Select widget for options"),],),
+                "selection": ("STRING", {"default":"","multiline": True}),
+
+                "label_type": (['Values', 'Title and Values', 'ID, Title and Values'],{"default": "Values"}),
+            },
+            "hidden": {
+                "ttNnodeVersion": ttN_advPlot_combo.version, "prompt": "PROMPT",
+            }
+        }
+
+    RETURN_TYPES = ("STRING",)
+    RETURN_NAMES = ("plot_text",)
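+    # Sketch of the plot_text this node emits in mode='range', assuming a
+    # hypothetical loader node [1] whose 'ckpt_name' options are
+    # ['a.safetensors', 'b.safetensors', 'c.safetensors'] with
+    # start_from='a.safetensors' and end_with='b.safetensors':
+    #
+    #     <1:v_label>
+    #     [1:ckpt_name='a.safetensors']
+    #     <2:v_label>
+    #     [1:ckpt_name='b.safetensors']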
FUNCTION = "plot" + OUTPUT_NODE = True + + CATEGORY = "🌏 tinyterra/xyPlot" + + def plot(self, node, widget, mode, start_from, end_with, select, selection, label_type, prompt=None): + if '[' in node and ']' in node: + nodeid = node.split('[', 1)[1].split(']', 1)[0] + else: + return {"ui": {"text": ''}, "result": ('',)} + + label_map = { + 'Values': 'v_label', + 'Title and Values': 'tv_label', + 'ID, Title and Values': 'idtv_label', + } + label = label_map[label_type] + + plot_text = [] + + class_type = prompt[nodeid]['class_type'] + class_def = nodes.NODE_CLASS_MAPPINGS[class_type] + valid_inputs = class_def.INPUT_TYPES() + options = valid_inputs["required"][widget][0] or valid_inputs["optional"][widget][0] + + vals = [] + if mode == 'all': + vals = options + elif mode == 'range': + start_index = options.index(start_from) + stop_index = options.index(end_with) + 1 + if start_index > stop_index: + start_index, stop_index = stop_index, start_index + vals = options[start_index:stop_index] + elif mode == 'select': + selection = selection.split('\n') + for s in selection: + s.strip() + if s in options: + vals.append(s) + + for i, val in enumerate(vals): + line = f"[{nodeid}:{widget}='{val}']" + plot_text.append(f"<{i+1}:{label}>") + plot_text.append(line) + + out = '\n'.join(plot_text) + + return {"ui": {"text": out}, "result": (out,)} + +class ttN_advPlot_merge: + version = '1.0.0' + def __init__(self): + pass + + @classmethod + def INPUT_TYPES(s): + return { + "required": { + "label_type": (['Values', 'Title and Values', 'ID, Title and Values'],{"default": "Values"}), + }, + "optional": { + "plot_text1": ("STRING", {"forceInput": True,}), + "plot_text2": ("STRING",{"forceInput": True,}), + }, + } + + RETURN_TYPES = ("STRING",) + RETURN_NAMES = ("plot_text",) + FUNCTION = "plot" + + CATEGORY = "🌏 tinyterra/xyPlot" + + def plot(self, label_type, plot_text1='', plot_text2='', ): + label_map = { + 'Values': 'v_label', + 'Title and Values': 'tv_label', + 'ID, Title and Values': 'idtv_label', + } + label = label_map.get(label_type, 'v_label') + + text1 = plot_text1.split("<") if plot_text1 else [] + text2 = plot_text2.split("<") if plot_text2 else [] + + number_of_lines = max(len(text1) - 1, len(text2) - 1, 0) + if number_of_lines == 0: + return '' + + lines = [] + for num in range(1, number_of_lines + 1): + lines.append(f'<{num}:{label}>\n') + + for text in (text1, text2): + if num < len(text): + parts = text[num].split('>\n', 1) + if len(parts) == 2: + lines.append(parts[1]) + if not parts[1].endswith('\n'): + lines.append('\n') + + out = ''.join(lines) + return {"ui": {"text": out}, "result": (out,)} +#--------------------------------------------------------------ttN/xyPlot END-----------------------------------------------------------------------# + + +#----------------------------------------------------------------misc START------------------------------------------------------------------------# +WEIGHTED_SUM = "Weighted sum = ( A*(1-M) + B*M )" +ADD_DIFFERENCE = "Add difference = ( A + (B-C)*M )" +A_ONLY = "A Only" +MODEL_INTERPOLATIONS = [WEIGHTED_SUM, ADD_DIFFERENCE, A_ONLY] +FOLLOW = "Follow model interp" +B_ONLY = "B Only" +C_ONLY = "C Only" +CLIP_INTERPOLATIONS = [FOLLOW, WEIGHTED_SUM, ADD_DIFFERENCE, A_ONLY, B_ONLY, C_ONLY] +ABC = "ABC" + +class ttN_multiModelMerge: + version = '1.1.0' + def __init__(self): + pass + + @classmethod + def INPUT_TYPES(s): + return {"required": { + "ckpt_A_name": (folder_paths.get_filename_list("checkpoints"), ), + "config_A_name": (["Default",] + 
folder_paths.get_filename_list("configs"), {"default": "Default"} ), + "ckpt_B_name": (["None",] + folder_paths.get_filename_list("checkpoints"), ), + "config_B_name": (["Default",] + folder_paths.get_filename_list("configs"), {"default": "Default"} ), + "ckpt_C_name": (["None",] + folder_paths.get_filename_list("checkpoints"), ), + "config_C_name": (["Default",] + folder_paths.get_filename_list("configs"), {"default": "Default"} ), + + "model_interpolation": (MODEL_INTERPOLATIONS,), + "model_multiplier": ("FLOAT", {"default": 1.0, "min": 0.0, "max": 1.0, "step": 0.01}), + + "clip_interpolation": (CLIP_INTERPOLATIONS,), + "clip_multiplier": ("FLOAT", {"default": 1.0, "min": 0.0, "max": 1.0, "step": 0.01}), + }, + "optional": { + "model_A_override": ("MODEL",), + "model_B_override": ("MODEL",), + "model_C_override": ("MODEL",), + "clip_A_override": ("CLIP",), + "clip_B_override": ("CLIP",), + "clip_C_override": ("CLIP",), + }, + "hidden": {"prompt": "PROMPT", "extra_pnginfo": "EXTRA_PNGINFO", "ttNnodeVersion": ttN_multiModelMerge.version, "my_unique_id": "UNIQUE_ID"}, + } + + RETURN_TYPES = ("MODEL", "CLIP",) + RETURN_NAMES = ("model", "clip",) + FUNCTION = "mergificate" + + CATEGORY = "🌏 tinyterra" + + def mergificate(self, ckpt_A_name, config_A_name, ckpt_B_name, config_B_name, ckpt_C_name, config_C_name, + model_interpolation, model_multiplier, clip_interpolation, clip_multiplier, + model_A_override=None, model_B_override=None, model_C_override=None, + clip_A_override=None, clip_B_override=None, clip_C_override=None, + prompt=None, extra_pnginfo=None, my_unique_id=None): + + def required_assets(model_interpolation, clip_interpolation): + required = set(["model_A"]) + + if clip_interpolation in [A_ONLY, B_ONLY, C_ONLY]: + required.add(f"clip_{clip_interpolation[0]}") + elif clip_interpolation in [WEIGHTED_SUM, ADD_DIFFERENCE]: + required.update([f"clip_{letter}" for letter in ABC if letter in clip_interpolation]) + elif clip_interpolation == FOLLOW: + required.add("clip_A") + + if model_interpolation in [WEIGHTED_SUM, ADD_DIFFERENCE]: + letters = [letter for letter in ABC if letter in model_interpolation] + required.update([f"model_{letter}" for letter in letters]) + if clip_interpolation == FOLLOW: + required.update([f"clip_{letter}" for letter in letters]) + + return sorted(list(required)) + + def _collect_letter(letter, required_list, model_override, clip_override, ckpt_name, config_name = None): + model, clip, loaded_clip = None, None, None + config_name = config_name + + if f'model_{letter}' in required_list: + if model_override not in [None, "None"]: + model = model_override + else: + if ckpt_name not in [None, "None"]: + model, loaded_clip, _ = loader.load_checkpoint(ckpt_name, config_name) + else: + e = f"Checkpoint name or model override not provided for model_{letter}.\nUnable to merge models using the following interpolation: {model_interpolation}" + ttNl(e).t(f'multiModelMerge [{my_unique_id}]').error().p().interrupt(e) + + if f'clip_{letter}' in required_list: + if clip_override is not None: + clip = clip_override + elif loaded_clip is not None: + clip = loaded_clip + elif ckpt_name not in [None, "None"]: + _, clip, _ = loader.load_checkpoint(ckpt_name, config_name) + else: + e = f"Checkpoint name or clip override not provided for clip_{letter}.\nUnable to merge clips using the following interpolation: {clip_interpolation}" + ttNl(e).t(f'multiModelMerge [{my_unique_id}]').error().p().interrupt(e) + + return model, clip + + def merge(base_model, base_strength, patch_model, 
patch_strength):
+            m = base_model.clone()
+            kp = patch_model.get_key_patches("diffusion_model.")
+            for k in kp:
+                m.add_patches({k: kp[k]}, patch_strength, base_strength)
+            return m
+
+        def clip_merge(base_clip, base_strength, patch_clip, patch_strength):
+            m = base_clip.clone()
+            kp = patch_clip.get_key_patches()
+            for k in kp:
+                if k.endswith(".position_ids") or k.endswith(".logit_scale"):
+                    continue
+                m.add_patches({k: kp[k]}, patch_strength, base_strength)
+            return m
+
+        def _add_assets(a1, a2, is_clip=False, multiplier=1.0, weighted=False):
+            if is_clip:
+                if weighted:
+                    return clip_merge(a1, (1.0 - multiplier), a2, multiplier)
+                else:
+                    return clip_merge(a1, 1.0, a2, multiplier)
+            else:
+                if weighted:
+                    return merge(a1, (1.0 - multiplier), a2, multiplier)
+                else:
+                    return merge(a1, 1.0, a2, multiplier)
+
+        def _subtract_assets(a1, a2, is_clip=False, multiplier=1.0):
+            if is_clip:
+                return clip_merge(a1, 1.0, a2, -multiplier)
+            else:
+                return merge(a1, 1.0, a2, -multiplier)
+
+        required_list = required_assets(model_interpolation, clip_interpolation)
+        model_A, clip_A = _collect_letter("A", required_list, model_A_override, clip_A_override, ckpt_A_name, config_A_name)
+        model_B, clip_B = _collect_letter("B", required_list, model_B_override, clip_B_override, ckpt_B_name, config_B_name)
+        model_C, clip_C = _collect_letter("C", required_list, model_C_override, clip_C_override, ckpt_C_name, config_C_name)
+
+        # Weighted sum: A*(1-M) + B*M; Add difference: A + (B-C)*M
+        if model_interpolation == A_ONLY:
+            model = model_A
+        if model_interpolation == WEIGHTED_SUM:
+            model = _add_assets(model_A, model_B, False, model_multiplier, True)
+        if model_interpolation == ADD_DIFFERENCE:
+            model = _add_assets(model_A, _subtract_assets(model_B, model_C), False, model_multiplier)
+
+        # FOLLOW re-uses the model interpolation for the clip, so these must stay
+        # separate if-statements rather than an if/elif chain.
+        if clip_interpolation == FOLLOW:
+            clip_interpolation = model_interpolation
+        if clip_interpolation == A_ONLY:
+            clip = clip_A
+        if clip_interpolation == B_ONLY:
+            clip = clip_B
+        if clip_interpolation == C_ONLY:
+            clip = clip_C
+        if clip_interpolation == WEIGHTED_SUM:
+            clip = _add_assets(clip_A, clip_B, True, clip_multiplier, True)
+        if clip_interpolation == ADD_DIFFERENCE:
+            clip = _add_assets(clip_A, _subtract_assets(clip_B, clip_C, True), True, clip_multiplier)
+
+        return (model, clip)
+
+#-----------------------------------------------------------------misc END-------------------------------------------------------------------------#
+
+#---------------------------------------------------------------ttN/text START----------------------------------------------------------------------#
+class ttN_text:
+    version = '1.0.0'
+    def __init__(self):
+        pass
+
+    @classmethod
+    def INPUT_TYPES(s):
+        return {"required": {
+                    "text": ("STRING", {"default": "", "multiline": True, "dynamicPrompts": True}),
+                },
+                "hidden": {"ttNnodeVersion": ttN_text.version},
+        }
+
+    RETURN_TYPES = ("STRING",)
+    RETURN_NAMES = ("text",)
+    FUNCTION = "conmeow"
+
+    CATEGORY = "🌏 tinyterra/text"
+
+    @staticmethod
+    def conmeow(text):
+        return (text,)
+
+class ttN_textDebug:
+    version = '1.0.0'
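+    # execute='Always' works by write() installing a class-level IS_CHANGED whose
+    # return value alternates on every call, forcing ComfyUI to re-run the node;
+    # execute='On Change' removes that method again so normal caching applies.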
+ def __init__(self): + self.num = 0 + + @classmethod + def INPUT_TYPES(s): + return {"required": { + "print_to_console": ([False, True],), + "console_title": ("STRING", {"default": ""}), + "execute": (["Always", "On Change"],), + "text": ("STRING", {"default": '', "multiline": True, "forceInput": True, "dynamicPrompts": True}), + }, + "hidden": {"prompt": "PROMPT", "extra_pnginfo": "EXTRA_PNGINFO", "my_unique_id": "UNIQUE_ID", + "ttNnodeVersion": ttN_textDebug.version}, + } + + RETURN_TYPES = ("STRING",) + RETURN_NAMES = ("text",) + FUNCTION = "write" + OUTPUT_NODE = True + + CATEGORY = "🌏 tinyterra/text" + + def write(self, print_to_console, console_title, execute, text, prompt, extra_pnginfo, my_unique_id): + if execute == "Always": + def IS_CHANGED(self): + self.num += 1 if self.num == 0 else -1 + return self.num + setattr(self.__class__, 'IS_CHANGED', IS_CHANGED) + + if execute == "On Change": + if hasattr(self.__class__, 'IS_CHANGED'): + delattr(self.__class__, 'IS_CHANGED') + + if print_to_console == True: + if console_title != "": + ttNl(text).t(f'textDebug[{my_unique_id}] - {CC.VIOLET}{console_title}').p() + else: + input_node = prompt[my_unique_id]["inputs"]["text"] + + input_from = None + for node in extra_pnginfo["workflow"]["nodes"]: + if node['id'] == int(input_node[0]): + input_from = node['outputs'][input_node[1]].get('label') + + if input_from == None: + input_from = node['outputs'][input_node[1]].get('name') + + ttNl(text).t(f'textDebug[{my_unique_id}] - {CC.VIOLET}{input_from}').p() + + return {"ui": {"text": text}, + "result": (text,)} + +class ttN_concat: + version = '1.0.0' + def __init__(self): + pass + """ + Concatenate 2 strings + """ + @classmethod + def INPUT_TYPES(s): + return {"required": { + "text1": ("STRING", {"multiline": True, "default": '', "dynamicPrompts": True}), + "text2": ("STRING", {"multiline": True, "default": '', "dynamicPrompts": True}), + "text3": ("STRING", {"multiline": True, "default": '', "dynamicPrompts": True}), + "delimiter": ("STRING", {"default":",","multiline": False}), + }, + "hidden": {"ttNnodeVersion": ttN_concat.version}, + } + + RETURN_TYPES = ("STRING",) + RETURN_NAMES = ("concat",) + FUNCTION = "conmeow" + + CATEGORY = "🌏 tinyterra/text" + + def conmeow(self, text1='', text2='', text3='', delimiter=''): + text1 = '' if text1 == 'undefined' else text1 + text2 = '' if text2 == 'undefined' else text2 + text3 = '' if text3 == 'undefined' else text3 + + if delimiter == '\\n': + delimiter = '\n' + + concat = delimiter.join([text1, text2, text3]) + + return (concat,) + +class ttN_text3BOX_3WAYconcat: + version = '1.0.0' + def __init__(self): + pass + """ + Concatenate 3 strings, in various ways. 
+ """ + @classmethod + def INPUT_TYPES(s): + return {"required": { + "text1": ("STRING", {"multiline": True, "default": '', "dynamicPrompts": True}), + "text2": ("STRING", {"multiline": True, "default": '', "dynamicPrompts": True}), + "text3": ("STRING", {"multiline": True, "default": '', "dynamicPrompts": True}), + "delimiter": ("STRING", {"default":",","multiline": False}), + }, + "hidden": {"ttNnodeVersion": ttN_text3BOX_3WAYconcat.version}, + } + + RETURN_TYPES = ("STRING", "STRING", "STRING", "STRING", "STRING", "STRING", "STRING",) + RETURN_NAMES = ("text1", "text2", "text3", "1 & 2", "1 & 3", "2 & 3", "concat",) + FUNCTION = "conmeow" + + CATEGORY = "🌏 tinyterra/text" + + def conmeow(self, text1='', text2='', text3='', delimiter=''): + text1 = '' if text1 == 'undefined' else text1 + text2 = '' if text2 == 'undefined' else text2 + text3 = '' if text3 == 'undefined' else text3 + + if delimiter == '\\n': + delimiter = '\n' + + t_1n2 = delimiter.join([text1, text2]) + t_1n3 = delimiter.join([text1, text3]) + t_2n3 = delimiter.join([text2, text3]) + concat = delimiter.join([text1, text2, text3]) + + return text1, text2, text3, t_1n2, t_1n3, t_2n3, concat + +class ttN_text7BOX_concat: + version = '1.0.0' + def __init__(self): + pass + """ + Concatenate many strings + """ + @classmethod + def INPUT_TYPES(s): + return {"required": { + "text1": ("STRING", {"multiline": True, "default": '', "dynamicPrompts": True}), + "text2": ("STRING", {"multiline": True, "default": '', "dynamicPrompts": True}), + "text3": ("STRING", {"multiline": True, "default": '', "dynamicPrompts": True}), + "text4": ("STRING", {"multiline": True, "default": '', "dynamicPrompts": True}), + "text5": ("STRING", {"multiline": True, "default": '', "dynamicPrompts": True}), + "text6": ("STRING", {"multiline": True, "default": '', "dynamicPrompts": True}), + "text7": ("STRING", {"multiline": True, "default": '', "dynamicPrompts": True}), + "delimiter": ("STRING", {"default":",","multiline": False}), + }, + "hidden": {"ttNnodeVersion": ttN_text7BOX_concat.version}, + } + + RETURN_TYPES = ("STRING", "STRING", "STRING", "STRING", "STRING", "STRING", "STRING", "STRING",) + RETURN_NAMES = ("text1", "text2", "text3", "text4", "text5", "text6", "text7", "concat",) + FUNCTION = "conmeow" + + CATEGORY = "🌏 tinyterra/text" + + def conmeow(self, text1, text2, text3, text4, text5, text6, text7, delimiter): + text1 = '' if text1 == 'undefined' else text1 + text2 = '' if text2 == 'undefined' else text2 + text3 = '' if text3 == 'undefined' else text3 + text4 = '' if text4 == 'undefined' else text4 + text5 = '' if text5 == 'undefined' else text5 + text6 = '' if text6 == 'undefined' else text6 + text7 = '' if text7 == 'undefined' else text7 + + if delimiter == '\\n': + delimiter = '\n' + + texts = [text1, text2, text3, text4, text5, text6, text7] + concat = delimiter.join(text for text in texts if text) + return text1, text2, text3, text4, text5, text6, text7, concat + +class ttN_textCycleLine: + version = '1.0.0' + def __init__(self): + pass + + @classmethod + def INPUT_TYPES(s): + return {"required": { + "text": ("STRING", {"multiline": True, "default": '', "dynamicPrompts": True}), + "index": ("INT", {"default": 0, "min": 0, "max": 0xffffffffffffffff}), + "index_control": (['increment', 'decrement', 'randomize','fixed'],), + }, + "hidden": {"ttNnodeVersion": ttN_textCycleLine.version}, + } + + RETURN_TYPES = ("STRING",) + FUNCTION = "cycle" + + CATEGORY = "🌏 tinyterra/text" + + def cycle(self, text, index, index_control='randomized'): + 
+        lines = text.split('\n')
+
+        if index >= len(lines):
+            index = len(lines) - 1
+        return (lines[index],)
+
+class ttN_textOUPUT:
+    version = '1.0.1'
+    def __init__(self):
+        pass
+
+    @classmethod
+    def INPUT_TYPES(s):
+        return {"required": {
+                    "text_output": (["Preview", "Save"],{"default": "Preview"}),
+                    "text": ("STRING", {"multiline": True}),
+                    "output_path": ("STRING", {"default": folder_paths.get_output_directory(), "multiline": False}),
+                    "save_prefix": ("STRING", {"default": "ComfyUI"}),
+                    "number_padding": (["None", 2, 3, 4, 5, 6, 7, 8, 9],{"default": 5}),
+                    "file_type": (["txt", "md", "rtf", "log", "ini", "csv"], {"default": "txt"}),
+                    "overwrite_existing": ("BOOLEAN", {"default": False}),
+                },
+                "hidden": {"prompt": "PROMPT", "extra_pnginfo": "EXTRA_PNGINFO", "my_unique_id": "UNIQUE_ID",
+                           "ttNnodeVersion": ttN_textOUPUT.version},
+        }
+
+    RETURN_TYPES = ("STRING",)
+    RETURN_NAMES = ("text",)
+    FUNCTION = "output"
+    CATEGORY = "🌏 tinyterra/text"
+    OUTPUT_NODE = True
+
+    def output(self, text_output, text, output_path, save_prefix, number_padding, file_type, overwrite_existing, prompt, extra_pnginfo, my_unique_id):
+        if text_output == 'Save':
+            ttN_save = ttNsave(my_unique_id, prompt, extra_pnginfo, number_padding, overwrite_existing, output_path)
+            ttN_save.textfile(text, save_prefix, file_type)
+
+        # Output text results to ui and node outputs
+        return {"ui": {"text": text},
+                "result": (text,)}
+#---------------------------------------------------------------ttN/text END------------------------------------------------------------------------#
+
+
+#---------------------------------------------------------------ttN/util START----------------------------------------------------------------------#
+class ttN_INT:
+    version = '1.0.0'
+    def __init__(self):
+        pass
+
+    @classmethod
+    def INPUT_TYPES(s):
+        return {"required": {
+                    "int": ("INT", {"default": 0, "min": 0, "max": 0xffffffffffffffff}),
+                },
+                "hidden": {"ttNnodeVersion": ttN_INT.version},
+        }
+
+    RETURN_TYPES = ("INT", "FLOAT", "STRING",)
+    RETURN_NAMES = ("int", "float", "text",)
+    FUNCTION = "convert"
+
+    CATEGORY = "🌏 tinyterra/util"
+
+    @staticmethod
+    def convert(int):
+        # the argument must match the "int" widget name, so it shadows the builtin
+        return int, float(int), str(int)
+
+class ttN_FLOAT:
+    version = '1.0.0'
+    def __init__(self):
+        pass
+
+    @classmethod
+    def INPUT_TYPES(s):
+        return {"required": {
+                    "float": ("FLOAT", {"default": 0.00, "min": 0.00, "max": 0xffffffffffffffff, 'step': 0.01}),
+                },
+                "hidden": {"ttNnodeVersion": ttN_FLOAT.version},
+        }
+
+    RETURN_TYPES = ("FLOAT", "INT", "STRING",)
+    RETURN_NAMES = ("float", "int", "text",)
+    FUNCTION = "convert"
+
+    CATEGORY = "🌏 tinyterra/util"
+
+    @staticmethod
+    def convert(float):
+        # the argument must match the "float" widget name, so it shadows the builtin
+        return float, int(float), str(float)
+
+class ttN_SEED:
+    version = '1.0.0'
+    def __init__(self):
+        pass
+
+    @classmethod
+    def INPUT_TYPES(s):
+        return {"required": {
+                    "seed": ("INT", {"default": 0, "min": 0, "max": 0xffffffffffffffff}),
+                },
+                "hidden": {"ttNnodeVersion": ttN_SEED.version},
+        }
+
+    RETURN_TYPES = ("INT",)
+    RETURN_NAMES = ("seed",)
+    FUNCTION = "plant"
+    OUTPUT_NODE = True
+
+    CATEGORY = "🌏 tinyterra/util"
+
+    @staticmethod
+    def plant(seed):
+        return (seed,)
+
+class ttN_debugInput:
+    version = '1.0.0'
+    @classmethod
+    def INPUT_TYPES(s):
+        return {"required": {
+                    "print_to_console": ("BOOLEAN",),
+                    "console_title": ("STRING", {"default": "ttN debug:"}),
+                    "console_color": (["Black", "Red", "Green", "Yellow", "Blue", "Violet", "Cyan", "White", "Grey", "LightRed", "LightGreen", "LightYellow", "LightBlue", "LightViolet", "LightCyan", 
"LightWhite"], {"default": "Red"}), + }, + "optional": { + "debug": (AnyType("*"), {"default": None}), + } + } + + RETURN_TYPES = tuple() + RETURN_NAMES = tuple() + FUNCTION = "debug" + CATEGORY = "🌏 tinyterra/util" + OUTPUT_NODE = True + + def debug(_, print_to_console, console_title, console_color, debug=None): + + text = str(debug) + if print_to_console: + print(f"{getattr(CC, console_color.upper())}{console_title}\n{text}{CC.CLEAN}") + + return {"ui": {"text": text}, "return": tuple()} + +#---------------------------------------------------------------ttN/util End------------------------------------------------------------------------# + + +#---------------------------------------------------------------ttN/image START---------------------------------------------------------------------# +class ttN_imageREMBG: + version = '1.0.0' + def __init__(self): + pass + + @classmethod + def INPUT_TYPES(s): + return {"required": { + "image": ("IMAGE",), + "image_output": (["Hide", "Preview", "Save", "Hide/Save"],{"default": "Preview"}), + "save_prefix": ("STRING", {"default": "ComfyUI"}), + }, + "hidden": {"prompt": "PROMPT", "extra_pnginfo": "EXTRA_PNGINFO", "my_unique_id": "UNIQUE_ID", + "ttNnodeVersion": ttN_imageREMBG.version}, + } + + + RETURN_TYPES = ("IMAGE", "MASK") + RETURN_NAMES = ("image", "mask") + FUNCTION = "remove_background" + CATEGORY = "🌏 tinyterra/image" + OUTPUT_NODE = True + + def remove_background(self, image, image_output, save_prefix, prompt, extra_pnginfo, my_unique_id): + try: + from rembg import remove + except ImportError: + raise ImportError("REMBG is not installed.\nPlease install it with `pip install rembg` or from https://github.com/danielgatis/rembg.") + + image = remove(ttNsampler.tensor2pil(image)) + tensor = ttNsampler.pil2tensor(image) + + #Get alpha mask + if image.getbands() != ("R", "G", "B", "A"): + image = image.convert("RGBA") + mask = None + if "A" in image.getbands(): + mask = np.array(image.getchannel("A")).astype(np.float32) / 255.0 + mask = torch.from_numpy(mask) + mask = 1. 
- mask + else: + mask = torch.zeros((64,64), dtype=torch.float32, device=sampler.device) + + if image_output == "Disabled": + results = [] + else: + ttN_save = ttNsave(my_unique_id, prompt, extra_pnginfo) + results = ttN_save.images(tensor, save_prefix, image_output) + + if image_output in ("Hide", "Hide/Save"): + return (tensor, mask) + + # Output image results to ui and node outputs + return {"ui": {"images": results}, + "result": (tensor, mask)} + +class ttN_imageOUPUT: + version = '1.2.0' + def __init__(self): + pass + + @classmethod + def INPUT_TYPES(s): + return {"required": { + "image": ("IMAGE",), + "image_output": (["Hide", "Preview", "Save", "Hide/Save"],{"default": "Preview"}), + "output_path": ("STRING", {"default": folder_paths.get_output_directory(), "multiline": False}), + "save_prefix": ("STRING", {"default": "ComfyUI"}), + "number_padding": (["None", 2, 3, 4, 5, 6, 7, 8, 9],{"default": 5}), + "file_type": (OUTPUT_FILETYPES, {"default": "png"}), + "overwrite_existing": ("BOOLEAN", {"default": False}), + "embed_workflow": ("BOOLEAN", {"default": True}), + }, + "hidden": {"prompt": "PROMPT", "extra_pnginfo": "EXTRA_PNGINFO", "my_unique_id": "UNIQUE_ID", + "ttNnodeVersion": ttN_imageOUPUT.version}, + } + + RETURN_TYPES = ("IMAGE",) + RETURN_NAMES = ("image",) + FUNCTION = "output" + CATEGORY = "🌏 tinyterra/image" + OUTPUT_NODE = True + + def output(self, image, image_output, output_path, save_prefix, number_padding, file_type, overwrite_existing, embed_workflow, prompt, extra_pnginfo, my_unique_id): + ttN_save = ttNsave(my_unique_id, prompt, extra_pnginfo, number_padding, overwrite_existing, output_path) + results = ttN_save.images(image, save_prefix, image_output, embed_workflow, file_type) + + if image_output in ("Hide", "Hide/Save"): + return (image,) + + # Output image results to ui and node outputs + return {"ui": {"images": results}, + "result": (image,)} + +class ttN_modelScale: + version = '1.1.0' + upscale_methods = ["nearest-exact", "bilinear", "area", "bicubic", "lanczos", "bislerp"] + crop_methods = ["disabled", "center"] + + @classmethod + def INPUT_TYPES(s): + return {"required": { "model_name": (folder_paths.get_filename_list("upscale_models"),), + "vae": ("VAE",), + "image": ("IMAGE",), + "rescale_after_model": ([False, True],{"default": True}), + "rescale_method": (s.upscale_methods,), + "rescale": (["by percentage", "to Width/Height", 'to longer side - maintain aspect'],), + "percent": ("INT", {"default": 50, "min": 0, "max": 1000, "step": 1}), + "width": ("INT", {"default": 1024, "min": 64, "max": MAX_RESOLUTION, "step": 8}), + "height": ("INT", {"default": 1024, "min": 64, "max": MAX_RESOLUTION, "step": 8}), + "longer_side": ("INT", {"default": 1024, "min": 64, "max": MAX_RESOLUTION, "step": 8}), + "crop": (s.crop_methods,), + "image_output": (["Hide", "Preview", "Save", "Hide/Save"],), + "save_prefix": ("STRING", {"default": "ComfyUI"}), + "output_latent": ([False, True],{"default": True}),}, + "hidden": { "prompt": "PROMPT", "extra_pnginfo": "EXTRA_PNGINFO", "my_unique_id": "UNIQUE_ID", + "ttNnodeVersion": ttN_modelScale.version}, + } + + RETURN_TYPES = ("LATENT", "IMAGE",) + RETURN_NAMES = ("latent", 'image',) + + FUNCTION = "upscale" + CATEGORY = "🌏 tinyterra/image" + OUTPUT_NODE = True + + def vae_encode_crop_pixels(self, pixels): + x = (pixels.shape[1] // 8) * 8 + y = (pixels.shape[2] // 8) * 8 + if pixels.shape[1] != x or pixels.shape[2] != y: + x_offset = (pixels.shape[1] % 8) // 2 + y_offset = (pixels.shape[2] % 8) // 2 + pixels = pixels[:, 
x_offset:x + x_offset, y_offset:y + y_offset, :] + return pixels + + def upscale(self, model_name, vae, image, rescale_after_model, rescale_method, rescale, percent, width, height, longer_side, crop, image_output, save_prefix, output_latent, prompt=None, extra_pnginfo=None, my_unique_id=None): + # Load Model + upscale_model = comfy_extras.nodes_upscale_model.UpscaleModelLoader().load_model(model_name)[0] + + # Model upscale + s = comfy_extras.nodes_upscale_model.ImageUpscaleWithModel().upscale(upscale_model, image)[0] + + # Post Model Rescale + if rescale_after_model == True: + samples = s.movedim(-1, 1) + orig_height = samples.shape[2] + orig_width = samples.shape[3] + if rescale == "by percentage" and percent != 0: + height = percent / 100 * orig_height + width = percent / 100 * orig_width + if (width > MAX_RESOLUTION): + width = MAX_RESOLUTION + if (height > MAX_RESOLUTION): + height = MAX_RESOLUTION + + width = ttNsampler.enforce_mul_of_64(width) + height = ttNsampler.enforce_mul_of_64(height) + elif rescale == "to longer side - maintain aspect": + longer_side = ttNsampler.enforce_mul_of_64(longer_side) + if orig_width > orig_height: + width, height = longer_side, ttNsampler.enforce_mul_of_64(longer_side * orig_height / orig_width) + else: + width, height = ttNsampler.enforce_mul_of_64(longer_side * orig_width / orig_height), longer_side + + + s = comfy.utils.common_upscale(samples, width, height, rescale_method, crop) + s = s.movedim(1,-1) + + # vae encode + if output_latent == True: + pixels = self.vae_encode_crop_pixels(s) + t = vae.encode(pixels[:,:,:,:3]) + if image_output == "return latent": + return ({"samples":t}) + else: + t = None + + ttN_save = ttNsave(my_unique_id, prompt, extra_pnginfo) + results = ttN_save.images(s, save_prefix, image_output) + + if image_output in ("Hide", "Hide/Save"): + return ({"samples":t}, s,) + + return {"ui": {"images": results}, + "result": ({"samples":t}, s,)} + +#---------------------------------------------------------------ttN/image END-----------------------------------------------------------------------# + +TTN_VERSIONS = { + "tinyterraNodes": ttN_version, + "pipeLoader_v2": ttN_pipeLoader_v2.version, + "tinyKSampler": ttN_KSampler_v2.version, + "tinyLoader": ttN_tinyLoader.version, + "tinyConditioning": ttN_conditioning.version, + "pipeKSampler_v2": ttN_pipeKSampler_v2.version, + "pipeKSamplerAdvanced_v2": ttN_pipeKSamplerAdvanced_v2.version, + "pipeLoaderSDXL_v2": ttN_pipeLoaderSDXL_v2.version, + "pipeKSamplerSDXL_v2": ttN_pipeKSamplerSDXL_v2.version, + "pipeEDIT": ttN_pipe_EDIT.version, + "pipe2BASIC": ttN_pipe_2BASIC.version, + "pipe2DETAILER": ttN_pipe_2DETAILER.version, + "advanced xyPlot": ttN_advanced_XYPlot.version, + 'advPlot images': ttN_advPlot_images.version, + "advPlot range": ttN_advPlot_range.version, + "advPlot string": ttN_advPlot_string.version, + "advPlot combo": ttN_advPlot_combo.version, + "advPlot merge": ttN_advPlot_merge.version, + "pipeEncodeConcat": ttN_pipeEncodeConcat.version, + "multiLoraStack": ttN_pipeLoraStack.version, + "multiModelMerge": ttN_multiModelMerge.version, + "debugInput": ttN_debugInput.version, + "text": ttN_text.version, + "textDebug": ttN_textDebug.version, + "concat": ttN_concat.version, + "text3BOX_3WAYconcat": ttN_text3BOX_3WAYconcat.version, + "text7BOX_concat": ttN_text7BOX_concat.version, + "textCycleLine": ttN_textCycleLine.version, + "textOutput": ttN_textOUPUT.version, + "imageOutput": ttN_imageOUPUT.version, + "imageREMBG": ttN_imageREMBG.version, + "hiresfixScale": 
ttN_modelScale.version, + "int": ttN_INT.version, + "float": ttN_FLOAT.version, + "seed": ttN_SEED.version +} +NODE_CLASS_MAPPINGS = { + #ttN/base + "ttN tinyLoader": ttN_tinyLoader, + "ttN conditioning": ttN_conditioning, + "ttN KSampler_v2": ttN_KSampler_v2, + + #ttN/pipe + "ttN pipeLoader_v2": ttN_pipeLoader_v2, + "ttN pipeKSampler_v2": ttN_pipeKSampler_v2, + "ttN pipeKSamplerAdvanced_v2": ttN_pipeKSamplerAdvanced_v2, + "ttN pipeLoaderSDXL_v2": ttN_pipeLoaderSDXL_v2, + "ttN pipeKSamplerSDXL_v2": ttN_pipeKSamplerSDXL_v2, + "ttN advanced xyPlot": ttN_advanced_XYPlot, + "ttN advPlot images": ttN_advPlot_images, + "ttN advPlot range": ttN_advPlot_range, + "ttN advPlot string": ttN_advPlot_string, + "ttN advPlot combo": ttN_advPlot_combo, + "ttN advPlot merge": ttN_advPlot_merge, + "ttN pipeEDIT": ttN_pipe_EDIT, + "ttN pipe2BASIC": ttN_pipe_2BASIC, + "ttN pipe2DETAILER": ttN_pipe_2DETAILER, + "ttN pipeEncodeConcat": ttN_pipeEncodeConcat, + "ttN pipeLoraStack": ttN_pipeLoraStack, + + #ttN/misc + "ttN multiModelMerge": ttN_multiModelMerge, + "ttN debugInput": ttN_debugInput, + + #ttN/text + "ttN text": ttN_text, + "ttN textDebug": ttN_textDebug, + "ttN concat": ttN_concat, + "ttN text3BOX_3WAYconcat": ttN_text3BOX_3WAYconcat, + "ttN text7BOX_concat": ttN_text7BOX_concat, + "ttN textCycleLine": ttN_textCycleLine, + "ttN textOutput": ttN_textOUPUT, + + #ttN/image + "ttN imageOutput": ttN_imageOUPUT, + "ttN imageREMBG": ttN_imageREMBG, + "ttN hiresfixScale": ttN_modelScale, + + #ttN/util + "ttN int": ttN_INT, + "ttN float": ttN_FLOAT, + "ttN seed": ttN_SEED, +} +NODE_DISPLAY_NAME_MAPPINGS = { + #ttN/base + "ttN tinyLoader": "tinyLoader", + "ttN conditioning": "tinyConditioning", + "ttN KSampler_v2": "tinyKSampler", + + #ttN/pipe + "ttN pipeLoader_v2": "pipeLoader", + "ttN pipeKSampler_v2": "pipeKSampler", + "ttN pipeKSamplerAdvanced_v2": "pipeKSamplerAdvanced", + "ttN pipeLoaderSDXL_v2": "pipeLoaderSDXL", + "ttN pipeKSamplerSDXL_v2": "pipeKSamplerSDXL", + "ttN pipeEDIT": "pipeEDIT", + "ttN pipe2BASIC": "pipe > basic_pipe", + "ttN pipe2DETAILER": "pipe > detailer_pipe", + "ttN pipeEncodeConcat": "pipeEncodeConcat", + "ttN pipeLoraStack": "pipeLoraStack", + + #ttN/xyPlot + "ttN advanced xyPlot": "advanced xyPlot", + "ttN advPlot images": "advPlot images", + "ttN advPlot range": "advPlot range", + "ttN advPlot string": "advPlot string", + "ttN advPlot combo": "advPlot combo", + "ttN advPlot merge": "advPlot merge", + + #ttN/misc + "ttN multiModelMerge": "multiModelMerge", + "ttN debugInput": "debugInput", + + #ttN/text + "ttN text": "text", + "ttN textDebug": "textDebug", + "ttN concat": "textConcat", + "ttN text7BOX_concat": "7x TXT Loader Concat", + "ttN text3BOX_3WAYconcat": "3x TXT Loader MultiConcat", + "ttN textCycleLine": "textCycleLine", + "ttN textOutput": "textOutput", + + #ttN/image + "ttN imageREMBG": "imageRemBG", + "ttN imageOutput": "imageOutput", + "ttN hiresfixScale": "hiresfixScale", + + #ttN/util + "ttN int": "int", + "ttN float": "float", + "ttN seed": "seed", +} + +ttNl('Loaded').full().p() + +#---------------------------------------------------------------------------------------------------------------------------------------------------# +# (upscale from QualityOfLifeSuite_Omar92) - https://github.com/omar92/ComfyUI-QualityOfLifeSuit_Omar92 # +# (Node weights from BlenderNeko/ComfyUI_ADV_CLIP_emb) - https://github.com/BlenderNeko/ComfyUI_ADV_CLIP_emb # 
+#---------------------------------------------------------------------------------------------------------------------------------------------------# diff --git a/zavodik/nodes/ComfyUI_tinyterraNodes/ttNpy/ttNexecutor.py b/zavodik/nodes/ComfyUI_tinyterraNodes/ttNpy/ttNexecutor.py new file mode 100644 index 0000000000000000000000000000000000000000..4b2a0de48bf237885b8cfdc692020e798b350da4 --- /dev/null +++ b/zavodik/nodes/ComfyUI_tinyterraNodes/ttNpy/ttNexecutor.py @@ -0,0 +1,510 @@ +import nodes +import torch +import comfy.model_management +import copy +import logging +import sys +import traceback +from execution import full_type_name + +def get_input_data(inputs, class_def, unique_id, outputs={}, prompt={}, extra_data={}): + valid_inputs = class_def.INPUT_TYPES() + input_data_all = {} + for x in inputs: + input_data = inputs[x] + if isinstance(input_data, list): + input_unique_id = input_data[0] + output_index = input_data[1] + if input_unique_id not in outputs: + input_data_all[x] = (None,) + continue + obj = outputs[input_unique_id][output_index] + input_data_all[x] = obj + else: + if ("required" in valid_inputs and x in valid_inputs["required"]) or ("optional" in valid_inputs and x in valid_inputs["optional"]): + input_data_all[x] = [input_data] + + if "hidden" in valid_inputs: + h = valid_inputs["hidden"] + for x in h: + if h[x] == "PROMPT": + input_data_all[x] = [prompt] + if h[x] == "EXTRA_PNGINFO": + input_data_all[x] = [extra_data.get('extra_pnginfo', None)] + if h[x] == "UNIQUE_ID": + input_data_all[x] = [unique_id] + return input_data_all + +def get_output_data(obj, input_data_all): + results = [] + uis = [] + return_values = map_node_over_list(obj, input_data_all, obj.FUNCTION, allow_interrupt=True) + for r in return_values: + if isinstance(r, dict): + if 'ui' in r: + uis.append(r['ui']) + if 'result' in r: + results.append(r['result']) + else: + results.append(r) + output = [] + if len(results) > 0: + # check which outputs need concatenating + # Handle both old tuples and new NodeOutput objects + first_result = results[0] + try: + # Try to get length directly (works for tuples/lists) + result_len = len(first_result) + except TypeError: + # If len() fails, it's likely a NodeOutput - convert it + try: + first_result = tuple(first_result) + results[0] = first_result + result_len = len(first_result) + except: + # Single value output + result_len = 1 + results[0] = (first_result,) + + output_is_list = [False] * result_len + + if hasattr(obj, "OUTPUT_IS_LIST"): + output_is_list = obj.OUTPUT_IS_LIST + # merge node execution results + for i, is_list in zip(range(len(results[0])), output_is_list): + if is_list: + output.append([x for o in results for x in o[i]]) + else: + output.append([o[i] for o in results]) + ui = dict() + if len(uis) > 0: + ui = {k: [y for x in uis for y in x[k]] for k in uis[0].keys()} + return output, ui + +class ttN_advanced_XYPlot: + version = '1.1.0' + plotPlaceholder = "_PLOT\nExample:\n\n\n[node_ID:widget_Name='value']\n\n\n[node_ID:widget_Name='value2']\n[node_ID:widget2_Name='value']\n[node_ID2:widget_Name='value']\n\netc..." 
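+    # Trimmed X/Y-only copy of the ttN_advanced_XYPlot node class; the executor
+    # swaps in the ttN_Plotting stub (defined below) for the real node so that
+    # running the sub-prompt does not recursively re-execute the xyPlot node.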
+
+    def get_plot_points(plot_data, unique_id):
+        if plot_data is None or plot_data.strip() == '':
+            return None
+        else:
+            try:
+                axis_dict = {}
+                lines = plot_data.split('<')
+                new_lines = []
+                temp_line = ''
+
+                for line in lines:
+                    if line.startswith('lora'):
+                        temp_line += '<' + line
+                        new_lines[-1] = temp_line
+                    else:
+                        new_lines.append(line)
+                        temp_line = line
+
+                for line in new_lines:
+                    if line:
+                        values_label = []
+                        line = line.split('>', 1)
+                        num, label = line[0].split(':', 1)
+                        axis_dict[num] = {"label": label}
+                        for point in line[1].split('['):
+                            if point.strip() != '':
+                                node_id = point.split(':', 1)[0]
+                                axis_dict[num][node_id] = {}
+                                input_name = point.split(':', 1)[1].split('=')[0]
+                                value = point.split("'")[1].split("'")[0]
+                                values_label.append((value, input_name, node_id))
+
+                                axis_dict[num][node_id][input_name] = value
+
+                        if label in ['v_label', 'tv_label', 'idtv_label']:
+                            new_label = []
+                            for value, input_name, node_id in values_label:
+                                if label == 'v_label':
+                                    new_label.append(value)
+                                elif label == 'tv_label':
+                                    new_label.append(f'{input_name}: {value}')
+                                elif label == 'idtv_label':
+                                    new_label.append(f'[{node_id}] {input_name}: {value}')
+                            axis_dict[num]['label'] = ', '.join(new_label)
+
+            except ValueError:
+                ttNl('Invalid Plot - defaulting to None...').t(f'advanced_XYPlot[{unique_id}]').warn().p()
+                return None
+        return axis_dict
+
+    def __init__(self):
+        pass
+
+    @classmethod
+    def INPUT_TYPES(s):
+        return {
+            "required": {
+                "grid_spacing": ("INT",{"min": 0, "max": 500, "step": 5, "default": 0,}),
+                "save_individuals": ("BOOLEAN", {"default": False}),
+                "flip_xy": ("BOOLEAN", {"default": False}),
+
+                "x_plot": ("STRING",{"default": '', "multiline": True, "placeholder": 'X' + ttN_advanced_XYPlot.plotPlaceholder, "pysssss.autocomplete": False}),
+                "y_plot": ("STRING",{"default": '', "multiline": True, "placeholder": 'Y' + ttN_advanced_XYPlot.plotPlaceholder, "pysssss.autocomplete": False}),
+            },
+            "hidden": {
+                "prompt": ("PROMPT",),
+                "extra_pnginfo": ("EXTRA_PNGINFO",),
+                "my_unique_id": ("MY_UNIQUE_ID",),
+                "ttNnodeVersion": ttN_advanced_XYPlot.version,
+            },
+        }
+
+    RETURN_TYPES = ("ADV_XYPLOT", )
+    RETURN_NAMES = ("adv_xyPlot", )
+    FUNCTION = "plot"
+
+    CATEGORY = "🌏 tinyterra/xyPlot"
+
+    def plot(self, grid_spacing, save_individuals, flip_xy, x_plot=None, y_plot=None, prompt=None, extra_pnginfo=None, my_unique_id=None):
+        x_plot = ttN_advanced_XYPlot.get_plot_points(x_plot, my_unique_id)
+        y_plot = ttN_advanced_XYPlot.get_plot_points(y_plot, my_unique_id)
+
+        if x_plot == {}:
+            x_plot = None
+        if y_plot == {}:
+            y_plot = None
+
+        if flip_xy:
+            x_plot, y_plot = y_plot, x_plot
+
+        xy_plot = {"x_plot": x_plot,
+                   "y_plot": y_plot,
+                   "grid_spacing": grid_spacing,
+                   "save_individuals": save_individuals,}
+
+        return (xy_plot, )
+
+class ttN_Plotting(ttN_advanced_XYPlot):
+    def plot(self, **args):
+        xy_plot = None
+        return (xy_plot, )
+
+
+def map_node_over_list(obj, input_data_all, func, allow_interrupt=False):
+    # check if node wants the lists
+    input_is_list = False
+    if hasattr(obj, "INPUT_IS_LIST"):
+        input_is_list = obj.INPUT_IS_LIST
+
+    if len(input_data_all) == 0:
+        max_len_input = 0
+    else:
+        max_len_input = max([len(x) for x in input_data_all.values()])
+
+    # get a slice of inputs, repeat last input when list isn't long enough
+    def slice_dict(d, i):
+        d_new = dict()
+        for k,v in d.items():
+            d_new[k] = v[i if len(v) > i else -1]
+        return d_new
+
+    results = []
+    if input_is_list:
+        if allow_interrupt:
+            nodes.before_node_execution()
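+        # INPUT_IS_LIST nodes get the full input lists in one call; otherwise the
+        # function is mapped element-wise below, repeating the last item of any
+        # shorter input list (see slice_dict above).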
results.append(getattr(obj, func)(**input_data_all)) + elif max_len_input == 0: + if allow_interrupt: + nodes.before_node_execution() + results.append(getattr(obj, func)()) + else: + for i in range(max_len_input): + if allow_interrupt: + nodes.before_node_execution() + results.append(getattr(obj, func)(**slice_dict(input_data_all, i))) + return results + +def format_value(x): + if x is None: + return None + elif isinstance(x, (int, float, bool, str)): + return x + else: + return str(x) + +def recursive_execute(prompt, outputs, current_item, extra_data, executed, prompt_id, outputs_ui, object_storage): + unique_id = current_item + inputs = prompt[unique_id]['inputs'] + class_type = prompt[unique_id]['class_type'] + if class_type == "ttN advanced xyPlot": + class_def = ttN_Plotting #Fake class to avoid recursive execute of xy_plot node + else: + class_def = nodes.NODE_CLASS_MAPPINGS[class_type] + + if unique_id in outputs: + print('returning already executed', unique_id) + return (True, None, None) + + for x in inputs: + input_data = inputs[x] + + if isinstance(input_data, list): + input_unique_id = input_data[0] + output_index = input_data[1] + if input_unique_id not in outputs: + result = recursive_execute(prompt, outputs, input_unique_id, extra_data, executed, prompt_id, outputs_ui, object_storage) + if result[0] is not True: + # Another node failed further upstream + return result + + input_data_all = None + try: + input_data_all = get_input_data(inputs, class_def, unique_id, outputs, prompt, extra_data) + + obj = object_storage.get((unique_id, class_type), None) + if obj is None: + obj = class_def() + object_storage[(unique_id, class_type)] = obj + + output_data, output_ui = get_output_data(obj, input_data_all) + outputs[unique_id] = output_data + if len(output_ui) > 0: + outputs_ui[unique_id] = output_ui + + except comfy.model_management.InterruptProcessingException as iex: + logging.info("Processing interrupted") + + # skip formatting inputs/outputs + error_details = { + "node_id": unique_id, + } + + return (False, error_details, iex) + except Exception as ex: + typ, _, tb = sys.exc_info() + exception_type = full_type_name(typ) + input_data_formatted = {} + if input_data_all is not None: + input_data_formatted = {} + for name, inputs in input_data_all.items(): + input_data_formatted[name] = [format_value(x) for x in inputs] + + output_data_formatted = {} + for node_id, node_outputs in outputs.items(): + output_data_formatted[node_id] = [[format_value(x) for x in l] for l in node_outputs] + + logging.error(f"!!! Exception during xyPlot processing!!! 
{ex}") + logging.error(traceback.format_exc()) + + error_details = { + "node_id": unique_id, + "exception_message": str(ex), + "exception_type": exception_type, + "traceback": traceback.format_tb(tb), + "current_inputs": input_data_formatted, + "current_outputs": output_data_formatted + } + return (False, error_details, ex) + + executed.add(unique_id) + + return (True, None, None) + +def recursive_will_execute(prompt, outputs, current_item, memo={}): + unique_id = current_item + + if unique_id in memo: + return memo[unique_id] + + inputs = prompt[unique_id]['inputs'] + will_execute = [] + if unique_id in outputs: + return [] + + for x in inputs: + input_data = inputs[x] + if isinstance(input_data, list): + input_unique_id = input_data[0] + output_index = input_data[1] + if input_unique_id not in outputs: + will_execute += recursive_will_execute(prompt, outputs, input_unique_id, memo) + + memo[unique_id] = will_execute + [unique_id] + return memo[unique_id] + +def recursive_output_delete_if_changed(prompt, old_prompt, outputs, current_item): + unique_id = current_item + inputs = prompt[unique_id]['inputs'] + class_type = prompt[unique_id]['class_type'] + class_def = nodes.NODE_CLASS_MAPPINGS[class_type] + + is_changed_old = '' + is_changed = '' + to_delete = False + if hasattr(class_def, 'IS_CHANGED'): + if unique_id in old_prompt and 'is_changed' in old_prompt[unique_id]: + is_changed_old = old_prompt[unique_id]['is_changed'] + if 'is_changed' not in prompt[unique_id]: + input_data_all = get_input_data(inputs, class_def, unique_id, outputs) + if input_data_all is not None: + try: + #is_changed = class_def.IS_CHANGED(**input_data_all) + is_changed = map_node_over_list(class_def, input_data_all, "IS_CHANGED") + prompt[unique_id]['is_changed'] = is_changed + except: + to_delete = True + else: + is_changed = prompt[unique_id]['is_changed'] + + if unique_id not in outputs: + return True + + if not to_delete: + if is_changed != is_changed_old: + to_delete = True + elif unique_id not in old_prompt: + to_delete = True + elif inputs == old_prompt[unique_id]['inputs']: + for x in inputs: + input_data = inputs[x] + + if isinstance(input_data, list): + input_unique_id = input_data[0] + output_index = input_data[1] + if input_unique_id in outputs: + to_delete = recursive_output_delete_if_changed(prompt, old_prompt, outputs, input_unique_id) + else: + to_delete = True + if to_delete: + break + else: + to_delete = True + + if to_delete: + d = outputs.pop(unique_id) + del d + return to_delete + + +class xyExecutor: + def __init__(self): + self.reset() + + def reset(self): + self.outputs = {} + self.object_storage = {} + self.outputs_ui = {} + self.status_messages = [] + self.success = True + self.old_prompt = {} + + def add_message(self, event, data, broadcast: bool): + self.status_messages.append((event, data)) + + def handle_execution_error(self, prompt_id, prompt, current_outputs, executed, error, ex): + node_id = error["node_id"] + class_type = prompt[node_id]["class_type"] + + # First, send back the status to the frontend depending + # on the exception type + if isinstance(ex, comfy.model_management.InterruptProcessingException): + mes = { + "prompt_id": prompt_id, + "node_id": node_id, + "node_type": class_type, + "executed": list(executed), + } + self.add_message("execution_interrupted", mes, broadcast=True) + else: + mes = { + "prompt_id": prompt_id, + "node_id": node_id, + "node_type": class_type, + "executed": list(executed), + + "exception_message": error["exception_message"], + 
"exception_type": error["exception_type"], + "traceback": error["traceback"], + "current_inputs": error["current_inputs"], + "current_outputs": error["current_outputs"], + } + self.add_message("execution_error", mes, broadcast=False) + + # Next, remove the subsequent outputs since they will not be executed + to_delete = [] + for o in self.outputs: + if (o not in current_outputs) and (o not in executed): + to_delete += [o] + if o in self.old_prompt: + d = self.old_prompt.pop(o) + del d + for o in to_delete: + d = self.outputs.pop(o) + del d + + raise Exception(ex) + + def execute(self, prompt, prompt_id, extra_data={}, execute_outputs=[]): + nodes.interrupt_processing(False) + + self.status_messages = [] + self.add_message("execution_start", { "prompt_id": prompt_id}, broadcast=False) + + with torch.inference_mode(): + #delete cached outputs if nodes don't exist for them + to_delete = [] + for o in self.outputs: + if o not in prompt: + to_delete += [o] + for o in to_delete: + d = self.outputs.pop(o) + del d + to_delete = [] + for o in self.object_storage: + if o[0] not in prompt: + to_delete += [o] + else: + p = prompt[o[0]] + if o[1] != p['class_type']: + to_delete += [o] + for o in to_delete: + d = self.object_storage.pop(o) + del d + + for x in prompt: + recursive_output_delete_if_changed(prompt, self.old_prompt, self.outputs, x) + + current_outputs = set(self.outputs.keys()) + for x in list(self.outputs_ui.keys()): + if x not in current_outputs: + d = self.outputs_ui.pop(x) + del d + + comfy.model_management.cleanup_models() + self.add_message("execution_cached", + { "nodes": list(current_outputs) , "prompt_id": prompt_id}, + broadcast=False) + executed = set() + output_node_id = None + to_execute = [] + + for node_id in list(execute_outputs): + to_execute += [(0, node_id)] + + while len(to_execute) > 0: + #always execute the output that depends on the least amount of unexecuted nodes first + memo = {} + to_execute = sorted(list(map(lambda a: (len(recursive_will_execute(prompt, self.outputs, a[-1], memo)), a[-1]), to_execute))) + output_node_id = to_execute.pop(0)[-1] + + # This call shouldn't raise anything if there's an error deep in + # the actual SD code, instead it will report the node where the + # error was raised + self.success, error, ex = recursive_execute(prompt, self.outputs, output_node_id, extra_data, executed, prompt_id, self.outputs_ui, self.object_storage) + if self.success is not True: + self.handle_execution_error(prompt_id, prompt, current_outputs, executed, error, ex) + break + + for x in executed: + self.old_prompt[x] = copy.deepcopy(prompt[x]) + + if comfy.model_management.DISABLE_SMART_MEMORY: + comfy.model_management.unload_all_models() diff --git a/zavodik/nodes/ComfyUI_tinyterraNodes/ttNpy/ttNlegacyNodes.py b/zavodik/nodes/ComfyUI_tinyterraNodes/ttNpy/ttNlegacyNodes.py new file mode 100644 index 0000000000000000000000000000000000000000..45476d470d6fdc025f90034ef44972b3d3c0f3be --- /dev/null +++ b/zavodik/nodes/ComfyUI_tinyterraNodes/ttNpy/ttNlegacyNodes.py @@ -0,0 +1,2078 @@ +import folder_paths +import os +import re +import json +import torch +import random +import datetime +from pathlib import Path +from urllib.request import urlopen +from typing import Dict, List, Optional, Tuple, Union, Any + +from PIL.PngImagePlugin import PngInfo +from PIL import Image, ImageDraw, ImageFont +import numpy as np +import hashlib + +import comfy.samplers +import latent_preview +from comfy.sd import CLIP, VAE +from .adv_encode import advanced_encode +from .utils import CC, 
ttNl, ttNpaths
+from comfy.model_patcher import ModelPatcher
+from nodes import MAX_RESOLUTION, ControlNetApplyAdvanced
+
+
+class ttNloader:
+    def __init__(self):
+        self.loraDict = {lora.split('\\')[-1]: lora for lora in folder_paths.get_filename_list("loras")}
+
+    @staticmethod
+    def nsp_parse(text, seed=0, noodle_key='__', nspterminology=None, pantry_path=None, title=None, my_unique_id=None):
+        if "__" not in text:
+            return text
+
+        if nspterminology is None:
+            # Fetch the NSP Pantry
+            if pantry_path is None:
+                pantry_path = os.path.join(ttNpaths.tinyterraNodes, 'nsp_pantry.json')
+            if not os.path.exists(pantry_path):
+                response = urlopen('https://raw.githubusercontent.com/WASasquatch/noodle-soup-prompts/main/nsp_pantry.json')
+                tmp_pantry = json.loads(response.read())
+                # Dump JSON locally
+                pantry_serialized = json.dumps(tmp_pantry, indent=4)
+                with open(pantry_path, "w") as f:
+                    f.write(pantry_serialized)
+                del response, tmp_pantry
+
+            # Load local pantry
+            with open(pantry_path, 'r') as f:
+                nspterminology = json.load(f)
+
+        if seed != 0:
+            random.seed(seed)
+
+        # Parse Text
+        new_text = text
+        for term in nspterminology:
+            # Target Noodle
+            tkey = f'{noodle_key}{term}{noodle_key}'
+            # How many occurrences?
+            tcount = new_text.count(tkey)
+
+            # Apply random results for each noodle counted
+            for _ in range(tcount):
+                new_text = new_text.replace(
+                    tkey, random.choice(nspterminology[term]), 1)
+                seed += 1
+                random.seed(seed)
+
+        ttNl(new_text).t(f'{title}[{my_unique_id}]').p()
+
+        return new_text
+
+    @staticmethod
+    def clean_values(values: str):
+        original_values = values.split("; ")
+        cleaned_values = []
+
+        for value in original_values:
+            cleaned_value = value.strip(';').strip()
+            if cleaned_value:
+                try:
+                    cleaned_value = int(cleaned_value)
+                except ValueError:
+                    try:
+                        cleaned_value = float(cleaned_value)
+                    except ValueError:
+                        pass
+
+                cleaned_values.append(cleaned_value)
+        return cleaned_values
+
+    @staticmethod
+    def string_to_seed(s):
+        h = hashlib.sha256(s.encode()).digest()
+        return (int.from_bytes(h, byteorder='big') & 0xffffffffffffffff)
+
+    def load_checkpoint(self, ckpt_name, config_name=None, clip_skip=0):
+        ckpt_path = folder_paths.get_full_path("checkpoints", ckpt_name)
+        if config_name not in [None, "Default"]:
+            config_path = folder_paths.get_full_path("configs", config_name)
+            loaded_ckpt = comfy.sd.load_checkpoint(config_path, ckpt_path, output_vae=True, output_clip=True, embedding_directory=folder_paths.get_folder_paths("embeddings"))
+        else:
+            loaded_ckpt = comfy.sd.load_checkpoint_guess_config(ckpt_path, output_vae=True, output_clip=True, embedding_directory=folder_paths.get_folder_paths("embeddings"))
+
+        clip = loaded_ckpt[1].clone()
+        if clip_skip != 0:
+            clip.clip_layer(clip_skip)
+
+        # model, clip, vae
+        return loaded_ckpt[0], clip, loaded_ckpt[2]
+
+    def load_unclip(self, ckpt_name, output_vae=True, output_clip=True):
+        ckpt_path = folder_paths.get_full_path("checkpoints", ckpt_name)
+        out = comfy.sd.load_checkpoint_guess_config(ckpt_path, output_vae=output_vae, output_clip=output_clip, output_clipvision=True, embedding_directory=folder_paths.get_folder_paths("embeddings"))
+        return out
+
+    def load_vae(self, vae_name):
+        vae_path = folder_paths.get_full_path("vae", vae_name)
+        sd = comfy.utils.load_torch_file(vae_path)
+        loaded_vae = comfy.sd.VAE(sd=sd)
+
+        return loaded_vae
+
+    def load_controlNet(self, positive, negative, controlnet_name, image, strength, start_percent, end_percent):
+        if isinstance(controlnet_name, str):
+            controlnet_path = folder_paths.get_full_path("controlnet", controlnet_name)
+            controlnet = comfy.controlnet.load_controlnet(controlnet_path)
+        else:
+            controlnet = controlnet_name
+
+        controlnet_conditioning = ControlNetApplyAdvanced().apply_controlnet(positive, negative, controlnet, image, strength, start_percent, end_percent)
+        base_positive, base_negative = controlnet_conditioning[0], controlnet_conditioning[1]
+        return base_positive, base_negative
+
+    def load_lora(self, lora_name, model, clip, strength_model, strength_clip):
+        if strength_model == 0 and strength_clip == 0:
+            return (model, clip)
+
+        lora_path = folder_paths.get_full_path("loras", lora_name)
+        if lora_path is None or not os.path.exists(lora_path):
+            ttNl(f'{lora_path}').t("Skipping missing lora").error().p()
+            return (model, clip)
+
+        lora = comfy.utils.load_torch_file(lora_path, safe_load=True)
+        model_lora, clip_lora = comfy.sd.load_lora_for_models(model, clip, lora, strength_model, strength_clip)
+
+        return model_lora, clip_lora
+
+    def validate_lora_format(self, lora_string):
+        # Expected syntax: <lora:name> or <lora:name:model_strength[:clip_strength]>
+        if not re.match(r'^<lora:[^:]+(:-?\d+(\.\d+)?){0,2}>$', lora_string):
+            ttNl(f'{lora_string}').t("Skipping invalid lora format").error().p()
+            return None
+        return lora_string
+
+    def parse_lora_string(self, lora_string):
+        # Remove '<lora:' from the start and '>' from the end, then split by ':'
+        parts = lora_string[6:-1].split(':')  # 6 is the length of '<lora:'
+        lora_name = parts[0] if len(parts) > 0 else None
+        lora_name = self.loraDict.get(lora_name, lora_name)
+        weight1 = float(parts[1]) if len(parts) > 1 else None
+        weight2 = float(parts[2]) if len(parts) > 2 else weight1
+        return lora_name, weight1, weight2
+
+    def load_lora_text(self, loras, model, clip):
+        # Extract potential <lora:...> patterns
+        pattern = r'<lora:[^>]+>'
+        matches = re.findall(pattern, loras)
+
+        # Validate each extracted pattern
+        for match in matches:
+            match = self.validate_lora_format(match)
+            if match is not None:
+                lora_name, weight1, weight2 = self.parse_lora_string(match)
+                model, clip = self.load_lora(lora_name, model, clip, weight1, weight2)
+
+        return model, clip
+
+    def embedding_encode(self, text, token_normalization, weight_interpretation, clip, seed=None, title=None, my_unique_id=None, prepend_text=None):
+        text = f'{prepend_text} {text}' if prepend_text is not None else text
+        if seed is None:
+            seed = self.string_to_seed(text)
+
+        text = self.nsp_parse(text, seed, title=title, my_unique_id=my_unique_id)
+
+        embedding, pooled = advanced_encode(clip, text, token_normalization, weight_interpretation, w_max=1.0, apply_to_pooled='enable')
+        return [[embedding, {"pooled_output": pooled}]]
+
+    def embedding_encodeXL(self, text, clip, seed=0, title=None, my_unique_id=None, prepend_text=None, text2=None, prepend_text2=None, width=None, height=None, crop_width=0, crop_height=0, target_width=None, target_height=None, refiner_clip=None, ascore=None):
+        text = f'{prepend_text} {text}' if prepend_text is not None else text
+        text = self.nsp_parse(text, seed, title=title, my_unique_id=my_unique_id)
+
+        target_width = target_width if target_width is not None else width
+        target_height = target_height if target_height is not None else height
+
+        if text2 is not None and refiner_clip is not None:
+            text2 = f'{prepend_text2} {text2}' if prepend_text2 is not None else text2
+            text2 = self.nsp_parse(text2, seed, title=title, my_unique_id=my_unique_id)
+
+            tokens_refiner = refiner_clip.tokenize(text2)
+            cond_refiner, pooled_refiner = refiner_clip.encode_from_tokens(tokens_refiner, return_pooled=True)
+            refiner_conditioning = [[cond_refiner,
{"pooled_output": pooled_refiner, "aesthetic_score": ascore, "width": width,"height": height}]] + else: + refiner_conditioning = None + + if text2 is None or text2.strip() == '': + text2 = text + + tokens = clip.tokenize(text) + tokens["l"] = clip.tokenize(text2)["l"] + if len(tokens["l"]) != len(tokens["g"]): + empty = clip.tokenize("") + while len(tokens["l"]) < len(tokens["g"]): + tokens["l"] += empty["l"] + while len(tokens["l"]) > len(tokens["g"]): + tokens["g"] += empty["g"] + cond, pooled = clip.encode_from_tokens(tokens, return_pooled=True) + conditioning = [[cond, {"pooled_output": pooled, "width": width, "height": height, "crop_w": crop_width, "crop_h": crop_height, "target_width": target_width, "target_height": target_height}]] + + + + return conditioning, refiner_conditioning + + def load_main3(self, ckpt_name, config_name, vae_name, loras, clip_skip, model_override=None, clip_override=None, optional_lora_stack=None): + # Load models + if (model_override is not None) and (clip_override is not None) and (vae_name != "Baked VAE"): + model, clip, vae = None, None, None + else: + model, clip, vae = self.load_checkpoint(ckpt_name, config_name, clip_skip) + + if model_override is not None: + model = model_override + del model_override + + if clip_override is not None: + clip = clip_override.clone() + + if clip_skip != 0: + clip.clip_layer(clip_skip) + del clip_override + + if vae_name != "Baked VAE": + vae = self.load_vae(vae_name) + + if optional_lora_stack is not None: + for lora in optional_lora_stack: + model, clip = self.load_lora(lora[0], model, clip, lora[1], lora[2]) + + if loras not in [None, "None"]: + model, clip = self.load_lora_text(loras, model, clip) + + if not clip: + raise Exception("No CLIP found") + + return model, clip, vae + +class ttNsampler: + def __init__(self): + self.last_helds: dict[str, list] = { + "results": [], + "pipe_line": [], + } + self.device = comfy.model_management.intermediate_device() + + @staticmethod + def tensor2pil(image: torch.Tensor) -> Image.Image: + """Convert a torch tensor to a PIL image.""" + return Image.fromarray(np.clip(255. 
* image.cpu().numpy().squeeze(), 0, 255).astype(np.uint8))
+
+    @staticmethod
+    def pil2tensor(image: Image.Image) -> torch.Tensor:
+        """Convert a PIL image to a torch tensor."""
+        return torch.from_numpy(np.array(image).astype(np.float32) / 255.0).unsqueeze(0)
+
+    @staticmethod
+    def enforce_mul_of_64(d):
+        d = int(d)
+        if d <= 7:
+            d = 8
+        leftover = d % 8  # latent dimensions must be a multiple of 8 pixels
+        if leftover != 0:
+            if leftover < 4:
+                d -= leftover  # round down to the previous multiple of 8
+            else:
+                d += 8 - leftover  # round up to the next multiple of 8
+
+        return int(d)
+
+    @staticmethod
+    def safe_split(to_split: str, delimiter: str) -> List[str]:
+        """Split the input string and return a list of non-empty parts."""
+        parts = to_split.split(delimiter)
+        parts = [part for part in parts if part not in ('', ' ', ' ')]
+
+        while len(parts) < 2:
+            parts.append('None')
+        return parts
+
+    def emptyLatent(self, empty_latent_aspect: str, batch_size: int, width: int = None, height: int = None) -> torch.Tensor:
+        if empty_latent_aspect and empty_latent_aspect != "width x height [custom]":
+            width, height = empty_latent_aspect.replace(' ', '').split('[')[0].split('x')
+
+        latent = torch.zeros([batch_size, 4, int(height) // 8, int(width) // 8], device=self.device)
+        return latent
+
+    def common_ksampler(self, model, seed, steps, cfg, sampler_name, scheduler, positive, negative, latent, denoise=1.0, disable_noise=False, start_step=None, last_step=None, force_full_denoise=False, preview_latent=True, disable_pbar=False):
+        device = comfy.model_management.get_torch_device()
+        latent_image = latent["samples"]
+
+        if disable_noise:
+            noise = torch.zeros(latent_image.size(), dtype=latent_image.dtype, layout=latent_image.layout, device="cpu")
+        else:
+            batch_inds = latent["batch_index"] if "batch_index" in latent else None
+            noise = comfy.sample.prepare_noise(latent_image, seed, batch_inds)
+
+        noise_mask = None
+        if "noise_mask" in latent:
+            noise_mask = latent["noise_mask"]
+
+        preview_format = "JPEG"
+
+        previewer = False
+        if preview_latent:
+            previewer = latent_preview.get_previewer(device, model.model.latent_format)
+
+        pbar = comfy.utils.ProgressBar(steps)
+        def callback(step, x0, x, total_steps):
+            preview_bytes = None
+            if previewer:
+                preview_bytes = previewer.decode_latent_to_preview_image(preview_format, x0)
+            pbar.update_absolute(step + 1, total_steps, preview_bytes)
+
+        samples = comfy.sample.sample(model, noise, steps, cfg, sampler_name, scheduler, positive, negative, latent_image,
+                                      denoise=denoise, disable_noise=disable_noise, start_step=start_step, last_step=last_step,
+                                      force_full_denoise=force_full_denoise, noise_mask=noise_mask, callback=callback, disable_pbar=disable_pbar, seed=seed)
+
+        out = latent.copy()
+        out["samples"] = samples
+        return out
+
+    def process_hold_state(self, pipe, image_output, my_unique_id, sdxl=False):
+        title = f'pipeKSampler[{my_unique_id}]' if not sdxl else f'pipeKSamplerSDXL[{my_unique_id}]'
+        ttNl('Held').t(title).p()
+
+        last_pipe = self.init_state(my_unique_id, "pipe_line", pipe) if not sdxl else self.init_state(my_unique_id, "pipe_line_sdxl", pipe)
+
+        last_results = self.init_state(my_unique_id, "results", list())
+
+        output = self.get_output(last_pipe) if not sdxl else self.get_output_sdxl_v2(last_pipe)
+
+        if image_output in ("Hide", "Hide/Save", "Disabled"):
+            return output
+
+        
return {"ui": {"images": last_results}, "result": output} + + def get_value_by_id(self, key: str, my_unique_id: Any) -> Optional[Any]: + """Retrieve value by its associated ID.""" + try: + for value, id_ in self.last_helds[key]: + if id_ == my_unique_id: + return value + except KeyError: + return None + + def update_value_by_id(self, key: str, my_unique_id: Any, new_value: Any) -> Union[bool, None]: + """Update the value associated with a given ID. Return True if updated, False if appended, None if key doesn't exist.""" + try: + for i, (value, id_) in enumerate(self.last_helds[key]): + if id_ == my_unique_id: + self.last_helds[key][i] = (new_value, id_) + return True + self.last_helds[key].append((new_value, my_unique_id)) + return False + except KeyError: + return False + + def upscale(self, samples, upscale_method, scale_by, crop): + s = samples.copy() + width = self.enforce_mul_of_64(round(samples["samples"].shape[3] * scale_by)) + height = self.enforce_mul_of_64(round(samples["samples"].shape[2] * scale_by)) + + if (width > MAX_RESOLUTION): + width = MAX_RESOLUTION + if (height > MAX_RESOLUTION): + height = MAX_RESOLUTION + + s["samples"] = comfy.utils.common_upscale(samples["samples"], width, height, upscale_method, crop) + return (s,) + + def handle_upscale(self, samples: dict, upscale_method: str, factor: float, crop: bool) -> dict: + """Upscale the samples if the upscale_method is not set to 'None'.""" + if upscale_method != "None": + samples = self.upscale(samples, upscale_method, factor, crop)[0] + return samples + + def init_state(self, my_unique_id: Any, key: str, default: Any) -> Any: + """Initialize the state by either fetching the stored value or setting a default.""" + value = self.get_value_by_id(key, my_unique_id) + if value is not None: + return value + return default + + def get_output(self, pipe: dict) -> Tuple: + """Return a tuple of various elements fetched from the input pipe dictionary.""" + return ( + pipe, + pipe.get("model"), + pipe.get("positive"), + pipe.get("negative"), + pipe.get("samples"), + pipe.get("vae"), + pipe.get("clip"), + pipe.get("images"), + pipe.get("seed") + ) + + def get_output_sdxl(self, sdxl_pipe: dict) -> Tuple: + """Return a tuple of various elements fetched from the input sdxl_pipe dictionary.""" + return ( + sdxl_pipe, + sdxl_pipe.get("model"), + sdxl_pipe.get("positive"), + sdxl_pipe.get("negative"), + sdxl_pipe.get("vae"), + sdxl_pipe.get("refiner_model"), + sdxl_pipe.get("refiner_positive"), + sdxl_pipe.get("refiner_negative"), + sdxl_pipe.get("refiner_vae"), + sdxl_pipe.get("samples"), + sdxl_pipe.get("clip"), + sdxl_pipe.get("images"), + sdxl_pipe.get("seed") + ) + + def get_output_sdxl_v2(self, sdxl_pipe: dict, pipe: dict) -> Tuple: + """Return a tuple of various elements fetched from the input sdxl_pipe dictionary.""" + return ( + sdxl_pipe, + pipe, + sdxl_pipe.get("model"), + sdxl_pipe.get("positive"), + sdxl_pipe.get("negative"), + sdxl_pipe.get("refiner_model"), + sdxl_pipe.get("refiner_positive"), + sdxl_pipe.get("refiner_negative"), + sdxl_pipe.get("samples"), + sdxl_pipe.get("vae"), + sdxl_pipe.get("clip"), + sdxl_pipe.get("images"), + sdxl_pipe.get("seed") + ) + +class ttNsave: + def __init__(self, my_unique_id=0, prompt=None, extra_pnginfo=None, number_padding=5, overwrite_existing=False, output_dir=folder_paths.get_temp_directory()): + self.number_padding = int(number_padding) if number_padding not in [None, "None", 0] else None + self.overwrite_existing = overwrite_existing + self.my_unique_id = my_unique_id + 
self.prompt = prompt
+        self.extra_pnginfo = extra_pnginfo
+        self.type = 'temp'
+        self.output_dir = output_dir
+        if self.output_dir != folder_paths.get_temp_directory():
+            self.output_dir = self.folder_parser(self.output_dir, self.prompt, self.my_unique_id)
+            if not os.path.exists(self.output_dir):
+                self._create_directory(self.output_dir)
+
+    @staticmethod
+    def _create_directory(folder: str):
+        """Try to create the directory and log the status."""
+        if not os.path.exists(folder):
+            ttNl(f"Folder {folder} does not exist. Attempting to create...").warn().p()
+            try:
+                os.makedirs(folder)
+                ttNl(f"{folder} Created Successfully").success().p()
+            except OSError:
+                ttNl(f"Failed to create folder {folder}").error().p()
+
+    @staticmethod
+    def _map_filename(filename: str, filename_prefix: str) -> Tuple[int, str, Optional[int]]:
+        """Utility function to map filename to its parts."""
+
+        # Get the prefix length and extract the prefix
+        prefix_len = len(os.path.basename(filename_prefix))
+        prefix = filename[:prefix_len]
+
+        # Search for the primary digits
+        digits = re.search(r'(\d+)', filename[prefix_len:])
+
+        # Search for the number in brackets after the primary digits
+        group_id = re.search(r'\((\d+)\)', filename[prefix_len:])
+
+        return (int(digits.group()) if digits else 0, prefix, int(group_id.group(1)) if group_id else 0)
+
+    @staticmethod
+    def _format_date(text: str, date: datetime.datetime) -> str:
+        """Format the date according to specific patterns."""
+        date_formats = {
+            'd': lambda d: d.day,
+            'dd': lambda d: '{:02d}'.format(d.day),
+            'M': lambda d: d.month,
+            'MM': lambda d: '{:02d}'.format(d.month),
+            'h': lambda d: d.hour,
+            'hh': lambda d: '{:02d}'.format(d.hour),
+            'm': lambda d: d.minute,
+            'mm': lambda d: '{:02d}'.format(d.minute),
+            's': lambda d: d.second,
+            'ss': lambda d: '{:02d}'.format(d.second),
+            'y': lambda d: d.year,
+            'yy': lambda d: str(d.year)[2:],
+            'yyy': lambda d: str(d.year)[1:],
+            'yyyy': lambda d: d.year,
+        }
+
+        # Sort the keys by length in reverse order so the longest formats match first
+        for format_str in sorted(date_formats.keys(), key=len, reverse=True):
+            if format_str in text:
+                text = text.replace(format_str, str(date_formats[format_str](date)))
+        return text
+
+    @staticmethod
+    def _gather_all_inputs(prompt: Dict[str, dict], unique_id: str, linkInput: str = '', collected_inputs: Optional[Dict[str, Union[str, List[str]]]] = None) -> Dict[str, Union[str, List[str]]]:
+        """Recursively gather all inputs from the prompt dictionary."""
+        if prompt is None:
+            return None
+
+        collected_inputs = collected_inputs or {}
+        prompt_inputs = prompt[str(unique_id)]["inputs"]
+
+        for p_input, p_input_value in prompt_inputs.items():
+            a_input = f"{linkInput}>{p_input}" if linkInput else p_input
+
+            if isinstance(p_input_value, list):
+                ttNsave._gather_all_inputs(prompt, p_input_value[0], a_input, collected_inputs)
+            else:
+                existing_value = collected_inputs.get(a_input)
+                if existing_value is None:
+                    collected_inputs[a_input] = p_input_value
+                elif p_input_value not in existing_value:
+                    collected_inputs[a_input] = existing_value + "; " + p_input_value
+
+        return collected_inputs
+
+    @staticmethod
+    def _get_filename_with_padding(output_dir, filename, number_padding, group_id, ext):
+        """Return filename with proper padding."""
+        try:
+            filtered = list(filter(lambda a: a[1] == filename, map(lambda x: ttNsave._map_filename(x, filename), os.listdir(output_dir))))
+            last = max(filtered)[0]
+
+            for f in filtered:
+                if f[0] == last:
+                    if f[2] == 0 or f[2]
== group_id: + last += 1 + counter = last + except (ValueError, FileNotFoundError): + os.makedirs(output_dir, exist_ok=True) + counter = 1 + + if group_id == 0: + return f"{filename}.{ext}" if number_padding is None else f"{filename}_{counter:0{number_padding}}.{ext}" + else: + return f"{filename}_({group_id}).{ext}" if number_padding is None else f"{filename}_{counter:0{number_padding}}_({group_id}).{ext}" + + @staticmethod + def filename_parser(output_dir: str, filename_prefix: str, prompt: Dict[str, dict], my_unique_id: str, number_padding: int, group_id: int, ext: str) -> str: + """Parse the filename using provided patterns and replace them with actual values.""" + filename = re.sub(r'%date:(.*?)%', lambda m: ttNsave._format_date(m.group(1), datetime.datetime.now()), filename_prefix) + all_inputs = ttNsave._gather_all_inputs(prompt, my_unique_id) + + filename = re.sub(r'%(.*?)%', lambda m: str(all_inputs.get(m.group(1), '')), filename) + + subfolder = os.path.dirname(os.path.normpath(filename)) + filename = os.path.basename(os.path.normpath(filename)) + + output_dir = os.path.join(output_dir, subfolder) + + filename = ttNsave._get_filename_with_padding(output_dir, filename, number_padding, group_id, ext) + + return filename, subfolder + + @staticmethod + def folder_parser(output_dir: str, prompt: Dict[str, dict], my_unique_id: str): + output_dir = re.sub(r'%date:(.*?)%', lambda m: ttNsave._format_date(m.group(1), datetime.datetime.now()), output_dir) + all_inputs = ttNsave._gather_all_inputs(prompt, my_unique_id) + + return re.sub(r'%(.*?)%', lambda m: str(all_inputs.get(m.group(1), '')), output_dir) + + def images(self, images, filename_prefix, output_type, embed_workflow=True, ext="png", group_id=0): + FORMAT_MAP = { + "png": "PNG", + "jpg": "JPEG", + "jpeg": "JPEG", + "bmp": "BMP", + "tif": "TIFF", + "tiff": "TIFF", + "webp": "WEBP", + } + + if ext not in FORMAT_MAP: + raise ValueError(f"Unsupported file extension {ext}") + + if output_type in ("Hide", "Disabled"): + return list() + if output_type in ("Save", "Hide/Save"): + output_dir = self.output_dir if self.output_dir != folder_paths.get_temp_directory() else folder_paths.get_output_directory() + self.type = "output" + if output_type == "Preview": + output_dir = folder_paths.get_temp_directory() + filename_prefix = 'ttNpreview' + ext = "png" + + results=list() + for image in images: + img = Image.fromarray(np.clip(255. * image.cpu().numpy(), 0, 255).astype(np.uint8)) + + filename = filename_prefix.replace("%width%", str(img.size[0])).replace("%height%", str(img.size[1])) + + filename, subfolder = ttNsave.filename_parser(output_dir, filename, self.prompt, self.my_unique_id, self.number_padding, group_id, ext) + + file_path = os.path.join(output_dir, subfolder, filename) + + if (embed_workflow in (True, "True")) and (ext in ("png", "webp")): + if ext == "png": + metadata = PngInfo() + if self.prompt is not None: + metadata.add_text("prompt", json.dumps(self.prompt)) + + if self.extra_pnginfo is not None: + for x in self.extra_pnginfo: + metadata.add_text(x, json.dumps(self.extra_pnginfo[x])) + + if self.overwrite_existing or not os.path.isfile(file_path): + img.save(file_path, pnginfo=metadata, format=FORMAT_MAP[ext]) + else: + ttNl(f"File {file_path} already exists... 
Skipping").error().p() + + if ext == "webp": + img_exif = img.getexif() + workflow_metadata = '' + prompt_str = '' + if self.prompt is not None: + prompt_str = json.dumps(self.prompt) + img_exif[0x010f] = "Prompt:" + prompt_str + + if self.extra_pnginfo is not None: + for x in self.extra_pnginfo: + workflow_metadata += json.dumps(self.extra_pnginfo[x]) + + img_exif[0x010e] = "Workflow:" + workflow_metadata + exif_data = img_exif.tobytes() + + if self.overwrite_existing or not os.path.isfile(file_path): + img.save(file_path, exif=exif_data, format=FORMAT_MAP[ext]) + else: + ttNl(f"File {file_path} already exists... Skipping").error().p() + else: + if self.overwrite_existing or not os.path.isfile(file_path): + img.save(file_path, format=FORMAT_MAP[ext]) + else: + ttNl(f"File {file_path} already exists... Skipping").error().p() + + results.append({ + "filename": file_path, + "subfolder": subfolder, + "type": self.type + }) + + return results + +loader = ttNloader() +sampler = ttNsampler() + +#---------------------------------------------------------------DEPRECATED START-----------------------------------------------------------------------# +class ttNxyPlot: + def __init__(self, xyPlotData, save_prefix, image_output, prompt, extra_pnginfo, my_unique_id): + self.x_node_type, self.x_type = ttNsampler.safe_split(xyPlotData.get("x_axis"), ': ') + self.y_node_type, self.y_type = ttNsampler.safe_split(xyPlotData.get("y_axis"), ': ') + + self.x_values = xyPlotData.get("x_vals") if self.x_type != "None" else [] + self.y_values = xyPlotData.get("y_vals") if self.y_type != "None" else [] + + self.grid_spacing = xyPlotData.get("grid_spacing") + self.latent_id = xyPlotData.get("latent_id") + self.output_individuals = xyPlotData.get("output_individuals") + + self.x_label, self.y_label = [], [] + self.max_width, self.max_height = 0, 0 + self.latents_plot = [] + self.image_list = [] + + self.num_cols = len(self.x_values) if len(self.x_values) > 0 else 1 + self.num_rows = len(self.y_values) if len(self.y_values) > 0 else 1 + + self.total = self.num_cols * self.num_rows + self.num = 0 + + self.save_prefix = save_prefix + self.image_output = image_output + self.prompt = prompt + self.extra_pnginfo = extra_pnginfo + self.my_unique_id = my_unique_id + + # Helper Functions + @staticmethod + def define_variable(plot_image_vars, value_type, value, index): + value_label = f"{value}" + if value_type == "seed": + seed = int(plot_image_vars["seed"]) + if index != 0: + index = 1 + if value == 'increment': + plot_image_vars["seed"] = seed + index + value_label = f"{plot_image_vars['seed']}" + + elif value == 'decrement': + plot_image_vars["seed"] = seed - index + value_label = f"{plot_image_vars['seed']}" + + elif value == 'randomize': + plot_image_vars["seed"] = random.randint(0, 0xffffffffffffffff) + value_label = f"{plot_image_vars['seed']}" + else: + plot_image_vars[value_type] = value + + if value_type in ["steps", "cfg", "denoise", "clip_skip", + "lora1_model_strength", "lora1_clip_strength", + "lora2_model_strength", "lora2_clip_strength", + "lora3_model_strength", "lora3_clip_strength"]: + value_label = f"{value_type}: {value}" + + if value_type in ["lora_model&clip_strength", "lora1_model&clip_strength", "lora2_model&clip_strength", "lora3_model&clip_strength"]: + loraNum = value_type.split("_")[0] + plot_image_vars[loraNum + "_model_strength"] = value + plot_image_vars[loraNum + "_clip_strength"] = value + + type_label = value_type.replace("_model&clip", "") + value_label = f"{type_label}: {value}" + + elif 
value_type == "positive_token_normalization":
+            value_label = f'(+) token norm.: {value}'
+        elif value_type == "positive_weight_interpretation":
+            value_label = f'(+) weight interp.: {value}'
+        elif value_type == "negative_token_normalization":
+            value_label = f'(-) token norm.: {value}'
+        elif value_type == "negative_weight_interpretation":
+            value_label = f'(-) weight interp.: {value}'
+
+        elif value_type == "positive":
+            value_label = f"pos prompt {index + 1}"
+        elif value_type == "negative":
+            value_label = f"neg prompt {index + 1}"
+
+        return plot_image_vars, value_label
+
+    @staticmethod
+    def get_font(font_size):
+        return ImageFont.truetype(str(Path(ttNpaths.font_path)), font_size)
+
+    @staticmethod
+    def update_label(label, value, num_items):
+        if len(label) < num_items:
+            return [*label, value]
+        return label
+
+    @staticmethod
+    def rearrange_tensors(latent, num_cols, num_rows):
+        new_latent = []
+        for i in range(num_rows):
+            for j in range(num_cols):
+                index = j * num_rows + i
+                new_latent.append(latent[index])
+        return new_latent
+
+    def calculate_background_dimensions(self):
+        border_size = int((self.max_width//8)*1.5) if self.y_type != "None" or self.x_type != "None" else 0
+        bg_width = self.num_cols * (self.max_width + self.grid_spacing) - self.grid_spacing + border_size * (self.y_type != "None")
+        bg_height = self.num_rows * (self.max_height + self.grid_spacing) - self.grid_spacing + border_size * (self.x_type != "None")
+
+        x_offset_initial = border_size if self.y_type != "None" else 0
+        y_offset = border_size if self.x_type != "None" else 0
+
+        return bg_width, bg_height, x_offset_initial, y_offset
+
+    def adjust_font_size(self, text, initial_font_size, label_width):
+        font = self.get_font(initial_font_size)
+
+        left, _, right, _ = font.getbbox(text)
+        text_width = right - left
+
+        scaling_factor = 0.9
+        if text_width > (label_width * scaling_factor):
+            return int(initial_font_size * (label_width / text_width) * scaling_factor)
+        else:
+            return initial_font_size
+
+    def create_label(self, img, text, initial_font_size, is_x_label=True, max_font_size=70, min_font_size=10):
+        label_width = img.width if is_x_label else img.height
+
+        # Adjust font size
+        font_size = self.adjust_font_size(text, initial_font_size, label_width)
+        font_size = min(max_font_size, font_size)  # Ensure font isn't too large
+        font_size = max(min_font_size, font_size)  # Ensure font isn't too small
+
+        label_height = int(font_size * 1.5) if is_x_label else font_size
+
+        label_bg = Image.new('RGBA', (label_width, label_height), color=(255, 255, 255, 0))
+        d = ImageDraw.Draw(label_bg)
+
+        font = self.get_font(font_size)
+
+        # Check if the text will fit; if not, truncate it and append an ellipsis
+        try:
+            if d.textsize(text, font=font)[0] > label_width:
+                while d.textsize(text+'...', font=font)[0] > label_width and len(text) > 0:
+                    text = text[:-1]
+                text = text + '...'
+        except AttributeError:
+            # ImageDraw.textsize() was removed in Pillow 10; fall back to textlength()
+            if d.textlength(text, font=font) > label_width:
+                while d.textlength(text+'...', font=font) > label_width and len(text) > 0:
+                    text = text[:-1]
+                text = text + '...'
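+
+        # (Added sketch, not part of the original flow: on Pillow >= 10 the same
+        # measurement can be done without the try/except, assuming a loaded font:
+        #     width = d.textlength(text, font=font)
+        #     bbox = font.getbbox(text); height = bbox[3] - bbox[1]
+        # since textsize() only exists on Pillow < 10.)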
+
+        # Compute text width and height for multi-line text
+        text_lines = text.split('\n')
+        try:
+            text_widths, text_heights = zip(*[d.textsize(line, font=font) for line in text_lines])
+        except AttributeError:
+            # textlength() returns only a width, so approximate line height with the font size
+            text_widths, text_heights = zip(*[(d.textlength(line, font=font), font_size) for line in text_lines])
+        max_text_width = max(text_widths)
+        total_text_height = sum(text_heights)
+
+        # Compute position for each line of text
+        lines_positions = []
+        current_y = 0
+        for line, line_width, line_height in zip(text_lines, text_widths, text_heights):
+            text_x = (label_width - line_width) // 2
+            text_y = current_y + (label_height - total_text_height) // 2
+            current_y += line_height
+            lines_positions.append((line, (text_x, text_y)))
+
+        # Draw each line of text
+        for line, (text_x, text_y) in lines_positions:
+            d.text((text_x, text_y), line, fill='black', font=font)
+
+        return label_bg
+
+    def sample_plot_image(self, plot_image_vars, samples, preview_latent, latents_plot, image_list, disable_noise, start_step, last_step, force_full_denoise):
+        model, clip, vae, positive, negative = None, None, None, None, None
+
+        if plot_image_vars["x_node_type"] == "loader" or plot_image_vars["y_node_type"] == "loader":
+            model, clip, vae = loader.load_checkpoint(plot_image_vars['ckpt_name'])
+
+            if plot_image_vars['lora1_name'] != "None":
+                model, clip = loader.load_lora(plot_image_vars['lora1_name'], model, clip, plot_image_vars['lora1_model_strength'], plot_image_vars['lora1_clip_strength'])
+
+            if plot_image_vars['lora2_name'] != "None":
+                model, clip = loader.load_lora(plot_image_vars['lora2_name'], model, clip, plot_image_vars['lora2_model_strength'], plot_image_vars['lora2_clip_strength'])
+
+            if plot_image_vars['lora3_name'] != "None":
+                model, clip = loader.load_lora(plot_image_vars['lora3_name'], model, clip, plot_image_vars['lora3_model_strength'], plot_image_vars['lora3_clip_strength'])
+
+            # Check for custom VAE
+            if plot_image_vars['vae_name'] not in ["Baked-VAE", "Baked VAE"]:
+                vae = loader.load_vae(plot_image_vars['vae_name'])
+
+            # CLIP skip
+            if not clip:
+                raise Exception("No CLIP found")
+            clip = clip.clone()
+            clip.clip_layer(plot_image_vars['clip_skip'])
+
+            positive, positive_pooled = advanced_encode(clip, plot_image_vars['positive'], plot_image_vars['positive_token_normalization'], plot_image_vars['positive_weight_interpretation'], w_max=1.0, apply_to_pooled="enable")
+            positive = [[positive, {"pooled_output": positive_pooled}]]
+
+            negative, negative_pooled = advanced_encode(clip, plot_image_vars['negative'], plot_image_vars['negative_token_normalization'], plot_image_vars['negative_weight_interpretation'], w_max=1.0, apply_to_pooled="enable")
+            negative = [[negative, {"pooled_output": negative_pooled}]]
+
+        model = model if model is not None else plot_image_vars["model"]
+        clip = clip if clip is not None else plot_image_vars["clip"]
+        vae = vae if vae is not None else plot_image_vars["vae"]
+        positive = positive if positive is not None else plot_image_vars["positive_cond"]
+        negative = negative if negative is not None else plot_image_vars["negative_cond"]
+
+        seed = plot_image_vars["seed"]
+        steps = plot_image_vars["steps"]
+        cfg = plot_image_vars["cfg"]
+        sampler_name = plot_image_vars["sampler_name"]
+        scheduler = plot_image_vars["scheduler"]
+        denoise = plot_image_vars["denoise"]
+
+        if plot_image_vars["lora_name"] not in ('None', None):
+            model, clip = loader.load_lora(plot_image_vars["lora_name"], model, clip, plot_image_vars["lora_model_strength"], plot_image_vars["lora_clip_strength"])
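+
+        # By this point every axis override has been folded back into model/clip/vae
+        # and the two conditionings, falling back to the pipe-supplied objects
+        # whenever the plotted axis did not touch that input.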
+ # Sample + samples = sampler.common_ksampler(model, seed, steps, cfg, sampler_name, scheduler, positive, negative, samples, denoise=denoise, disable_noise=disable_noise, preview_latent=preview_latent, start_step=start_step, last_step=last_step, force_full_denoise=force_full_denoise) + + # Decode images and store + latent = samples["samples"] + + # Add the latent tensor to the tensors list + latents_plot.append(latent) + + # Decode the image + image = vae.decode(latent) + + if self.output_individuals in [True, "True"]: + ttN_save = ttNsave(self.my_unique_id, self.prompt, self.extra_pnginfo) + ttN_save.images(image, self.save_prefix, self.image_output, group_id=self.num) + + # Convert the image from tensor to PIL Image and add it to the list + pil_image = ttNsampler.tensor2pil(image) + image_list.append(pil_image) + + # Update max dimensions + self.max_width = max(self.max_width, pil_image.width) + self.max_height = max(self.max_height, pil_image.height) + + # Return the touched variables + return image_list, self.max_width, self.max_height, latents_plot + + def validate_xy_plot(self): + if self.x_type == 'None' and self.y_type == 'None': + ttNl('No Valid Plot Types - Reverting to default sampling...').t(f'pipeKSampler[{self.my_unique_id}]').warn().p() + return False + else: + return True + + def plot_images_and_labels(self): + # Calculate the background dimensions + bg_width, bg_height, x_offset_initial, y_offset = self.calculate_background_dimensions() + + # Create the white background image + background = Image.new('RGBA', (int(bg_width), int(bg_height)), color=(255, 255, 255, 255)) + + for row_index in range(self.num_rows): + x_offset = x_offset_initial + + for col_index in range(self.num_cols): + index = col_index * self.num_rows + row_index + img = self.image_list[index] + background.paste(img, (x_offset, y_offset)) + + # Handle X label + if row_index == 0 and self.x_type != "None": + label_bg = self.create_label(img, self.x_label[col_index], int(48 * img.width / 512)) + label_y = (y_offset - label_bg.height) // 2 + background.alpha_composite(label_bg, (x_offset, label_y)) + + # Handle Y label + if col_index == 0 and self.y_type != "None": + label_bg = self.create_label(img, self.y_label[row_index], int(48 * img.height / 512), False) + label_bg = label_bg.rotate(90, expand=True) + + label_x = (x_offset - label_bg.width) // 2 + label_y = y_offset + (img.height - label_bg.height) // 2 + background.alpha_composite(label_bg, (label_x, label_y)) + + x_offset += img.width + self.grid_spacing + + y_offset += img.height + self.grid_spacing + + return sampler.pil2tensor(background) + + def get_latent(self, samples, latent_id): + # Extract the 'samples' tensor from the dictionary + latent_image_tensor = samples["samples"] + + # Split the tensor into individual image tensors + image_tensors = torch.split(latent_image_tensor, 1, dim=0) + + # Create a list of dictionaries containing the individual image tensors + latent_list = [{'samples': image} for image in image_tensors] + + # Set latent only to the first latent of batch + if latent_id >= len(latent_list): + ttNl(f'The selected latent_id ({latent_id}) is out of range.').t(f'pipeKSampler[{self.my_unique_id}]').warn().p() + ttNl(f'Automatically setting the latent_id to the last image in the list (index: {len(latent_list) - 1}).').t(f'pipeKSampler[{self.my_unique_id}]').warn().p() + + latent_id = len(latent_list) - 1 + + return latent_list[latent_id] + + def get_labels_and_sample(self, plot_image_vars, latent_image, preview_latent, start_step, 
last_step, force_full_denoise, disable_noise): + for x_index, x_value in enumerate(self.x_values): + plot_image_vars, x_value_label = self.define_variable(plot_image_vars, self.x_type, x_value, x_index) + self.x_label = self.update_label(self.x_label, x_value_label, len(self.x_values)) + if self.y_type != 'None': + for y_index, y_value in enumerate(self.y_values): + self.num += 1 + plot_image_vars, y_value_label = self.define_variable(plot_image_vars, self.y_type, y_value, y_index) + self.y_label = self.update_label(self.y_label, y_value_label, len(self.y_values)) + + ttNl(f'{CC.GREY}X: {x_value_label}, Y: {y_value_label}').t(f'Plot Values {self.num}/{self.total} ->').p() + self.image_list, self.max_width, self.max_height, self.latents_plot = self.sample_plot_image(plot_image_vars, latent_image, preview_latent, self.latents_plot, self.image_list, disable_noise, start_step, last_step, force_full_denoise) + else: + self.num += 1 + ttNl(f'{CC.GREY}X: {x_value_label}').t(f'Plot Values {self.num}/{self.total} ->').p() + self.image_list, self.max_width, self.max_height, self.latents_plot = self.sample_plot_image(plot_image_vars, latent_image, preview_latent, self.latents_plot, self.image_list, disable_noise, start_step, last_step, force_full_denoise) + + # Rearrange latent array to match preview image grid + self.latents_plot = self.rearrange_tensors(self.latents_plot, self.num_cols, self.num_rows) + + # Concatenate the tensors along the first dimension (dim=0) + self.latents_plot = torch.cat(self.latents_plot, dim=0) + + return self.latents_plot + +class ttN_XYPlot: + version = '1.2.0' + lora_list = ["None"] + folder_paths.get_filename_list("loras") + lora_strengths = {"min": -4.0, "max": 4.0, "step": 0.01} + token_normalization = ["none", "mean", "length", "length+mean"] + weight_interpretation = ["comfy", "A1111", "compel", "comfy++"] + + loader_dict = { + "ckpt_name": folder_paths.get_filename_list("checkpoints"), + "vae_name": ["Baked-VAE"] + folder_paths.get_filename_list("vae"), + "clip_skip": {"min": -24, "max": -1, "step": 1}, + "lora1_name": lora_list, + "lora1_model_strength": lora_strengths, + "lora1_clip_strength": lora_strengths, + "lora1_model&clip_strength": lora_strengths, + "lora2_name": lora_list, + "lora2_model_strength": lora_strengths, + "lora2_clip_strength": lora_strengths, + "lora2_model&clip_strength": lora_strengths, + "lora3_name": lora_list, + "lora3_model_strength": lora_strengths, + "lora3_clip_strength": lora_strengths, + "lora3_model&clip_strength": lora_strengths, + "positive": [], + "positive_token_normalization": token_normalization, + "positive_weight_interpretation": weight_interpretation, + "negative": [], + "negative_token_normalization": token_normalization, + "negative_weight_interpretation": weight_interpretation, + } + + sampler_dict = { + "lora_name": lora_list, + "lora_model_strength": lora_strengths, + "lora_clip_strength": lora_strengths, + "lora_model&clip_strength": lora_strengths, + "steps": {"min": 1, "max": 100, "step": 1}, + "cfg": {"min": 0.0, "max": 100.0, "step": 1.0}, + "sampler_name": comfy.samplers.KSampler.SAMPLERS, + "scheduler": comfy.samplers.KSampler.SCHEDULERS, + "denoise": {"min": 0.0, "max": 1.0, "step": 0.01}, + "seed": ['increment', 'decrement', 'randomize'], + } + + plot_dict = {**sampler_dict, **loader_dict} + + plot_values = ["None",] + plot_values.append("---------------------") + for k in sampler_dict: + plot_values.append(f'sampler: {k}') + plot_values.append("---------------------") + for k in loader_dict: + 
plot_values.append(f'loader: {k}')
+
+    def __init__(self):
+        pass
+
+    rejected = ["None", "---------------------"]
+
+    @classmethod
+    def INPUT_TYPES(s):
+        return {
+            "required": {
+                #"info": ("INFO", {"default": "Any values not set by xyplot will be taken from the KSampler or connected pipeLoader", "multiline": True}),
+                "grid_spacing": ("INT", {"min": 0, "max": 500, "step": 5, "default": 0,}),
+                "latent_id": ("INT", {"min": 0, "max": 100, "step": 1, "default": 0, }),
+                "output_individuals": (["False", "True"], {"default": "False"}),
+                "flip_xy": (["False", "True"], {"default": "False"}),
+                "x_axis": (ttN_XYPlot.plot_values, {"default": 'None'}),
+                "x_values": ("STRING", {"default": '', "multiline": True, "placeholder": 'insert values separated by "; "'}),
+                "y_axis": (ttN_XYPlot.plot_values, {"default": 'None'}),
+                "y_values": ("STRING", {"default": '', "multiline": True, "placeholder": 'insert values separated by "; "'}),
+            },
+            "hidden": {
+                "plot_dict": (ttN_XYPlot.plot_dict,),
+                "ttNnodeVersion": ttN_XYPlot.version,
+            },
+        }
+
+    RETURN_TYPES = ("XYPLOT", )
+    RETURN_NAMES = ("xyPlot", )
+    FUNCTION = "plot"
+
+    CATEGORY = "🌏 tinyterra/legacy"
+
+    def plot(self, grid_spacing, latent_id, output_individuals, flip_xy, x_axis, x_values, y_axis, y_values):
+        def clean_values(values):
+            original_values = values.split("; ")
+            cleaned_values = []
+
+            for value in original_values:
+                # Strip the semi-colon
+                cleaned_value = value.strip(';').strip()
+
+                if cleaned_value == "":
+                    continue
+
+                # Try to convert the cleaned_value back to int or float if possible
+                try:
+                    cleaned_value = int(cleaned_value)
+                except ValueError:
+                    try:
+                        cleaned_value = float(cleaned_value)
+                    except ValueError:
+                        pass
+
+                # Append the cleaned_value to the list
+                cleaned_values.append(cleaned_value)
+
+            return cleaned_values
+
+        if x_axis in self.rejected:
+            x_axis = "None"
+            x_values = []
+        else:
+            x_values = clean_values(x_values)
+
+        if y_axis in self.rejected:
+            y_axis = "None"
+            y_values = []
+        else:
+            y_values = clean_values(y_values)
+
+        if flip_xy == "True":
+            x_axis, y_axis = y_axis, x_axis
+            x_values, y_values = y_values, x_values
+
+        xy_plot = {"x_axis": x_axis,
+                   "x_vals": x_values,
+                   "y_axis": y_axis,
+                   "y_vals": y_values,
+                   "grid_spacing": grid_spacing,
+                   "latent_id": latent_id,
+                   "output_individuals": output_individuals}
+
+        return (xy_plot, )
+
+class ttN_pipe_IN:
+    version = '1.1.0'
+    def __init__(self):
+        pass
+
+    @classmethod
+    def INPUT_TYPES(s):
+        return {
+            "required": {
+                "model": ("MODEL",),
+                "pos": ("CONDITIONING",),
+                "neg": ("CONDITIONING",),
+                "latent": ("LATENT",),
+                "vae": ("VAE",),
+                "clip": ("CLIP",),
+                "seed": ("INT", {"default": 0, "min": 0, "max": 0xffffffffffffffff}),
+            }, "optional": {
+                "image": ("IMAGE",),
+            },
+            "hidden": {"ttNnodeVersion": ttN_pipe_IN.version},
+        }
+
+    RETURN_TYPES = ("PIPE_LINE", )
+    RETURN_NAMES = ("pipe", )
+    FUNCTION = "flush"
+
+    CATEGORY = "🌏 tinyterra/legacy"
+
+    def flush(self, model, pos=0, neg=0, latent=0, vae=0, clip=0, image=0, seed=0):
+        pipe = {"model": model,
+                "positive": pos,
+                "negative": neg,
+                "vae": vae,
+                "clip": clip,
+
+                "refiner_model": None,
+                "refiner_positive": None,
+                "refiner_negative": None,
+                "refiner_vae": None,
+                "refiner_clip": None,
+
+                "samples": latent,
+                "images": image,
+                "seed": seed,
+
+                "loader_settings": {}
+        }
+        return (pipe, )
+
+class ttN_pipe_OUT:
+    version = '1.1.0'
+    def __init__(self):
+        pass
+
+    @classmethod
+    def INPUT_TYPES(s):
+        return {
+            "required": {
+                "pipe": ("PIPE_LINE",),
+            },
+            "hidden": {"ttNnodeVersion":
ttN_pipe_OUT.version}, + } + + RETURN_TYPES = ("MODEL", "CONDITIONING", "CONDITIONING", "LATENT", "VAE", "CLIP", "IMAGE", "INT", "PIPE_LINE",) + RETURN_NAMES = ("model", "pos", "neg", "latent", "vae", "clip", "image", "seed", "pipe") + FUNCTION = "flush" + + CATEGORY = "🌏 tinyterra/legacy" + + def flush(self, pipe): + model = pipe.get("model") + pos = pipe.get("positive") + neg = pipe.get("negative") + latent = pipe.get("samples") + vae = pipe.get("vae") + clip = pipe.get("clip") + image = pipe.get("images") + seed = pipe.get("seed") + + return model, pos, neg, latent, vae, clip, image, seed, pipe + +class ttN_TSC_pipeLoader: + version = '1.1.2' + @classmethod + def INPUT_TYPES(cls): + return {"required": { + "ckpt_name": (folder_paths.get_filename_list("checkpoints"), ), + "config_name": (["Default",] + folder_paths.get_filename_list("configs"), {"default": "Default"} ), + "vae_name": (["Baked VAE"] + folder_paths.get_filename_list("vae"),), + "clip_skip": ("INT", {"default": -1, "min": -24, "max": 0, "step": 1}), + + "lora1_name": (["None"] + folder_paths.get_filename_list("loras"),), + "lora1_model_strength": ("FLOAT", {"default": 1.0, "min": -10.0, "max": 10.0, "step": 0.01}), + "lora1_clip_strength": ("FLOAT", {"default": 1.0, "min": -10.0, "max": 10.0, "step": 0.01}), + + "lora2_name": (["None"] + folder_paths.get_filename_list("loras"),), + "lora2_model_strength": ("FLOAT", {"default": 1.0, "min": -10.0, "max": 10.0, "step": 0.01}), + "lora2_clip_strength": ("FLOAT", {"default": 1.0, "min": -10.0, "max": 10.0, "step": 0.01}), + + "lora3_name": (["None"] + folder_paths.get_filename_list("loras"),), + "lora3_model_strength": ("FLOAT", {"default": 1.0, "min": -10.0, "max": 10.0, "step": 0.01}), + "lora3_clip_strength": ("FLOAT", {"default": 1.0, "min": -10.0, "max": 10.0, "step": 0.01}), + + "positive": ("STRING", {"default": "Positive","multiline": True}), + "positive_token_normalization": (["none", "mean", "length", "length+mean"],), + "positive_weight_interpretation": (["comfy", "A1111", "compel", "comfy++", "down_weight"],), + + "negative": ("STRING", {"default": "Negative", "multiline": True}), + "negative_token_normalization": (["none", "mean", "length", "length+mean"],), + "negative_weight_interpretation": (["comfy", "A1111", "compel", "comfy++", "down_weight"],), + + "empty_latent_width": ("INT", {"default": 512, "min": 64, "max": MAX_RESOLUTION, "step": 8}), + "empty_latent_height": ("INT", {"default": 512, "min": 64, "max": MAX_RESOLUTION, "step": 8}), + "batch_size": ("INT", {"default": 1, "min": 1, "max": 64}), + "seed": ("INT", {"default": 0, "min": 0, "max": 0xffffffffffffffff}), + }, + "optional": {"model_override": ("MODEL",), "clip_override": ("CLIP",), "optional_lora_stack": ("LORA_STACK",),}, + "hidden": {"prompt": "PROMPT", "ttNnodeVersion": ttN_TSC_pipeLoader.version, "my_unique_id": "UNIQUE_ID",}} + + RETURN_TYPES = ("PIPE_LINE" ,"MODEL", "CONDITIONING", "CONDITIONING", "LATENT", "VAE", "CLIP", "INT",) + RETURN_NAMES = ("pipe","model", "positive", "negative", "latent", "vae", "clip", "seed",) + + FUNCTION = "adv_pipeloader" + CATEGORY = "🌏 tinyterra/legacy" + + def adv_pipeloader(self, ckpt_name, config_name, vae_name, clip_skip, + lora1_name, lora1_model_strength, lora1_clip_strength, + lora2_name, lora2_model_strength, lora2_clip_strength, + lora3_name, lora3_model_strength, lora3_clip_strength, + positive, positive_token_normalization, positive_weight_interpretation, + negative, negative_token_normalization, negative_weight_interpretation, + empty_latent_width, 
empty_latent_height, batch_size, seed, model_override=None, clip_override=None, optional_lora_stack=None, prompt=None, my_unique_id=None): + + model: ModelPatcher | None = None + clip: CLIP | None = None + vae: VAE | None = None + + # Create Empty Latent + latent = sampler.emptyLatent(None, batch_size, empty_latent_width, empty_latent_height) + samples = {"samples":latent} + + # Load models + model, clip, vae = loader.load_checkpoint(ckpt_name, config_name) + + if model_override is not None: + model = model_override + + if clip_override is not None: + clip = clip_override + + if optional_lora_stack is not None: + for lora in optional_lora_stack: + model, clip = loader.load_lora(lora[0], model, clip, lora[1], lora[2]) + + if lora1_name != "None": + model, clip = loader.load_lora(lora1_name, model, clip, lora1_model_strength, lora1_clip_strength) + + if lora2_name != "None": + model, clip = loader.load_lora(lora2_name, model, clip, lora2_model_strength, lora2_clip_strength) + + if lora3_name != "None": + model, clip = loader.load_lora(lora3_name, model, clip, lora3_model_strength, lora3_clip_strength) + + # Check for custom VAE + if vae_name != "Baked VAE": + vae = loader.load_vae(vae_name) + + # CLIP skip + if not clip: + raise Exception("No CLIP found") + + clipped = clip.clone() + if clip_skip != 0: + clipped.clip_layer(clip_skip) + + positive = loader.nsp_parse(positive, seed, title='pipeLoader Positive', my_unique_id=my_unique_id) + + positive_embeddings_final, positive_pooled = advanced_encode(clipped, positive, positive_token_normalization, positive_weight_interpretation, w_max=1.0, apply_to_pooled='enable') + positive_embeddings_final = [[positive_embeddings_final, {"pooled_output": positive_pooled}]] + + negative = loader.nsp_parse(negative, seed, title='pipeLoader Negative', my_unique_id=my_unique_id) + + negative_embeddings_final, negative_pooled = advanced_encode(clipped, negative, negative_token_normalization, negative_weight_interpretation, w_max=1.0, apply_to_pooled='enable') + negative_embeddings_final = [[negative_embeddings_final, {"pooled_output": negative_pooled}]] + image = ttNsampler.pil2tensor(Image.new('RGB', (1, 1), (0, 0, 0))) + + + pipe = {"model": model, + "positive": positive_embeddings_final, + "negative": negative_embeddings_final, + "vae": vae, + "clip": clip, + + "samples": samples, + "images": image, + "seed": seed, + + "loader_settings": {"ckpt_name": ckpt_name, + "vae_name": vae_name, + + "lora1_name": lora1_name, + "lora1_model_strength": lora1_model_strength, + "lora1_clip_strength": lora1_clip_strength, + "lora2_name": lora2_name, + "lora2_model_strength": lora2_model_strength, + "lora2_clip_strength": lora2_clip_strength, + "lora3_name": lora3_name, + "lora3_model_strength": lora3_model_strength, + "lora3_clip_strength": lora3_clip_strength, + + "refiner_ckpt_name": None, + "refiner_vae_name": None, + "refiner_lora1_name": None, + "refiner_lora1_model_strength": None, + "refiner_lora1_clip_strength": None, + "refiner_lora2_name": None, + "refiner_lora2_model_strength": None, + "refiner_lora2_clip_strength": None, + + "clip_skip": clip_skip, + "positive": positive, + "positive_l": None, + "positive_g": None, + "positive_token_normalization": positive_token_normalization, + "positive_weight_interpretation": positive_weight_interpretation, + "positive_balance": None, + "negative": negative, + "negative_l": None, + "negative_g": None, + "negative_token_normalization": negative_token_normalization, + "negative_weight_interpretation": 
negative_weight_interpretation, + "negative_balance": None, + "empty_latent_width": empty_latent_width, + "empty_latent_height": empty_latent_height, + "batch_size": batch_size, + "seed": seed, + "empty_samples": samples,} + } + + return (pipe, model, positive_embeddings_final, negative_embeddings_final, samples, vae, clip, seed) + +class ttN_TSC_pipeKSampler: + version = '1.0.5' + upscale_methods = ["None", "nearest-exact", "bilinear", "area", "bicubic", "lanczos", "bislerp"] + crop_methods = ["disabled", "center"] + + def __init__(self): + pass + + @classmethod + def INPUT_TYPES(cls): + return {"required": + {"pipe": ("PIPE_LINE",), + + "lora_name": (["None"] + folder_paths.get_filename_list("loras"),), + "lora_model_strength": ("FLOAT", {"default": 1.0, "min": -10.0, "max": 10.0, "step": 0.01}), + "lora_clip_strength": ("FLOAT", {"default": 1.0, "min": -10.0, "max": 10.0, "step": 0.01}), + + "upscale_method": (cls.upscale_methods,), + "factor": ("FLOAT", {"default": 2, "min": 0.0, "max": 10.0, "step": 0.25}), + "crop": (cls.crop_methods,), + "sampler_state": (["Sample", "Hold"], ), + "steps": ("INT", {"default": 20, "min": 1, "max": 10000}), + "cfg": ("FLOAT", {"default": 8.0, "min": 0.0, "max": 100.0}), + "sampler_name": (comfy.samplers.KSampler.SAMPLERS,), + "scheduler": (comfy.samplers.KSampler.SCHEDULERS,), + "denoise": ("FLOAT", {"default": 1.0, "min": 0.0, "max": 1.0, "step": 0.01}), + "image_output": (["Hide", "Preview", "Save", "Hide/Save"],), + "save_prefix": ("STRING", {"default": "ComfyUI"}) + }, + "optional": + {"seed": ("INT", {"default": 0, "min": 0, "max": 0xffffffffffffffff}), + "optional_model": ("MODEL",), + "optional_positive": ("CONDITIONING",), + "optional_negative": ("CONDITIONING",), + "optional_latent": ("LATENT",), + "optional_vae": ("VAE",), + "optional_clip": ("CLIP",), + "xyPlot": ("XYPLOT",), + }, + "hidden": + {"prompt": "PROMPT", "extra_pnginfo": "EXTRA_PNGINFO", "my_unique_id": "UNIQUE_ID", + "embeddingsList": (folder_paths.get_filename_list("embeddings"),), + "ttNnodeVersion": ttN_TSC_pipeKSampler.version}, + } + + RETURN_TYPES = ("PIPE_LINE", "MODEL", "CONDITIONING", "CONDITIONING", "LATENT", "VAE", "CLIP", "IMAGE", "INT",) + RETURN_NAMES = ("pipe", "model", "positive", "negative", "latent","vae", "clip", "image", "seed", ) + OUTPUT_NODE = True + FUNCTION = "sample" + CATEGORY = "🌏 tinyterra/legacy" + + def sample(self, pipe, lora_name, lora_model_strength, lora_clip_strength, sampler_state, steps, cfg, sampler_name, scheduler, image_output, save_prefix, denoise=1.0, + optional_model=None, optional_positive=None, optional_negative=None, optional_latent=None, optional_vae=None, optional_clip=None, seed=None, xyPlot=None, upscale_method=None, factor=None, crop=None, prompt=None, extra_pnginfo=None, my_unique_id=None, start_step=None, last_step=None, force_full_denoise=False, disable_noise=False): + + my_unique_id = int(my_unique_id) + + ttN_save = ttNsave(my_unique_id, prompt, extra_pnginfo) + + samp_model = optional_model if optional_model is not None else pipe["model"] + samp_positive = optional_positive if optional_positive is not None else pipe["positive"] + samp_negative = optional_negative if optional_negative is not None else pipe["negative"] + samp_samples = optional_latent if optional_latent is not None else pipe["samples"] + samp_vae = optional_vae if optional_vae is not None else pipe["vae"] + samp_clip = optional_clip if optional_clip is not None else pipe["clip"] + + if seed in (None, 'undefined'): + samp_seed = pipe["seed"] + else: + 
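# An explicitly provided seed takes precedence over the seed stored in the pipe.
+            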
samp_seed = seed
+
+        def process_sample_state(pipe, samp_model, samp_clip, samp_samples, samp_vae, samp_seed, samp_positive, samp_negative, lora_name, lora_model_strength, lora_clip_strength,
+                                 steps, cfg, sampler_name, scheduler, denoise,
+                                 image_output, save_prefix, prompt, extra_pnginfo, my_unique_id, preview_latent, disable_noise=disable_noise):
+            # Load Lora
+            if lora_name not in (None, "None"):
+                samp_model, samp_clip = loader.load_lora(lora_name, samp_model, samp_clip, lora_model_strength, lora_clip_strength)
+
+            # Upscale samples if enabled
+            samp_samples = sampler.handle_upscale(samp_samples, upscale_method, factor, crop)
+
+            samp_samples = sampler.common_ksampler(samp_model, samp_seed, steps, cfg, sampler_name, scheduler, samp_positive, samp_negative, samp_samples, denoise=denoise, preview_latent=preview_latent, start_step=start_step, last_step=last_step, force_full_denoise=force_full_denoise, disable_noise=disable_noise)
+
+            latent = samp_samples["samples"]
+            samp_images = samp_vae.decode(latent)
+
+            results = ttN_save.images(samp_images, save_prefix, image_output)
+
+            sampler.update_value_by_id("results", my_unique_id, results)
+
+            new_pipe = {
+                "model": samp_model,
+                "positive": samp_positive,
+                "negative": samp_negative,
+                "vae": samp_vae,
+                "clip": samp_clip,
+
+                "samples": samp_samples,
+                "images": samp_images,
+                "seed": samp_seed,
+
+                "loader_settings": pipe["loader_settings"],
+            }
+
+            sampler.update_value_by_id("pipe_line", my_unique_id, new_pipe)
+
+            del pipe
+
+            if image_output in ("Hide", "Hide/Save"):
+                return sampler.get_output(new_pipe)
+
+            return {"ui": {"images": results},
+                    "result": sampler.get_output(new_pipe)}
+
+        def process_hold_state(pipe, image_output, my_unique_id):
+            last_pipe = sampler.init_state(my_unique_id, "pipe_line", pipe)
+
+            last_results = sampler.init_state(my_unique_id, "results", list())
+
+            if image_output in ("Hide", "Hide/Save"):
+                return sampler.get_output(last_pipe)
+
+            return {"ui": {"images": last_results}, "result": sampler.get_output(last_pipe)}
+
+        def process_xyPlot(pipe, samp_model, samp_clip, samp_samples, samp_vae, samp_seed, samp_positive, samp_negative, lora_name, lora_model_strength, lora_clip_strength,
+                           steps, cfg, sampler_name, scheduler, denoise,
+                           image_output, save_prefix, prompt, extra_pnginfo, my_unique_id, preview_latent, xyPlot):
+
+            random.seed(samp_seed)
+
+            sampleXYplot = ttNxyPlot(xyPlot, save_prefix, image_output, prompt, extra_pnginfo, my_unique_id)
+
+            if not sampleXYplot.validate_xy_plot():
+                return process_sample_state(pipe, samp_model, samp_clip, samp_samples, samp_vae, samp_seed, samp_positive, samp_negative, lora_name, lora_model_strength, lora_clip_strength, steps, cfg, sampler_name, scheduler, denoise, image_output, save_prefix, prompt, extra_pnginfo, my_unique_id, preview_latent)
+
+            plot_image_vars = {
+                "x_node_type": sampleXYplot.x_node_type, "y_node_type": sampleXYplot.y_node_type,
+                "lora_name": lora_name, "lora_model_strength": lora_model_strength, "lora_clip_strength": lora_clip_strength,
+                "steps": steps, "cfg": cfg, "sampler_name": sampler_name, "scheduler": scheduler, "denoise": denoise, "seed": samp_seed,
+
+                "model": samp_model, "vae": samp_vae, "clip": samp_clip, "positive_cond": samp_positive, "negative_cond": samp_negative,
+
+                "ckpt_name": pipe['loader_settings']['ckpt_name'],
+                "vae_name": pipe['loader_settings']['vae_name'],
+                "clip_skip": pipe['loader_settings']['clip_skip'],
+                "lora1_name": pipe['loader_settings']['lora1_name'],
+                "lora1_model_strength": pipe['loader_settings']['lora1_model_strength'],
+                "lora1_clip_strength":
pipe['loader_settings']['lora1_clip_strength'],
+                "lora2_name": pipe['loader_settings']['lora2_name'],
+                "lora2_model_strength": pipe['loader_settings']['lora2_model_strength'],
+                "lora2_clip_strength": pipe['loader_settings']['lora2_clip_strength'],
+                "lora3_name": pipe['loader_settings']['lora3_name'],
+                "lora3_model_strength": pipe['loader_settings']['lora3_model_strength'],
+                "lora3_clip_strength": pipe['loader_settings']['lora3_clip_strength'],
+                "positive": pipe['loader_settings']['positive'],
+                "positive_token_normalization": pipe['loader_settings']['positive_token_normalization'],
+                "positive_weight_interpretation": pipe['loader_settings']['positive_weight_interpretation'],
+                "negative": pipe['loader_settings']['negative'],
+                "negative_token_normalization": pipe['loader_settings']['negative_token_normalization'],
+                "negative_weight_interpretation": pipe['loader_settings']['negative_weight_interpretation'],
+            }
+
+            latent_image = sampleXYplot.get_latent(pipe["samples"], sampleXYplot.latent_id)
+
+            latents_plot = sampleXYplot.get_labels_and_sample(plot_image_vars, latent_image, preview_latent, start_step, last_step, force_full_denoise, disable_noise)
+
+            samp_samples = {"samples": latents_plot}
+            images = sampleXYplot.plot_images_and_labels()
+
+            if xyPlot["output_individuals"]:
+                results = ttN_save.images(images, save_prefix, image_output)
+            else:
+                results = ttN_save.images(images[-1], save_prefix, image_output)
+
+            sampler.update_value_by_id("results", my_unique_id, results)
+
+            new_pipe = {
+                "model": samp_model,
+                "positive": samp_positive,
+                "negative": samp_negative,
+                "vae": samp_vae,
+                "clip": samp_clip,
+
+                "samples": samp_samples,
+                "images": images,
+                "seed": samp_seed,
+
+                "loader_settings": pipe["loader_settings"],
+            }
+
+            sampler.update_value_by_id("pipe_line", my_unique_id, new_pipe)
+
+            del pipe
+
+            if image_output in ("Hide", "Hide/Save"):
+                return sampler.get_output(new_pipe)
+
+            return {"ui": {"images": results}, "result": sampler.get_output(new_pipe)}
+
+        preview_latent = True
+        if image_output in ("Hide", "Hide/Save"):
+            preview_latent = False
+
+        if sampler_state == "Sample" and xyPlot is None:
+            return process_sample_state(pipe, samp_model, samp_clip, samp_samples, samp_vae, samp_seed, samp_positive, samp_negative, lora_name, lora_model_strength, lora_clip_strength,
+                                        steps, cfg, sampler_name, scheduler, denoise, image_output, save_prefix, prompt, extra_pnginfo, my_unique_id, preview_latent)
+
+        elif sampler_state == "Sample" and xyPlot is not None:
+            return process_xyPlot(pipe, samp_model, samp_clip, samp_samples, samp_vae, samp_seed, samp_positive, samp_negative, lora_name, lora_model_strength, lora_clip_strength, steps, cfg, sampler_name, scheduler, denoise, image_output, save_prefix, prompt, extra_pnginfo, my_unique_id, preview_latent, xyPlot)
+
+        elif sampler_state == "Hold":
+            return process_hold_state(pipe, image_output, my_unique_id)
+
+class ttN_pipeKSamplerAdvanced:
+    version = '1.0.5'
+    upscale_methods = ["None", "nearest-exact", "bilinear", "area", "bicubic", "lanczos", "bislerp"]
+    crop_methods = ["disabled", "center"]
+
+    def __init__(self):
+        pass
+
+    @classmethod
+    def INPUT_TYPES(cls):
+        return {"required":
+                    {"pipe": ("PIPE_LINE",),
+
+                     "lora_name": (["None"] + folder_paths.get_filename_list("loras"),),
+                     "lora_model_strength": ("FLOAT", {"default": 1.0, "min": -10.0, "max": 10.0, "step": 0.01}),
+                     "lora_clip_strength": ("FLOAT", {"default": 1.0, "min": -10.0, "max": 10.0, "step": 0.01}),
+
+                     "upscale_method": (cls.upscale_methods,),
+                     "factor": ("FLOAT",
{"default": 2, "min": 0.0, "max": 10.0, "step": 0.25}), + "crop": (cls.crop_methods,), + "sampler_state": (["Sample", "Hold"], ), + + "add_noise": (["enable", "disable"], ), + + "steps": ("INT", {"default": 20, "min": 1, "max": 10000}), + "cfg": ("FLOAT", {"default": 8.0, "min": 0.0, "max": 100.0}), + "sampler_name": (comfy.samplers.KSampler.SAMPLERS,), + "scheduler": (comfy.samplers.KSampler.SCHEDULERS,), + + "start_at_step": ("INT", {"default": 0, "min": 0, "max": 10000}), + "end_at_step": ("INT", {"default": 10000, "min": 0, "max": 10000}), + "return_with_leftover_noise": (["disable", "enable"], ), + + "image_output": (["Hide", "Preview", "Save", "Hide/Save"],), + "save_prefix": ("STRING", {"default": "ComfyUI"}) + }, + "optional": + {"noise_seed": ("INT", {"default": 0, "min": 0, "max": 0xffffffffffffffff}), + "optional_model": ("MODEL",), + "optional_positive": ("CONDITIONING",), + "optional_negative": ("CONDITIONING",), + "optional_latent": ("LATENT",), + "optional_vae": ("VAE",), + "optional_clip": ("CLIP",), + "xyPlot": ("XYPLOT",), + }, + "hidden": + {"prompt": "PROMPT", "extra_pnginfo": "EXTRA_PNGINFO", "my_unique_id": "UNIQUE_ID", + "embeddingsList": (folder_paths.get_filename_list("embeddings"),), + "ttNnodeVersion": ttN_pipeKSamplerAdvanced.version}, + } + + RETURN_TYPES = ("PIPE_LINE", "MODEL", "CONDITIONING", "CONDITIONING", "LATENT", "VAE", "CLIP", "IMAGE", "INT",) + RETURN_NAMES = ("pipe", "model", "positive", "negative", "latent","vae", "clip", "image", "seed", ) + OUTPUT_NODE = True + FUNCTION = "sample" + CATEGORY = "🌏 tinyterra/legacy" + + def sample(self, pipe, + lora_name, lora_model_strength, lora_clip_strength, + sampler_state, add_noise, steps, cfg, sampler_name, scheduler, image_output, save_prefix, denoise=1.0, + noise_seed=None, optional_model=None, optional_positive=None, optional_negative=None, optional_latent=None, optional_vae=None, optional_clip=None, xyPlot=None, upscale_method=None, factor=None, crop=None, prompt=None, extra_pnginfo=None, my_unique_id=None, start_at_step=None, end_at_step=None, return_with_leftover_noise=False): + + force_full_denoise = True + if return_with_leftover_noise == "enable": + force_full_denoise = False + + disable_noise = False + if add_noise == "disable": + disable_noise = True + + out = ttN_TSC_pipeKSampler.sample(self, pipe, lora_name, lora_model_strength, lora_clip_strength, sampler_state, steps, cfg, sampler_name, scheduler, image_output, save_prefix, denoise, + optional_model, optional_positive, optional_negative, optional_latent, optional_vae, optional_clip, noise_seed, xyPlot, upscale_method, factor, crop, prompt, extra_pnginfo, my_unique_id, start_at_step, end_at_step, force_full_denoise, disable_noise) + + return out + +class ttN_pipeLoaderSDXL: + version = '1.1.2' + @classmethod + def INPUT_TYPES(cls): + return {"required": { + "ckpt_name": (folder_paths.get_filename_list("checkpoints"), ), + "vae_name": (["Baked VAE"] + folder_paths.get_filename_list("vae"),), + + "lora1_name": (["None"] + folder_paths.get_filename_list("loras"),), + "lora1_model_strength": ("FLOAT", {"default": 1.0, "min": -10.0, "max": 10.0, "step": 0.01}), + "lora1_clip_strength": ("FLOAT", {"default": 1.0, "min": -10.0, "max": 10.0, "step": 0.01}), + + "lora2_name": (["None"] + folder_paths.get_filename_list("loras"),), + "lora2_model_strength": ("FLOAT", {"default": 1.0, "min": -10.0, "max": 10.0, "step": 0.01}), + "lora2_clip_strength": ("FLOAT", {"default": 1.0, "min": -10.0, "max": 10.0, "step": 0.01}), + + "refiner_ckpt_name": (["None"] + 
folder_paths.get_filename_list("checkpoints"), ), + "refiner_vae_name": (["Baked VAE"] + folder_paths.get_filename_list("vae"),), + + "refiner_lora1_name": (["None"] + folder_paths.get_filename_list("loras"),), + "refiner_lora1_model_strength": ("FLOAT", {"default": 1.0, "min": -10.0, "max": 10.0, "step": 0.01}), + "refiner_lora1_clip_strength": ("FLOAT", {"default": 1.0, "min": -10.0, "max": 10.0, "step": 0.01}), + + "refiner_lora2_name": (["None"] + folder_paths.get_filename_list("loras"),), + "refiner_lora2_model_strength": ("FLOAT", {"default": 1.0, "min": -10.0, "max": 10.0, "step": 0.01}), + "refiner_lora2_clip_strength": ("FLOAT", {"default": 1.0, "min": -10.0, "max": 10.0, "step": 0.01}), + + "clip_skip": ("INT", {"default": -2, "min": -24, "max": 0, "step": 1}), + + "positive": ("STRING", {"default": "Positive","multiline": True}), + "positive_token_normalization": (["none", "mean", "length", "length+mean"],), + "positive_weight_interpretation": (["comfy", "A1111", "compel", "comfy++", "down_weight"],), + + "negative": ("STRING", {"default": "Negative", "multiline": True}), + "negative_token_normalization": (["none", "mean", "length", "length+mean"],), + "negative_weight_interpretation": (["comfy", "A1111", "compel", "comfy++", "down_weight"],), + + "empty_latent_width": ("INT", {"default": 1024, "min": 64, "max": MAX_RESOLUTION, "step": 8}), + "empty_latent_height": ("INT", {"default": 1024, "min": 64, "max": MAX_RESOLUTION, "step": 8}), + "batch_size": ("INT", {"default": 1, "min": 1, "max": 64}), + "seed": ("INT", {"default": 0, "min": 0, "max": 0xffffffffffffffff}), + }, + "hidden": {"prompt": "PROMPT", "ttNnodeVersion": ttN_pipeLoaderSDXL.version, "my_unique_id": "UNIQUE_ID"}} + + RETURN_TYPES = ("PIPE_LINE_SDXL" ,"MODEL", "CONDITIONING", "CONDITIONING", "VAE", "CLIP", "MODEL", "CONDITIONING", "CONDITIONING", "VAE", "CLIP", "LATENT", "INT",) + RETURN_NAMES = ("sdxl_pipe","model", "positive", "negative", "vae", "clip", "refiner_model", "refiner_positive", "refiner_negative", "refiner_vae", "refiner_clip", "latent", "seed",) + + FUNCTION = "adv_pipeloader" + CATEGORY = "🌏 tinyterra/legacy" + + def adv_pipeloader(self, ckpt_name, vae_name, + lora1_name, lora1_model_strength, lora1_clip_strength, + lora2_name, lora2_model_strength, lora2_clip_strength, + refiner_ckpt_name, refiner_vae_name, + refiner_lora1_name, refiner_lora1_model_strength, refiner_lora1_clip_strength, + refiner_lora2_name, refiner_lora2_model_strength, refiner_lora2_clip_strength, + clip_skip, + positive, positive_token_normalization, positive_weight_interpretation, + negative, negative_token_normalization, negative_weight_interpretation, + empty_latent_width, empty_latent_height, batch_size, seed, prompt=None, my_unique_id=None): + + def SDXL_loader(ckpt_name, vae_name, + lora1_name, lora1_model_strength, lora1_clip_strength, + lora2_name, lora2_model_strength, lora2_clip_strength, + positive, positive_token_normalization, positive_weight_interpretation, + negative, negative_token_normalization, negative_weight_interpretation,): + + model: ModelPatcher | None = None + clip: CLIP | None = None + vae: VAE | None = None + + # Load models + model, clip, vae = loader.load_checkpoint(ckpt_name) + + if lora1_name != "None": + model, clip = loader.load_lora(lora1_name, model, clip, lora1_model_strength, lora1_clip_strength) + + if lora2_name != "None": + model, clip = loader.load_lora(lora2_name, model, clip, lora2_model_strength, lora2_clip_strength) + + # Check for custom VAE + if vae_name not in ["Baked VAE", 
"Baked-VAE"]: + vae = loader.load_vae(vae_name) + + # CLIP skip + if not clip: + raise Exception("No CLIP found") + + clipped = clip.clone() + if clip_skip != 0: + clipped.clip_layer(clip_skip) + + positive = loader.nsp_parse(positive, seed, title="pipeLoaderSDXL positive", my_unique_id=my_unique_id) + + positive_embeddings_final, positive_pooled = advanced_encode(clipped, positive, positive_token_normalization, positive_weight_interpretation, w_max=1.0, apply_to_pooled='enable') + positive_embeddings_final = [[positive_embeddings_final, {"pooled_output": positive_pooled}]] + + negative = loader.nsp_parse(negative, seed) + + negative_embeddings_final, negative_pooled = advanced_encode(clipped, negative, negative_token_normalization, negative_weight_interpretation, w_max=1.0, apply_to_pooled='enable') + negative_embeddings_final = [[negative_embeddings_final, {"pooled_output": negative_pooled}]] + + return model, positive_embeddings_final, negative_embeddings_final, vae, clip + + # Create Empty Latent + latent = sampler.emptyLatent(None, batch_size, empty_latent_width, empty_latent_height) + samples = {"samples":latent} + + model, positive_embeddings, negative_embeddings, vae, clip = SDXL_loader(ckpt_name, vae_name, + lora1_name, lora1_model_strength, lora1_clip_strength, + lora2_name, lora2_model_strength, lora2_clip_strength, + positive, positive_token_normalization, positive_weight_interpretation, + negative, negative_token_normalization, negative_weight_interpretation) + + if refiner_ckpt_name != "None": + refiner_model, refiner_positive_embeddings, refiner_negative_embeddings, refiner_vae, refiner_clip = SDXL_loader(refiner_ckpt_name, refiner_vae_name, + refiner_lora1_name, refiner_lora1_model_strength, refiner_lora1_clip_strength, + refiner_lora2_name, refiner_lora2_model_strength, refiner_lora2_clip_strength, + positive, positive_token_normalization, positive_weight_interpretation, + negative, negative_token_normalization, negative_weight_interpretation) + else: + refiner_model, refiner_positive_embeddings, refiner_negative_embeddings, refiner_vae, refiner_clip = None, None, None, None, None + + image = ttNsampler.pil2tensor(Image.new('RGB', (1, 1), (0, 0, 0))) + + pipe = {"model": model, + "positive": positive_embeddings, + "negative": negative_embeddings, + "vae": vae, + "clip": clip, + + "refiner_model": refiner_model, + "refiner_positive": refiner_positive_embeddings, + "refiner_negative": refiner_negative_embeddings, + "refiner_vae": refiner_vae, + "refiner_clip": refiner_clip, + + "samples": samples, + "images": image, + "seed": seed, + + "loader_settings": {"ckpt_name": ckpt_name, + "vae_name": vae_name, + + "lora1_name": lora1_name, + "lora1_model_strength": lora1_model_strength, + "lora1_clip_strength": lora1_clip_strength, + "lora2_name": lora2_name, + "lora2_model_strength": lora2_model_strength, + "lora2_clip_strength": lora2_clip_strength, + "lora3_name": None, + "lora3_model_strength": None, + "lora3_clip_strength": None, + + "refiner_ckpt_name": refiner_ckpt_name, + "refiner_vae_name": refiner_vae_name, + "refiner_lora1_name": refiner_lora1_name, + "refiner_lora1_model_strength": refiner_lora1_model_strength, + "refiner_lora1_clip_strength": refiner_lora1_clip_strength, + "refiner_lora2_name": refiner_lora2_name, + "refiner_lora2_model_strength": refiner_lora2_model_strength, + "refiner_lora2_clip_strength": refiner_lora2_clip_strength, + + "clip_skip": clip_skip, + "positive_balance": None, + "positive": positive, + "positive_l": None, + "positive_g": None, + 
"positive_token_normalization": positive_token_normalization, + "positive_weight_interpretation": positive_weight_interpretation, + "negative_balance": None, + "negative": negative, + "negative_l": None, + "negative_g": None, + "negative_token_normalization": negative_token_normalization, + "negative_weight_interpretation": negative_weight_interpretation, + "empty_latent_width": empty_latent_width, + "empty_latent_height": empty_latent_height, + "batch_size": batch_size, + "seed": seed, + "empty_samples": samples,} + } + + return (pipe, model, positive_embeddings, negative_embeddings, vae, clip, refiner_model, refiner_positive_embeddings, refiner_negative_embeddings, refiner_vae, refiner_clip, samples, seed) + +class ttN_pipeKSamplerSDXL: + version = '1.0.2' + upscale_methods = ["None", "nearest-exact", "bilinear", "area", "bicubic", "lanczos", "bislerp"] + crop_methods = ["disabled", "center"] + + def __init__(self): + pass + + @classmethod + def INPUT_TYPES(cls): + return {"required": + {"sdxl_pipe": ("PIPE_LINE_SDXL",), + + "upscale_method": (cls.upscale_methods,), + "factor": ("FLOAT", {"default": 2, "min": 0.0, "max": 10.0, "step": 0.25}), + "crop": (cls.crop_methods,), + "sampler_state": (["Sample", "Hold"], ), + + "base_steps": ("INT", {"default": 20, "min": 1, "max": 10000}), + "refiner_steps": ("INT", {"default": 20, "min": 0, "max": 10000}), + "cfg": ("FLOAT", {"default": 8.0, "min": 0.0, "max": 100.0}), + "sampler_name": (comfy.samplers.KSampler.SAMPLERS,), + "scheduler": (comfy.samplers.KSampler.SCHEDULERS,), + "image_output": (["Hide", "Preview", "Save", "Hide/Save"],), + "save_prefix": ("STRING", {"default": "ComfyUI"}) + }, + "optional": + {"seed": ("INT", {"default": 0, "min": 0, "max": 0xffffffffffffffff}), + "optional_model": ("MODEL",), + "optional_positive": ("CONDITIONING",), + "optional_negative": ("CONDITIONING",), + "optional_vae": ("VAE",), + "optional_refiner_model": ("MODEL",), + "optional_refiner_positive": ("CONDITIONING",), + "optional_refiner_negative": ("CONDITIONING",), + "optional_refiner_vae": ("VAE",), + "optional_latent": ("LATENT",), + "optional_clip": ("CLIP",), + #"xyPlot": ("XYPLOT",), + }, + "hidden": + {"prompt": "PROMPT", "extra_pnginfo": "EXTRA_PNGINFO", "my_unique_id": "UNIQUE_ID", + "embeddingsList": (folder_paths.get_filename_list("embeddings"),), + "ttNnodeVersion": ttN_pipeKSamplerSDXL.version + }, + } + + RETURN_TYPES = ("PIPE_LINE_SDXL", "MODEL", "CONDITIONING", "CONDITIONING", "VAE", "MODEL", "CONDITIONING", "CONDITIONING", "VAE", "LATENT", "CLIP", "IMAGE", "INT",) + RETURN_NAMES = ("sdxl_pipe", "model", "positive", "negative" ,"vae", "refiner_model", "refiner_positive", "refiner_negative" ,"refiner_vae", "latent", "clip", "image", "seed", ) + OUTPUT_NODE = True + FUNCTION = "sample" + CATEGORY = "🌏 tinyterra/legacy" + + def sample(self, sdxl_pipe, sampler_state, + base_steps, refiner_steps, cfg, sampler_name, scheduler, image_output, save_prefix, denoise=1.0, + optional_model=None, optional_positive=None, optional_negative=None, optional_latent=None, optional_vae=None, optional_clip=None, + optional_refiner_model=None, optional_refiner_positive=None, optional_refiner_negative=None, optional_refiner_vae=None, + seed=None, xyPlot=None, upscale_method=None, factor=None, crop=None, prompt=None, extra_pnginfo=None, my_unique_id=None, + start_step=None, last_step=None, force_full_denoise=False, disable_noise=False): + + sdxl_pipe = {**sdxl_pipe} + + my_unique_id = int(my_unique_id) + + ttN_save = ttNsave(my_unique_id, prompt, extra_pnginfo) + 
+ sdxl_samples = optional_latent if optional_latent is not None else sdxl_pipe["samples"] + + sdxl_model = optional_model if optional_model is not None else sdxl_pipe["model"] + sdxl_positive = optional_positive if optional_positive is not None else sdxl_pipe["positive"] + sdxl_negative = optional_negative if optional_negative is not None else sdxl_pipe["negative"] + sdxl_vae = optional_vae if optional_vae is not None else sdxl_pipe["vae"] + sdxl_clip = optional_clip if optional_clip is not None else sdxl_pipe["clip"] + sdxl_refiner_model = optional_refiner_model if optional_refiner_model is not None else sdxl_pipe["refiner_model"] + sdxl_refiner_positive = optional_refiner_positive if optional_refiner_positive is not None else sdxl_pipe["refiner_positive"] + sdxl_refiner_negative = optional_refiner_negative if optional_refiner_negative is not None else sdxl_pipe["refiner_negative"] + sdxl_refiner_vae = optional_refiner_vae if optional_refiner_vae is not None else sdxl_pipe["refiner_vae"] + sdxl_refiner_clip = sdxl_pipe["refiner_clip"] + + if seed in (None, 'undefined'): + sdxl_seed = sdxl_pipe["seed"] + else: + sdxl_seed = seed + + def process_sample_state(sdxl_pipe, sdxl_samples, sdxl_model, sdxl_positive, sdxl_negative, sdxl_vae, sdxl_clip, sdxl_seed, + sdxl_refiner_model, sdxl_refiner_positive, sdxl_refiner_negative, sdxl_refiner_vae, sdxl_refiner_clip, + base_steps, refiner_steps, cfg, sampler_name, scheduler, denoise, + image_output, save_prefix, prompt, my_unique_id, preview_latent, disable_noise=disable_noise): + + total_steps = base_steps + refiner_steps + + # Upscale samples if enabled + sdxl_samples = sampler.handle_upscale(sdxl_samples, upscale_method, factor, crop) + + + if (refiner_steps > 0) and (sdxl_refiner_model not in [None, "None"]): + # Base Sample + sdxl_samples = sampler.common_ksampler(sdxl_model, sdxl_seed, total_steps, cfg, sampler_name, scheduler, sdxl_positive, sdxl_negative, sdxl_samples, + denoise=denoise, preview_latent=preview_latent, start_step=0, last_step=base_steps, force_full_denoise=force_full_denoise, disable_noise=disable_noise) + + # Refiner Sample + sdxl_samples = sampler.common_ksampler(sdxl_refiner_model, sdxl_seed, total_steps, cfg, sampler_name, scheduler, sdxl_refiner_positive, sdxl_refiner_negative, sdxl_samples, + denoise=denoise, preview_latent=preview_latent, start_step=base_steps, last_step=10000, force_full_denoise=True, disable_noise=True) + + latent = sdxl_samples["samples"] + sdxl_images = sdxl_refiner_vae.decode(latent) + del latent + else: + sdxl_samples = sampler.common_ksampler(sdxl_model, sdxl_seed, base_steps, cfg, sampler_name, scheduler, sdxl_positive, sdxl_negative, sdxl_samples, + denoise=denoise, preview_latent=preview_latent, start_step=0, last_step=base_steps, force_full_denoise=True, disable_noise=disable_noise) + + latent = sdxl_samples["samples"] + sdxl_images = sdxl_vae.decode(latent) + del latent + + results = ttN_save.images(sdxl_images, save_prefix, image_output) + + sampler.update_value_by_id("results", my_unique_id, results) + + new_sdxl_pipe = {"model": sdxl_model, + "positive": sdxl_positive, + "negative": sdxl_negative, + "vae": sdxl_vae, + "clip": sdxl_clip, + + "refiner_model": sdxl_refiner_model, + "refiner_positive": sdxl_refiner_positive, + "refiner_negative": sdxl_refiner_negative, + "refiner_vae": sdxl_refiner_vae, + "refiner_clip": sdxl_refiner_clip, + + "samples": sdxl_samples, + "images": sdxl_images, + "seed": sdxl_seed, + + "loader_settings": sdxl_pipe["loader_settings"], + } + + del sdxl_pipe + + 
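For clarity, the two common_ksampler calls in process_sample_state above share one schedule of base_steps + refiner_steps: the base model denoises steps [0, base_steps) from fresh noise, and the refiner continues from base_steps on the partially denoised latent with noise injection disabled. A worked sketch of that split:

def split_schedule(base_steps, refiner_steps):
    total = base_steps + refiner_steps
    base = dict(steps=total, start_step=0, last_step=base_steps,
                disable_noise=False)    # fresh noise for the base pass
    refiner = dict(steps=total, start_step=base_steps, last_step=10000,
                   disable_noise=True)  # continue on the base latent as-is
    return base, refiner

base, refiner = split_schedule(20, 5)
assert base["last_step"] == refiner["start_step"] == 20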
sampler.update_value_by_id("pipe_line", my_unique_id, new_sdxl_pipe) + + if image_output in ("Hide", "Hide/Save"): + return sampler.get_output_sdxl(new_sdxl_pipe) + + return {"ui": {"images": results}, + "result": sampler.get_output_sdxl(new_sdxl_pipe)} + + def process_hold_state(sdxl_pipe, image_output, my_unique_id): + ttNl('Held').t(f'pipeKSamplerSDXL[{my_unique_id}]').p() + + last_pipe = sampler.init_state(my_unique_id, "pipe_line", sdxl_pipe) + + last_results = sampler.init_state(my_unique_id, "results", list()) + + if image_output in ("Hide", "Hide/Save"): + return sampler.get_output_sdxl(last_pipe) + + return {"ui": {"images": last_results}, "result": sampler.get_output_sdxl(last_pipe)} + + preview_latent = True + if image_output in ("Hide", "Hide/Save"): + preview_latent = False + + if sampler_state == "Sample" and xyPlot is None: + return process_sample_state(sdxl_pipe, sdxl_samples, sdxl_model, sdxl_positive, sdxl_negative, sdxl_vae, sdxl_clip, sdxl_seed, + sdxl_refiner_model, sdxl_refiner_positive, sdxl_refiner_negative, sdxl_refiner_vae, sdxl_refiner_clip, base_steps, refiner_steps, cfg, sampler_name, scheduler, denoise, image_output, save_prefix, prompt, my_unique_id, preview_latent) + + #elif sampler_state == "Sample" and xyPlot is not None: + # return process_xyPlot(sdxl_pipe, lora_name, lora_model_strength, lora_clip_strength, steps, cfg, sampler_name, scheduler, denoise, image_output, save_prefix, prompt, extra_pnginfo, my_unique_id, preview_latent, xyPlot) + + elif sampler_state == "Hold": + return process_hold_state(sdxl_pipe, image_output, my_unique_id) + +#---------------------------------------------------------------DEPRECATED END-----------------------------------------------------------------------# + +TTN_LEGACY_VERSIONS = { + "pipeLoader": ttN_TSC_pipeLoader.version, + "pipeKSampler": ttN_TSC_pipeKSampler.version, + "pipeKSamplerAdvanced": ttN_pipeKSamplerAdvanced.version, + "pipeLoaderSDXL": ttN_pipeLoaderSDXL.version, + "pipeKSamplerSDXL": ttN_pipeKSamplerSDXL.version, + "pipeIN": ttN_pipe_IN.version, + "pipeOUT": ttN_pipe_OUT.version, + "xyPlot": ttN_XYPlot.version, +} +NODE_CLASS_MAPPINGS = { + "ttN xyPlot": ttN_XYPlot, + "ttN pipeIN": ttN_pipe_IN, + "ttN pipeOUT": ttN_pipe_OUT, + "ttN pipeLoader": ttN_TSC_pipeLoader, + "ttN pipeKSampler": ttN_TSC_pipeKSampler, + "ttN pipeKSamplerAdvanced": ttN_pipeKSamplerAdvanced, + "ttN pipeLoaderSDXL": ttN_pipeLoaderSDXL, + "ttN pipeKSamplerSDXL": ttN_pipeKSamplerSDXL, +} +NODE_DISPLAY_NAME_MAPPINGS = { + "ttN xyPlot": "xyPlot", + "ttN pipeIN": "pipeIN (Legacy)", + "ttN pipeOUT": "pipeOUT (Legacy)", + "ttN pipeLoader": "pipeLoader v1 (Legacy)", + "ttN pipeKSampler": "pipeKSampler v1 (Legacy)", + "ttN pipeKSamplerAdvanced": "pipeKSamplerAdvanced v1 (Legacy)", + "ttN pipeLoaderSDXL": "pipeLoaderSDXL v1 (Legacy)", + "ttN pipeKSamplerSDXL": "pipeKSamplerSDXL v1 (Legacy)", +} diff --git a/zavodik/nodes/ComfyUI_tinyterraNodes/ttNpy/ttNserver.py b/zavodik/nodes/ComfyUI_tinyterraNodes/ttNpy/ttNserver.py new file mode 100644 index 0000000000000000000000000000000000000000..f740b203333aba5b2496ffe6c61880c325f953e9 --- /dev/null +++ b/zavodik/nodes/ComfyUI_tinyterraNodes/ttNpy/ttNserver.py @@ -0,0 +1,32 @@ +import os +import sys + +from aiohttp import web + +import folder_paths +from server import PromptServer + +routes = PromptServer.instance.routes + +@routes.get("/ttN/reboot") +def restart(self): + try: + sys.stdout.close_log() + except Exception as e: + pass + + print(f"\nRestarting...\n\n") + if sys.platform.startswith('win32'): 
+ return os.execv(sys.executable, ['"' + sys.executable + '"', '"' + sys.argv[0] + '"'] + sys.argv[1:]) + else: + return os.execv(sys.executable, [sys.executable] + sys.argv) + +@routes.get("/ttN/models") +def get_models(self): + ckpts = folder_paths.get_filename_list("checkpoints") + return web.json_response(list(map(lambda a: os.path.splitext(a)[0], ckpts))) + +@routes.get("/ttN/loras") +def get_loras(self): + loras = folder_paths.get_filename_list("loras") + return web.json_response(loras) \ No newline at end of file diff --git a/zavodik/nodes/ComfyUI_tinyterraNodes/ttNpy/utils.py b/zavodik/nodes/ComfyUI_tinyterraNodes/ttNpy/utils.py new file mode 100644 index 0000000000000000000000000000000000000000..6ea817d81bb95d0bcb9e84c4f3767df699d18934 --- /dev/null +++ b/zavodik/nodes/ComfyUI_tinyterraNodes/ttNpy/utils.py @@ -0,0 +1,83 @@ +import os +from pathlib import Path + +import folder_paths + +class CC: + CLEAN = '\33[0m' + BOLD = '\33[1m' + ITALIC = '\33[3m' + UNDERLINE = '\33[4m' + BLINK = '\33[5m' + BLINK2 = '\33[6m' + SELECTED = '\33[7m' + + BLACK = '\33[30m' + RED = '\33[31m' + GREEN = '\33[32m' + YELLOW = '\33[33m' + BLUE = '\33[34m' + VIOLET = '\33[35m' + BEIGE = '\33[36m' + WHITE = '\33[37m' + + GREY = '\33[90m' + LIGHTRED = '\33[91m' + LIGHTGREEN = '\33[92m' + LIGHTYELLOW = '\33[93m' + LIGHTBLUE = '\33[94m' + LIGHTVIOLET = '\33[95m' + LIGHTBEIGE = '\33[96m' + LIGHTWHITE = '\33[97m' + +class ttNl: + def __init__(self, input_string): + self.header_value = f'{CC.LIGHTGREEN}[ttN] {CC.GREEN}' + self.label_value = '' + self.title_value = '' + self.input_string = f'{input_string}{CC.CLEAN}' + + def h(self, header_value): + self.header_value = f'{CC.LIGHTGREEN}[{header_value}] {CC.GREEN}' + return self + + def full(self): + self.h('tinyterraNodes') + return self + + def success(self): + self.label_value = f'Success: ' + return self + + def warn(self): + self.label_value = f'{CC.RED}Warning:{CC.LIGHTRED} ' + return self + + def error(self): + self.label_value = f'{CC.LIGHTRED}ERROR:{CC.RED} ' + return self + + def t(self, title_value): + self.title_value = f'{title_value}:{CC.CLEAN} ' + return self + + def p(self): + print(self.header_value + self.label_value + self.title_value + self.input_string) + return self + + def interrupt(self, msg): + raise Exception(msg) + +class ttNpaths: + ComfyUI = folder_paths.base_path + tinyterraNodes = Path(__file__).parent.parent + font_path = os.path.join(tinyterraNodes, 'arial.ttf') + +class AnyType(str): + """A special class that is always equal in not equal comparisons. 
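As an aside, a brief usage sketch of the two helpers defined here (ANSI color codes omitted from the expected output); ttNl's methods all return self so calls chain left to right, and AnyType overrides equality so a socket typed with it matches anything:

ttNl("model loaded").full().success().t("pipeLoader[12]").p()
# prints: [tinyterraNodes] Success: pipeLoader[12]: model loaded

any_type = AnyType("*")      # a str that compares equal to everything
assert any_type == "IMAGE"
assert not (any_type != "LATENT")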
Credit to pythongosssss""" + + def __eq__(self, _) -> bool: + return True + + def __ne__(self, __value: object) -> bool: + return False \ No newline at end of file diff --git a/zavodik/nodes/Text.txt b/zavodik/nodes/Text.txt new file mode 100644 index 0000000000000000000000000000000000000000..3387f4e6402d746a2dd98aa0a9a5ec6df7cf323e --- /dev/null +++ b/zavodik/nodes/Text.txt @@ -0,0 +1,10 @@ +pip install -r /workspace/ComfyUI/requirements.txt + +cd /workspace +for d in ComfyUI/custom_nodes/*; do +if [ -f "$d/requirements.txt" ]; then +pip install -r "$d/requirements.txt" +fi +done + +supervisorctl restart comfyui diff --git a/zavodik/nodes/ai_syndicate-animator-v2.json b/zavodik/nodes/ai_syndicate-animator-v2.json new file mode 100644 index 0000000000000000000000000000000000000000..e6854653ad2ec5372c4e3720f70101fb5fdb9d5e --- /dev/null +++ b/zavodik/nodes/ai_syndicate-animator-v2.json @@ -0,0 +1,5526 @@ +{ + "id": "7f3d25a6-3378-4b99-a89a-a9e729c1b27f", + "revision": 0, + "last_node_id": 708, + "last_link_id": 788, + "nodes": [ + { + "id": 131, + "type": "GetNode", + "pos": [ + -6791.092011784298, + 6620.01435038226 + ], + "size": [ + 210, + 60 + ], + "flags": { + "collapsed": true + }, + "order": 0, + "mode": 0, + "inputs": [], + "outputs": [ + { + "name": "AUDIO", + "type": "AUDIO", + "links": [ + 784 + ] + } + ], + "title": "Get_audio", + "properties": { + "ue_properties": { + "widget_ue_connectable": {}, + "version": "7.5.2", + "input_ue_unconnectable": {} + }, + "_cache": "d0852aacd9419dc54f287a17fb358b86e9cfc8d85e1404052f8892dd11538b80" + }, + "widgets_values": [ + "audio" + ], + "color": "#323", + "bgcolor": "#535" + }, + { + "id": 130, + "type": "SetNode", + "pos": [ + -9331.15089980405, + 5942.964461756138 + ], + "size": [ + 210, + 50 + ], + "flags": { + "collapsed": true + }, + "order": 63, + "mode": 0, + "inputs": [ + { + "name": "AUDIO", + "type": "AUDIO", + "link": 211 + } + ], + "outputs": [ + { + "name": "INT", + "type": "INT", + "links": [] + } + ], + "title": "Set_audio", + "properties": { + "previousName": "audio", + "ue_properties": { + "widget_ue_connectable": {}, + "version": "7.5.2", + "input_ue_unconnectable": {} + }, + "_cache": "d0852aacd9419dc54f287a17fb358b86e9cfc8d85e1404052f8892dd11538b80" + }, + "widgets_values": [ + "audio" + ] + }, + { + "id": 304, + "type": "SetNode", + "pos": [ + -9217.222010923215, + 5943.275366463635 + ], + "size": [ + 210, + 60 + ], + "flags": { + "collapsed": true + }, + "order": 59, + "mode": 0, + "inputs": [ + { + "name": "IMAGE", + "type": "IMAGE", + "link": 409 + } + ], + "outputs": [ + { + "name": "*", + "type": "*", + "links": null + } + ], + "title": "Set_upload video", + "properties": { + "previousName": "upload video", + "_cache": "d0852aacd9419dc54f287a17fb358b86e9cfc8d85e1404052f8892dd11538b80", + "ue_properties": { + "widget_ue_connectable": {}, + "version": "7.7", + "input_ue_unconnectable": {} + } + }, + "widgets_values": [ + "upload video" + ], + "color": "#2a363b", + "bgcolor": "#3f5159" + }, + { + "id": 298, + "type": "SetNode", + "pos": [ + -9062.323738000434, + 5945.435323715617 + ], + "size": [ + 210, + 58 + ], + "flags": { + "collapsed": true + }, + "order": 50, + "mode": 0, + "inputs": [ + { + "name": "IMAGE", + "type": "IMAGE", + "link": 626 + } + ], + "outputs": [ + { + "name": "IMAGE", + "type": "IMAGE", + "links": [ + 477 + ] + } + ], + "title": "Set_upload photo", + "properties": { + "previousName": "", + "_cache": "d0852aacd9419dc54f287a17fb358b86e9cfc8d85e1404052f8892dd11538b80", + "ue_properties": { + 
"widget_ue_connectable": {}, + "version": "7.7", + "input_ue_unconnectable": {} + } + }, + "widgets_values": [ + "upload photo" + ], + "color": "#2a363b", + "bgcolor": "#3f5159" + }, + { + "id": 246, + "type": "SetNode", + "pos": [ + -9623.91762446513, + 5942.71965179555 + ], + "size": [ + 210, + 60 + ], + "flags": { + "collapsed": true + }, + "order": 58, + "mode": 0, + "inputs": [ + { + "name": "IMAGE", + "type": "IMAGE", + "link": 325 + } + ], + "outputs": [ + { + "name": "*", + "type": "*", + "links": null + } + ], + "title": "Set_first frame", + "properties": { + "previousName": "first frame", + "_cache": "d0852aacd9419dc54f287a17fb358b86e9cfc8d85e1404052f8892dd11538b80", + "ue_properties": { + "widget_ue_connectable": {}, + "version": "7.7", + "input_ue_unconnectable": {} + } + }, + "widgets_values": [ + "first frame" + ], + "color": "#2a363b", + "bgcolor": "#3f5159" + }, + { + "id": 290, + "type": "SetNode", + "pos": [ + -9483.012859151462, + 5943.158334138341 + ], + "size": [ + 210, + 60 + ], + "flags": { + "collapsed": true + }, + "order": 62, + "mode": 0, + "inputs": [ + { + "name": "INT", + "type": "INT", + "link": 391 + } + ], + "outputs": [ + { + "name": "*", + "type": "*", + "links": null + } + ], + "title": "Set_num_frames", + "properties": { + "previousName": "num_frames", + "_cache": "d0852aacd9419dc54f287a17fb358b86e9cfc8d85e1404052f8892dd11538b80", + "ue_properties": { + "widget_ue_connectable": {}, + "version": "7.7", + "input_ue_unconnectable": {} + } + }, + "widgets_values": [ + "num_frames" + ], + "color": "#1b4669", + "bgcolor": "#29699c" + }, + { + "id": 567, + "type": "Sam2Segmentation", + "pos": [ + -7540.43157844804, + 7040.231726394442 + ], + "size": [ + 272.087890625, + 182 + ], + "flags": { + "collapsed": false + }, + "order": 71, + "mode": 4, + "inputs": [ + { + "name": "sam2_model", + "type": "SAM2MODEL", + "link": 763 + }, + { + "name": "image", + "type": "IMAGE", + "link": 764 + }, + { + "name": "coordinates_positive", + "shape": 7, + "type": "STRING", + "link": null + }, + { + "name": "coordinates_negative", + "shape": 7, + "type": "STRING", + "link": null + }, + { + "name": "bboxes", + "shape": 7, + "type": "BBOX", + "link": 773 + }, + { + "name": "mask", + "shape": 7, + "type": "MASK", + "link": null + } + ], + "outputs": [ + { + "name": "mask", + "type": "MASK", + "links": [ + 770 + ] + } + ], + "properties": { + "aux_id": "kijai/ComfyUI-segment-anything-2", + "ver": "ebdb9869596b14babdaa83505e6863aad4618501", + "Node name for S&R": "Sam2Segmentation", + "cnr_id": "ComfyUI-segment-anything-2", + "ue_properties": { + "version": "7.1", + "widget_ue_connectable": {}, + "input_ue_unconnectable": {} + } + }, + "widgets_values": [ + false, + false + ], + "color": "#323", + "bgcolor": "#535" + }, + { + "id": 322, + "type": "GetNode", + "pos": [ + -8543.254348840981, + 6078.17615958599 + ], + "size": [ + 2364.969202244919, + 58 + ], + "flags": { + "collapsed": true + }, + "order": 1, + "mode": 0, + "inputs": [], + "outputs": [ + { + "name": "WANVIDEOMODEL", + "type": "WANVIDEOMODEL", + "links": [ + 730 + ] + } + ], + "title": "Get_model", + "properties": { + "_cache": "d0852aacd9419dc54f287a17fb358b86e9cfc8d85e1404052f8892dd11538b80", + "ue_properties": { + "widget_ue_connectable": {}, + "version": "7.7", + "input_ue_unconnectable": {} + } + }, + "widgets_values": [ + "model" + ], + "color": "#323", + "bgcolor": "#535" + }, + { + "id": 552, + "type": "WanVideoUni3C_ControlnetLoader", + "pos": [ + -7456.017023809338, + 6396.776591152649 + ], + "size": [ + 
331.0896676995608, + 154 + ], + "flags": { + "collapsed": true + }, + "order": 2, + "mode": 0, + "inputs": [ + { + "label": "compile_args", + "name": "compile_args", + "shape": 7, + "type": "WANCOMPILEARGS", + "link": null + } + ], + "outputs": [ + { + "label": "controlnet", + "name": "controlnet", + "type": "WANVIDEOCONTROLNET", + "links": [ + 725 + ] + } + ], + "title": "ANIMATOR STABILITY", + "properties": { + "aux_id": "kijai/ComfyUI-WanVideoWrapper", + "ver": "774b05545259dd13fd3ab7cf1366b5633e297a3f", + "Node name for S&R": "WanVideoUni3C_ControlnetLoader", + "cnr_id": "ComfyUI-WanVideoWrapper", + "ue_properties": { + "version": "7.7", + "widget_ue_connectable": {}, + "input_ue_unconnectable": {} + } + }, + "widgets_values": [ + "Wan21_Uni3C_controlnet_fp16.safetensors", + "fp16", + "fp8_e4m3fn", + "main_device", + "sdpa" + ], + "color": "#323", + "bgcolor": "#535" + }, + { + "id": 571, + "type": "DownloadAndLoadSAM2Model", + "pos": [ + -7539.859481057176, + 6881.8624841643705 + ], + "size": [ + 268.8560911331206, + 130.83905452888212 + ], + "flags": {}, + "order": 3, + "mode": 4, + "inputs": [], + "outputs": [ + { + "name": "sam2_model", + "type": "SAM2MODEL", + "links": [ + 763 + ] + } + ], + "properties": { + "aux_id": "kijai/ComfyUI-segment-anything-2", + "ver": "ebdb9869596b14babdaa83505e6863aad4618501", + "Node name for S&R": "DownloadAndLoadSAM2Model", + "cnr_id": "comfyui-segment-anything-2", + "ue_properties": { + "widget_ue_connectable": { + "model": true, + "segmentor": true, + "device": true, + "precision": true + }, + "version": "7.1", + "input_ue_unconnectable": {} + } + }, + "widgets_values": [ + "sam2.1_hiera_base_plus.safetensors", + "video", + "cuda", + "fp16" + ], + "color": "#323", + "bgcolor": "#535" + }, + { + "id": 570, + "type": "GetNode", + "pos": [ + -7269.051625933231, + 7185.478985801624 + ], + "size": [ + 210, + 58 + ], + "flags": { + "collapsed": true + }, + "order": 4, + "mode": 4, + "inputs": [], + "outputs": [ + { + "name": "IMAGE", + "type": "IMAGE", + "links": [ + 760, + 764, + 765 + ] + } + ], + "title": "Get_upload video", + "properties": { + "ue_properties": { + "widget_ue_connectable": {}, + "version": "7.7", + "input_ue_unconnectable": {} + } + }, + "widgets_values": [ + "upload video" + ], + "color": "#323", + "bgcolor": "#535" + }, + { + "id": 576, + "type": "CM_FloatToInt", + "pos": [ + -7267.646413471171, + 7155.620053937722 + ], + "size": [ + 270, + 58 + ], + "flags": { + "collapsed": true + }, + "order": 49, + "mode": 4, + "inputs": [ + { + "name": "a", + "type": "FLOAT", + "widget": { + "name": "a" + }, + "link": 772 + } + ], + "outputs": [ + { + "name": "INT", + "type": "INT", + "links": [ + 771 + ] + } + ], + "properties": { + "aux_id": "jtydhr88/ComfyUI-Workflow-Encrypt", + "ver": "c01177221c31b8e5fbc062778fc8254aeb541638", + "Node name for S&R": "CM_FloatToInt", + "cnr_id": "ComfyMath", + "ue_properties": { + "widget_ue_connectable": { + "a": true + }, + "version": "7.7", + "input_ue_unconnectable": {} + } + }, + "widgets_values": [ + 0 + ], + "color": "#323", + "bgcolor": "#535" + }, + { + "id": 572, + "type": "SetNode", + "pos": [ + -7151.480867965347, + 7157.453049628667 + ], + "size": [ + 210, + 34 + ], + "flags": { + "collapsed": true + }, + "order": 83, + "mode": 4, + "inputs": [ + { + "name": "MASK", + "type": "MASK", + "link": 768 + } + ], + "outputs": [ + { + "name": "*", + "type": "*", + "links": null + } + ], + "title": "Set_character_masks", + "properties": { + "previousName": "", + "ue_properties": { + "version": "7.1", 
+ "widget_ue_connectable": {}, + "input_ue_unconnectable": {} + } + }, + "widgets_values": [ + "character_masks_0" + ], + "color": "#1c5715", + "bgcolor": "#1f401b" + }, + { + "id": 573, + "type": "SetNode", + "pos": [ + -6982.114943357508, + 7001.087418538868 + ], + "size": [ + 210, + 58 + ], + "flags": { + "collapsed": false + }, + "order": 86, + "mode": 4, + "inputs": [ + { + "name": "IMAGE", + "type": "IMAGE", + "link": 769 + } + ], + "outputs": [ + { + "name": "*", + "type": "*", + "links": null + } + ], + "title": "Set_masked_images", + "properties": { + "previousName": "", + "ue_properties": { + "version": "7.1", + "widget_ue_connectable": {}, + "input_ue_unconnectable": {} + } + }, + "widgets_values": [ + "masked_images_0" + ], + "color": "#2a363b", + "bgcolor": "#3f5159" + }, + { + "id": 575, + "type": "SimpleKnobNode", + "pos": [ + -6978.973358464633, + 7091.83637483854 + ], + "size": [ + 220, + 88 + ], + "flags": { + "collapsed": false + }, + "order": 5, + "mode": 4, + "inputs": [], + "outputs": [ + { + "name": " ", + "type": "FLOAT", + "links": [ + 772 + ] + } + ], + "title": ".", + "properties": { + "aux_id": "plugcrypt/CRT-Nodes", + "ver": "2.1.7", + "Node name for S&R": "SimpleKnobNode", + "title": "", + "color": "#7700ff", + "min": 10, + "max": 200, + "step": 1, + "precision": 0, + "cnr_id": "crt-nodes", + "ue_properties": { + "widget_ue_connectable": {}, + "version": "7.7", + "input_ue_unconnectable": {} + } + }, + "widgets_values": [ + 100, + "" + ], + "color": "#323", + "bgcolor": "#535" + }, + { + "id": 565, + "type": "DrawMaskOnImage", + "pos": [ + -6978.796087628785, + 6972.609962629861 + ], + "size": [ + 270, + 102 + ], + "flags": { + "collapsed": true + }, + "order": 82, + "mode": 4, + "inputs": [ + { + "name": "image", + "type": "IMAGE", + "link": 760 + }, + { + "name": "mask", + "type": "MASK", + "link": 761 + } + ], + "outputs": [ + { + "name": "images", + "type": "IMAGE", + "links": [ + 759, + 769 + ] + } + ], + "properties": { + "aux_id": "kijai/ComfyUI-KJNodes", + "ver": "9d7af919b91838fb22e31ad0107a6ddcf8bd7f3f", + "Node name for S&R": "DrawMaskOnImage", + "cnr_id": "comfyui-kjnodes", + "ue_properties": { + "version": "7.1", + "widget_ue_connectable": {}, + "input_ue_unconnectable": {} + } + }, + "widgets_values": [ + "0, 0, 0", + "gpu" + ], + "color": "#323", + "bgcolor": "#535" + }, + { + "id": 566, + "type": "BlockifyMask", + "pos": [ + -6978.023455663939, + 6944.035714999651 + ], + "size": [ + 270, + 82 + ], + "flags": { + "collapsed": true + }, + "order": 80, + "mode": 4, + "inputs": [ + { + "name": "masks", + "type": "MASK", + "link": 762 + } + ], + "outputs": [ + { + "name": "mask", + "type": "MASK", + "links": [ + 761, + 768 + ] + } + ], + "properties": { + "aux_id": "kijai/ComfyUI-KJNodes", + "ver": "9d7af919b91838fb22e31ad0107a6ddcf8bd7f3f", + "Node name for S&R": "BlockifyMask", + "cnr_id": "comfyui-kjnodes", + "ue_properties": { + "version": "7.1", + "widget_ue_connectable": {}, + "input_ue_unconnectable": {} + } + }, + "widgets_values": [ + 32, + "gpu" + ], + "color": "#323", + "bgcolor": "#535" + }, + { + "id": 574, + "type": "GrowMaskWithBlur", + "pos": [ + -7272.114823069018, + 6882.9171299244135 + ], + "size": [ + 293.3291320800781, + 246 + ], + "flags": { + "collapsed": false + }, + "order": 78, + "mode": 4, + "inputs": [ + { + "name": "mask", + "type": "MASK", + "link": 770 + }, + { + "name": "expand", + "type": "INT", + "widget": { + "name": "expand" + }, + "link": 771 + } + ], + "outputs": [ + { + "name": "mask", + "type": "MASK", + 
"links": [ + 762 + ] + }, + { + "name": "mask_inverted", + "type": "MASK", + "links": null + } + ], + "properties": { + "aux_id": "kijai/ComfyUI-KJNodes", + "ver": "405c99ec16e32dfd122de5e9113da8cebf11ed2f", + "Node name for S&R": "GrowMaskWithBlur", + "cnr_id": "comfyui-kjnodes", + "ue_properties": { + "widget_ue_connectable": { + "expand": true, + "incremental_expandrate": true, + "tapered_corners": true, + "flip_input": true, + "blur_radius": true, + "lerp_alpha": true, + "decay_factor": true, + "fill_holes": true + }, + "version": "7.1", + "input_ue_unconnectable": {} + } + }, + "widgets_values": [ + 100, + 1, + true, + false, + 4, + 1, + 1, + false + ], + "color": "#323", + "bgcolor": "#535" + }, + { + "id": 580, + "type": "GetNode", + "pos": [ + -7268.408500218023, + 7218.216918451846 + ], + "size": [ + 210, + 58 + ], + "flags": { + "collapsed": true + }, + "order": 6, + "mode": 4, + "inputs": [], + "outputs": [ + { + "name": "MASK", + "type": "MASK", + "links": [ + 774 + ] + } + ], + "title": "MASK DEEP FAKE", + "properties": { + "ue_properties": { + "version": "7.1", + "widget_ue_connectable": {}, + "input_ue_unconnectable": {} + } + }, + "widgets_values": [ + "character_masks_0" + ], + "color": "#323", + "bgcolor": "#535" + }, + { + "id": 578, + "type": "GetNode", + "pos": [ + -7108.43831184887, + 7216.594888636424 + ], + "size": [ + 210, + 58 + ], + "flags": { + "collapsed": true + }, + "order": 7, + "mode": 4, + "inputs": [], + "outputs": [ + { + "name": "IMAGE", + "type": "IMAGE", + "links": [ + 775 + ] + } + ], + "title": "BG IMAGES DEEP FAKE", + "properties": { + "ue_properties": { + "version": "7.1", + "widget_ue_connectable": {}, + "input_ue_unconnectable": {} + } + }, + "widgets_values": [ + "masked_images_0" + ], + "color": "#323", + "bgcolor": "#535" + }, + { + "id": 581, + "type": "WanVideoEncode", + "pos": [ + -7828.375846823232, + 6397.589394324347 + ], + "size": [ + 272, + 242 + ], + "flags": { + "collapsed": true + }, + "order": 61, + "mode": 0, + "inputs": [ + { + "name": "vae", + "type": "WANVAE", + "link": 777 + }, + { + "name": "image", + "type": "IMAGE", + "link": 776 + }, + { + "name": "mask", + "shape": 7, + "type": "MASK", + "link": null + } + ], + "outputs": [ + { + "name": "samples", + "type": "LATENT", + "links": [ + 778 + ] + } + ], + "title": "ANIMATOR STABILITY", + "properties": { + "aux_id": "jtydhr88/ComfyUI-Workflow-Encrypt", + "ver": "df8f3e49daaad117cf3090cc916c83f3d001494c", + "Node name for S&R": "WanVideoEncode", + "cnr_id": "ComfyUI-WanVideoWrapper", + "ue_properties": { + "widget_ue_connectable": {}, + "version": "7.7", + "input_ue_unconnectable": {} + } + }, + "widgets_values": [ + false, + 272, + 272, + 144, + 128, + 0, + 1 + ], + "color": "#323", + "bgcolor": "#535" + }, + { + "id": 103, + "type": "GetNode", + "pos": [ + -8891.233502416664, + 6456.1351740993005 + ], + "size": [ + 210, + 58 + ], + "flags": { + "collapsed": true + }, + "order": 8, + "mode": 0, + "inputs": [], + "outputs": [ + { + "name": "INT", + "type": "INT", + "links": [ + 164, + 360 + ] + } + ], + "title": "Get_height", + "properties": { + "ue_properties": { + "widget_ue_connectable": {}, + "version": "7.5.2", + "input_ue_unconnectable": {} + }, + "_cache": "d0852aacd9419dc54f287a17fb358b86e9cfc8d85e1404052f8892dd11538b80" + }, + "widgets_values": [ + "height" + ], + "color": "#323", + "bgcolor": "#535" + }, + { + "id": 553, + "type": "WanVideoUni3C_embeds", + "pos": [ + -7640.855523116489, + 6396.402980810835 + ], + "size": [ + 330.3546465201571, + 170 + ], + "flags": { 
+ "collapsed": true + }, + "order": 74, + "mode": 0, + "inputs": [ + { + "label": "controlnet", + "name": "controlnet", + "type": "WANVIDEOCONTROLNET", + "link": 725 + }, + { + "label": "render_latent", + "name": "render_latent", + "shape": 7, + "type": "LATENT", + "link": 778 + }, + { + "label": "render_mask", + "name": "render_mask", + "shape": 7, + "type": "MASK", + "link": null + } + ], + "outputs": [ + { + "label": "uni3c_embeds", + "name": "uni3c_embeds", + "type": "UNI3C_EMBEDS", + "links": [ + 729 + ] + } + ], + "title": "ANIMATOR STABILITY", + "properties": { + "aux_id": "kijai/ComfyUI-WanVideoWrapper", + "ver": "774b05545259dd13fd3ab7cf1366b5633e297a3f", + "Node name for S&R": "WanVideoUni3C_embeds", + "cnr_id": "ComfyUI-WanVideoWrapper", + "ue_properties": { + "version": "7.7", + "widget_ue_connectable": {}, + "input_ue_unconnectable": {} + } + }, + "widgets_values": [ + 1, + 0, + 1, + true + ], + "color": "#323", + "bgcolor": "#535" + }, + { + "id": 691, + "type": "Label (rgthree)", + "pos": [ + -7833.806410914701, + 6481.774527689218 + ], + "size": [ + 430.265625, + 330 + ], + "flags": { + "pinned": true, + "allow_interaction": false + }, + "order": 9, + "mode": 0, + "inputs": [], + "outputs": [], + "title": "░░░░░░░░░░░░░░░░░░░░░░░░░░░░▒▒▒▒▒▒▒▒▒▒▒▓▓▓████████████████▓▓▓▒▒▒▒▒▒▒▒▒▒▒░░░░░░░░░░░░░░░░░░░░░░░░░░░░\n░░░░░░░░░░░░░░░░░░░░░░░░░░░░▒▒▒▒▒▓▓██████████████████████████████▓▓▒▒▒▒░░░░░░░░░░░░░░░░░░░░░░░░░░░░░\n░░░░░░░░░░░░░░░░░░░░░▒▒▒▒▒▒▒▒▓████████████████████████████████████████▓▓▒▒▒▒▒▒▒░░░░░░░░░░░░░░░░░░░░░\n░░░░░░░░░░░░░░░░░░░░░▒▒▒▒▓████████████▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▒▒▓▓▓▓▓███████████▓▒▒▒▒░░░░░░░░░░░░░░░░░░░░░\n░░░░░░░░░░░░░░░░░▒▒▒▒▒▓██████████▓▓▓▓▓████████████████████████▓▓▓▓▓▓█████████▓▒▒▒▒▒░░░░░░░░░░░░░░░░░\n░░░░░░░░░░░░░░░░░▒▒▒████████████████▓▓▓▓▓▒▒▒▒▓▓▓▓▓▓▓▓▓▓▓▒▒▒▓▓▓▓▓████████████████▒▒▒░░░░░░░░░░░░░░░░░\n░░░░░░░░░░░░░░▒▒▒▓████████████▓▓▓▓▓▓▓███████████████████████████▓▓▓▓▓▓████████████▓▒▒▒░░░░░░░░░░░░░░\n░░░░░░░░░░░░░░▒▒█████████▓▓▒▓▓██████▓▓▓▒▒▒▒▒▓▓▓▓▓▓▓▓▓▓▓▒▒▒▒▓▓▓▓███████▓▓▒▓▓█████████▓▒░░░░░░░░░░░░░░\n░░░░░░░░░░▒▒▒▒████████▓▒▓▓█████▓▒▒▒▓███████████████████████████▓▓▒▒▒▓█████▓▓▒▓▓███████▒▒▒▒░░░░░░░░░░\n░░░░░░░░░░▒▒███████▓▒▓████▓▒▒▓▓█████▓▓▒▒▒▒▒▒▓▓▓▓▓▓▓▓▓▓▓▓▒▒▒▒▒▓▓▓█████▓▓▒▒▓████▓▒▓███████▒░░░░░░░░░░░\n░░░░░░░▒▒▒▒██████▓▓███▓▓▒▓█████▓▒▒▒▓▓█████████████████████████▓▓▓▒▒▒▓█████▓▒▒▓███▓▓██████▓▒▒▒░░░░░░░\n░░░▒▒▒▒░▒▓█████████▓▒▓▓████▓▒▓▓▓████████████████████████████████████▓▓▓▒▓▓███▓▓▒▓█████████▓▒▒▒▒░░░░░\n░░░░▒▒▒▒████████▓▓▓▓███▓▒▓▓█████████████████████████████████████████████▓▓▒▒▓███▓▓▒▓████████▒▒▒▒▒░░░\n░░░▒▒▒▒███████▓▒▓▓██▓▒▓▓████████████████████████████████████████████████████▓▓▒▓███▓▒▒███████▒▒▒▒░░░\n░░░▒▒▒██████▓▒▓███▓▓▓██████████████████████████████████████████████████████████▓▓▓▓██▓▓▓██████▒▒▒░░░\n▒▒▒▒▒█████▓▒▓██▓▒▓███████████████▓▓██████████████████████████████▓▓███████████████▓▒▓██▓▓▓█████▒▒▒▒░\n▒▒▒▒▓█████▓██▓▒▓█████████████▓▓▒▓▓▓███████████████████████████████▓▒▒▒▓█████████████▓▒▓██▓▓█████▒▒▒▒\n▒▒▒▓███████▓▓▓████████████▓▓▓▓▓▒▓▓█████████████████████████████████▓▒▒▓▓▒▓████████████▓▓▓███████▓▒▒▒\n▒▒▒███████▓▓█████████████▓▓▒▓▓▓▓▓██████████████████████████████████▓▓▒▒▓▓▓▓█████████████▓▓▓██████▓▒▒\n▒▒██████▓▒▓██████████████▓▓▓▓▓▓▓▓███████████████████████████████████▓▓▓▓▒▓▓███████████████▓▓██████▒▒\n▒▒█████▓▓████████████████▓▓▓▓▓▓▓▓█████▓▓▓▓██████████████████████████▓▒▓▓▒▒▒████████████████▓▒█████▓▒\n▒█████▓▓█████████████████▓▒▓▓▓▓▓█████▓▓▓▒▒▓█████████████████████████▓▓▓▒▒▓▓██████████████████▓█████▒\n▒██████████████▓▓▓███████▓▒▒▓▓▓▓█████▓▓▓▒▓▓████████████▓▓▓▓█████████▓
▓▒▒▒▓▒███████▓▓▓██████████████▒\n▓█████████████▓▒▓▓███████▓▓▓▓▓▓▓▓███████████████████▓▓▓▓▓▓▓▓▓▓██████▓▒▓▒▒▒▓███████▓▓▓▓█████████████▓\n█████████████▓▓▓▓▓███████▓▓▓▓▓▓▓▓██████████████████▓▒▓▓▓▓▓▓▓▓▒▓▓███▓▓▓▓▓▒▓▓███████▓▓▓▓▓▓████████████\n███████████▓▓▓▓▓▓▓▓███████▓▓▓▓▓▓▓▓████████████████▓▒▒▓▓▓▓▓▓▓▓▓▓▓██▓▓▓▓▓▒▓▓████████▓▓▓▒▒▓▓███████████\n██████████▓▒▓▓▒▒▓▓▓████████▓▓▓▓▓▒▓▓███████████████▓▓▓▓▓▓▓▓▓▓▒▓▓▓█▓▓▓▓▓▓▒▓▓███████▓▓▓▓▓▒▓▒▓██████████\n██████████▓▓▓▓▓▒▓▓▓▓███████▓▓▒▓▒▓▓▒▓██████████████▓▓▒▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▒▓████████▓▓▓▓▓▒▒▓▒██████████\n██████████▓▓▒▒▒▒▒▓▓▓████████▓▓▒▓▓▓▒▒▓██████████████▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▒▓████████▓▓▓▓▓▓▒▒▒▓██████████\n███████████▓▓▒▒▒▒▒▓▓▓████████▓▓▓▓▓▒▒▓▓▓▓█████████████▓▓▓▒▒▒▓▓▓▓▓▓▓▓▒▓▒▓████████▓▓▓▓▓▓▓▒▒▓███████████\n████████████▓▓▓▒▒▒▒▒▓▓█████████▓▓▓▒▒▓▓▒▓▒▓▓██████████████▓▓▓▓▓▓▓▓▓▓▓▓▓████████▓▓▒▓▓▓▓▓▓▓████████████\n▓█████████████▓▓▒▒▒▓▒▒▓██████████▓▓▒▒▒▓▒▒▒▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▒▒▒▓▓▓▓▒▓▓█████████▓▓▓▓▓▓▒▓▓█████████████▓\n▒██████████████▓▓▓▒▓▓▓▓▒███████████▓▓▓▒▒▒▒▒▒▒▒▓▓▓▓▓▓▓▓▓▓▒▒▒▒▒▓▓▓▓▓█████████▓▓▒▓▓▓▒▓▓▓██████████████▒\n▒█████▓▓█████████▓▓▒▓▓▒▒▓▓███████████▓▓▓▓▓▓▒▒▓▓▓▓▓▓▓▓▒▓▓▒▒▒▓▒▓▓███████████▓▓▓▓▓▓▓▓▓██████████▓█████▒\n▒▒█████▓▓██████████▓▓▓▒▒▓▓▓▓█████████████▓▓▓▒▓▒▒▒▒▒▒▒▒▒▒▓▓▓█████████████▓▓▓▓▓▓▓▓▓▓█████████▓▒█████▓▒\n▒▒██████▓▓███████████▓▓▓▓▓▒▒▓▓████████████████████▓███████████████████▓▓▒▓▓▒▓▓▓▓██████████▓▓██████▒▒\n▒▒▒██████▓▓▓███████████▓▓▒▒▒▓▓▓▓▓██████████████████████████████████▓▓▒▓▒▓▒▒▓▓███████████▓▓▓██████▓▒▒\n▒▒▒▓███████▓▓▓███████████▓▓▓▓▓▓▒▒▒▓▓████████████████████████████▓▓▓▓▓▒▒▒▓▓▓███████████▓▓▓███████▓▒▒▒\n▒▒▒▒█████▓▓██▓▒▓█████████████▓▓▒▓▒▓▓▓▒▓▓▓██████████████████▓▓▓▓▓▒▓▓▒▓▓▓▓████████████▓▓▓██▓▓█████▒▒▒▒\n▒▒▒▒▒██████▓▓██▓▒▓██████████████▓▓▓▓▒▓▓▓▒▒▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▒▒▓▓▓▒▓▒▓▓▓██████████████▓▒▓██▓▓▓█████▒▒▒▒▒\n▒▒▒▒▒▒██████▓▓▓███▓▓▓█████████████████▓▓▓▒▒▓▓▓▓▓▓▓▓▓▓▒▒▒▓▓▒▓▓▓█████████████████▓▓▓▓██▓▓▓██████▒▒▒▒▒▒\n▒▒▒▒▒▒▒███████▓▒▓███▓▓▓▓████████████████████████████████████████████████████▓▓▓▓███▓▓▓███████▒▒▒▒░░░\n▒▒▒▒▒▒▒▒████████▓▒▓▓███▓▒▓▓██████████████████████████████████████████████▓▓▒▓███▓▓▒▓████████▒▒▒▒░░░░\n░░░▒▒▒▒▒▒▓█████████▓▒▓▓███▓▓▒▓▓██████████████████████████████████████▓▓▒▓▓███▓▓▒▓█████████▓▒▒▒▒▒░░░░\n░░░░░░░▒▒▒▓██████▓▓███▓▒▓▓████▓▓▒▒▓▓▓██████████████████████████▓▓▓▒▒▓▓████▓▓▒▓████▓██████▓▒▒░░░░░░░░\n░░░░░░░▒▒▒▒▒███████▓▓▓████▓▒▓▓▓█████▓▓▒▒▒▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▒▒▒▓▓█████▓▓▓▒▓████▓▓▓███████▒▒▒▒▒░░░░░░░\n░░░░░░░░░░▒▒▒▒███████▓▒▒▓▓████▓▓▒▒▒▓▓██████████████████████████▓▓▒▒▒▒▓█████▓▒▒▓███████▒▒▒▒▒▒▒░░░░░░░\n░░░░░░░░░░░░░▒▒▓█████████▓▓▒▓▓██████▓▓▓▒▒▒▒▒▓▓▓▓▓▓▓▓▓▓▓▓▒▒▒▒▒▓▓▓██████▓▓▒▒▓█████████▓▒░░░░░░░░░░░░░░\n░░░░░░░░░░░░░▒▒▒▒▓████████████▓▓▓▓▓▓████████████████████████████▓▓▓▒▓▓▓███████████▓▒▒▒░░░░░░░░░░░░░░\n░░░░░░░░░░░░░░░░░▒▒▒████████████████▓▓▓▓▓▒▒▓▓▓▓▓▓▓▓▓▓▓▓▓▓▒▒▒▓▓▓▓████████████████▒▒▒░░░░░░░░░░░░░░░░░\n░░░░░░░░░░░░░░░░░▒▒▒▒▒▓██████████▓▓▓████████████████████████████▓▓▓██████████▓▒▒▒▒▒░░░░░░░░░░░░░░░░░\n░░░░░░░░░░░░░░░░░░░░░▒▒▒▒▓████████████▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓███████████▓▒▒▒░░░░░░░░░░░░░░░░░░░░░░\n░░░░░░░░░░░░░░░░░░░░░▒▒▒▒▒▒▒▓▓█████████████████████████████████████████▓▒▒▒▒▒▒▒░░░░░░░░░░░░░░░░░░░░░\n░░░░░░░░░░░░░░░░░░░░░░░░░░░░▒▒▒▒▒▓████████████████████████████████▓▒▒▒▒▒░░░░░░░░░░░░░░░░░░░░░░░░░░░░\n░░░░░░░░░░░░░░░░░░░░░░░░░░░░▒▒▒▒▒▒▒▒▒▒▒▓▓▓████████████████▓▓▓▒▒▒▒▒▒▒▒▒▒▒░░░░░░░░░░░░░░░░░░░░░░░░░░░░", + "properties": { + "fontSize": 6, + "fontFamily": "Arial", + "fontColor": "#892C8B82", + "textAlign": "left", + "backgroundColor": "Black", + "padding": 0, + "borderRadius": 0, + "angle": 0, + "ue_properties": { + "widget_ue_connectable": {}, + "version": 
"7.7", + "input_ue_unconnectable": {} + } + }, + "color": "#fff0", + "bgcolor": "#fff0" + }, + { + "id": 577, + "type": "MarkdownNote", + "pos": [ + -7099.427937302375, + 7256.112987687375 + ], + "size": [ + 307.7225036621094, + 88 + ], + "flags": {}, + "order": 10, + "mode": 4, + "inputs": [], + "outputs": [], + "properties": { + "ue_properties": { + "widget_ue_connectable": {}, + "version": "7.1", + "input_ue_unconnectable": {} + } + }, + "widgets_values": [ + "### Face swap - 100-150" + ], + "color": "#323", + "bgcolor": "#535" + }, + { + "id": 689, + "type": "Label (rgthree)", + "pos": [ + -5428.262370503212, + 6614.841764235209 + ], + "size": [ + 1539.7421875, + 66 + ], + "flags": { + "pinned": true, + "allow_interaction": false + }, + "order": 11, + "mode": 0, + "inputs": [], + "outputs": [], + "title": "███ ████████████████████████████████████████████████████████████████████████████████████████████ ███\n███ ████████████████████████████████████████████████████████████████████████████████████████████ ███\n██████████████ █ ████ ██████████ ███████████ ██████████ ███████████ ██████████ ████ ███████████████ ", + "properties": { + "fontSize": 22, + "fontFamily": "Arial", + "fontColor": "#892C8B82", + "textAlign": "left", + "backgroundColor": "transparent", + "padding": 0, + "borderRadius": 0, + "angle": 90, + "ue_properties": { + "widget_ue_connectable": {}, + "version": "7.7", + "input_ue_unconnectable": {} + } + }, + "color": "#fff0", + "bgcolor": "#fff0" + }, + { + "id": 695, + "type": "Label (rgthree)", + "pos": [ + -10479.556093940162, + 6573.132034773632 + ], + "size": [ + 1539.7421875, + 66 + ], + "flags": { + "pinned": true, + "allow_interaction": false + }, + "order": 12, + "mode": 0, + "inputs": [], + "outputs": [], + "title": "███ ████████████████████████████████████████████████████████████████████████████████████████████ ███\n███ ████████████████████████████████████████████████████████████████████████████████████████████ ███\n██████████████ █ ████ ██████████ ███████████ ██████████ ███████████ ██████████ ████ ███████████████ ", + "properties": { + "fontSize": 22, + "fontFamily": "Arial", + "fontColor": "#892C8B82", + "textAlign": "left", + "backgroundColor": "transparent", + "padding": 0, + "borderRadius": 0, + "angle": 90, + "ue_properties": { + "widget_ue_connectable": {}, + "version": "7.7", + "input_ue_unconnectable": {} + } + }, + "color": "#fff0", + "bgcolor": "#fff0" + }, + { + "id": 693, + "type": "Label (rgthree)", + "pos": [ + -9727.359026595424, + 7386.527541595241 + ], + "size": [ + 1539.7421875, + 66 + ], + "flags": { + "pinned": true, + "allow_interaction": false + }, + "order": 13, + "mode": 0, + "inputs": [], + "outputs": [], + "title": "███ ████████████████████████████████████████████████████████████████████████████████████████████ ███\n███ ████████████████████████████████████████████████████████████████████████████████████████████ ███\n██████████████ █ ████ ██████████ ███████████ ██████████ ███████████ ██████████ ████ ███████████████ ", + "properties": { + "fontSize": 22, + "fontFamily": "Arial", + "fontColor": "#892C8B82", + "textAlign": "left", + "backgroundColor": "transparent", + "padding": 0, + "borderRadius": 0, + "angle": 0, + "ue_properties": { + "widget_ue_connectable": {}, + "version": "7.7", + "input_ue_unconnectable": {} + } + }, + "color": "#fff0", + "bgcolor": "#fff0" + }, + { + "id": 692, + "type": "Label (rgthree)", + "pos": [ + -8255.330531208223, + 7389.112188993839 + ], + "size": [ + 1539.7421875, + 66 + ], + "flags": { + "pinned": true, + 
"allow_interaction": false + }, + "order": 14, + "mode": 0, + "inputs": [], + "outputs": [], + "title": "███ ████████████████████████████████████████████████████████████████████████████████████████████ ███\n███ ████████████████████████████████████████████████████████████████████████████████████████████ ███\n██████████████ █ ████ ██████████ ███████████ ██████████ ███████████ ██████████ ████ ███████████████ ", + "properties": { + "fontSize": 22, + "fontFamily": "Arial", + "fontColor": "#892C8B82", + "textAlign": "left", + "backgroundColor": "transparent", + "padding": 0, + "borderRadius": 0, + "angle": 0, + "ue_properties": { + "widget_ue_connectable": {}, + "version": "7.7", + "input_ue_unconnectable": {} + } + }, + "color": "#fff0", + "bgcolor": "#fff0" + }, + { + "id": 687, + "type": "Label (rgthree)", + "pos": [ + -6677.065336305179, + 5850.49147658514 + ], + "size": [ + 2029.66015625, + 1595 + ], + "flags": { + "pinned": true, + "allow_interaction": false + }, + "order": 15, + "mode": 0, + "inputs": [], + "outputs": [], + "title": "███ ████████████████████████████████████████████████████████████████████████████████████████████ ███\n███ ████████████████████████████████████████████████████████████████████████████████████████████ ███\n██████████████ █ ████ ██████████ ███████████ ██████████ ███████████ ██████████ ████ ███████████████ \n█████ █████\n█████ █████\n ███ ███ \n█████ █████\n█████ █████\n█████ █ ███\n█████ █████\n█████ █████\n█████ █████\n█████ █████\n█████ █████\n████ ████\n████ ████\n█████ █████\n█████ █████\n█████ █████\n█████ █████\n█████ █████\n███ █ █ ███\n█████ █████\n█████ █████\n ███ ███ \n█████ █████\n█████ █████\n█████ █████\n█████ █████\n█████ █████\n ████ ███ \n█████ █████\n█████ █████\n███ █ █ ███\n█████ █████\n█████ █████\n█████ █████\n█████ █████\n█████ █████\n█████ ████\n███ ███\n█████ █████\n█████ █████\n ████ █████\n█████ █████\n█████ █████\n█████ █████\n█████ █████\n█████ █████\n ███ ███ \n█████ █████\n█████ █████\n █████████████ █ ████ ████ █████ ████ █ ████ ██████████ ████ █ ████ ██████████ ████ █ ████ █████████\n███ ████████████████████████████████████████████████████████████████████████████████████████████ ███\n███ ████████████████████████████████████████████████████████████████████████████████████████████ ███", + "properties": { + "fontSize": 29, + "fontFamily": "Arial", + "fontColor": "#892C8B82", + "textAlign": "left", + "backgroundColor": "transparent", + "padding": 0, + "borderRadius": 0, + "angle": 0, + "ue_properties": { + "widget_ue_connectable": {}, + "version": "7.7", + "input_ue_unconnectable": {} + } + }, + "color": "#fff0", + "bgcolor": "#fff0" + }, + { + "id": 699, + "type": "Label (rgthree)", + "pos": [ + -6184.185566679819, + 5784.241713103913 + ], + "size": [ + 1539.7421875, + 66 + ], + "flags": { + "pinned": true, + "allow_interaction": false + }, + "order": 16, + "mode": 0, + "inputs": [], + "outputs": [], + "title": "███ ████████████████████████████████████████████████████████████████████████████████████████████ ███\n███ ████████████████████████████████████████████████████████████████████████████████████████████ ███\n██████████████ █ ████ ██████████ ███████████ ██████████ ███████████ ██████████ ████ ███████████████ ", + "properties": { + "fontSize": 22, + "fontFamily": "Arial", + "fontColor": "#892C8B82", + "textAlign": "left", + "backgroundColor": "transparent", + "padding": 0, + "borderRadius": 0, + "angle": 0, + "ue_properties": { + "widget_ue_connectable": {}, + "version": "7.7", + "input_ue_unconnectable": {} + } + }, + "color": 
"#fff0", + "bgcolor": "#fff0" + }, + { + "id": 698, + "type": "Label (rgthree)", + "pos": [ + -6732.684860022839, + 5781.270086056612 + ], + "size": [ + 1539.7421875, + 66 + ], + "flags": { + "pinned": true, + "allow_interaction": false + }, + "order": 17, + "mode": 0, + "inputs": [], + "outputs": [], + "title": "███ ████████████████████████████████████████████████████████████████████████████████████████████ ███\n███ ████████████████████████████████████████████████████████████████████████████████████████████ ███\n██████████████ █ ████ ██████████ ███████████ ██████████ ███████████ ██████████ ████ ███████████████ ", + "properties": { + "fontSize": 22, + "fontFamily": "Arial", + "fontColor": "#892C8B82", + "textAlign": "left", + "backgroundColor": "transparent", + "padding": 0, + "borderRadius": 0, + "angle": 0, + "ue_properties": { + "widget_ue_connectable": {}, + "version": "7.7", + "input_ue_unconnectable": {} + } + }, + "color": "#fff0", + "bgcolor": "#fff0" + }, + { + "id": 697, + "type": "Label (rgthree)", + "pos": [ + -8230.707320105488, + 5783.592601498597 + ], + "size": [ + 1539.7421875, + 66 + ], + "flags": { + "pinned": true, + "allow_interaction": false + }, + "order": 18, + "mode": 0, + "inputs": [], + "outputs": [], + "title": "███ ████████████████████████████████████████████████████████████████████████████████████████████ ███\n███ ████████████████████████████████████████████████████████████████████████████████████████████ ███\n██████████████ █ ████ ██████████ ███████████ ██████████ ███████████ ██████████ ████ ███████████████ ", + "properties": { + "fontSize": 22, + "fontFamily": "Arial", + "fontColor": "#892C8B82", + "textAlign": "left", + "backgroundColor": "transparent", + "padding": 0, + "borderRadius": 0, + "angle": 0, + "ue_properties": { + "widget_ue_connectable": {}, + "version": "7.7", + "input_ue_unconnectable": {} + } + }, + "color": "#fff0", + "bgcolor": "#fff0" + }, + { + "id": 696, + "type": "Label (rgthree)", + "pos": [ + -9720.57138729446, + 5784.753139239801 + ], + "size": [ + 1539.7421875, + 66 + ], + "flags": { + "pinned": true, + "allow_interaction": false + }, + "order": 19, + "mode": 0, + "inputs": [], + "outputs": [], + "title": "███ ████████████████████████████████████████████████████████████████████████████████████████████ ███\n███ ████████████████████████████████████████████████████████████████████████████████████████████ ███\n██████████████ █ ████ ██████████ ███████████ ██████████ ███████████ ██████████ ████ ███████████████ ", + "properties": { + "fontSize": 22, + "fontFamily": "Arial", + "fontColor": "#892C8B82", + "textAlign": "left", + "backgroundColor": "transparent", + "padding": 0, + "borderRadius": 0, + "angle": 0, + "ue_properties": { + "widget_ue_connectable": {}, + "version": "7.7", + "input_ue_unconnectable": {} + } + }, + "color": "#fff0", + "bgcolor": "#fff0" + }, + { + "id": 76, + "type": "LoadImage", + "pos": [ + -9324.633004286721, + 5913.652354165624 + ], + "size": [ + 422.96553058008203, + 894.8161495851265 + ], + "flags": { + "collapsed": false + }, + "order": 20, + "mode": 0, + "inputs": [], + "outputs": [ + { + "name": "IMAGE", + "type": "IMAGE", + "links": [ + 626 + ] + }, + { + "name": "MASK", + "type": "MASK", + "links": null + } + ], + "properties": { + "cnr_id": "comfy-core", + "ver": "0.3.75", + "Node name for S&R": "LoadImage", + "ue_properties": { + "widget_ue_connectable": {}, + "version": "7.5.2", + "input_ue_unconnectable": {} + }, + "_cache": "d0852aacd9419dc54f287a17fb358b86e9cfc8d85e1404052f8892dd11538b80" + }, + 
"widgets_values": [ + "hf_20260224_160523_3ed2b45b-3fa3-4efd-9264-4b1f4ac77c79 (2).png", + "image" + ], + "color": "#323", + "bgcolor": "#535" + }, + { + "id": 22, + "type": "WanVideoModelLoader", + "pos": [ + -9618.89319045479, + 6273.53123545648 + ], + "size": [ + 477.4410095214844, + 338 + ], + "flags": { + "collapsed": true + }, + "order": 21, + "mode": 0, + "inputs": [ + { + "name": "compile_args", + "shape": 7, + "type": "WANCOMPILEARGS", + "link": null + }, + { + "name": "block_swap_args", + "shape": 7, + "type": "BLOCKSWAPARGS", + "link": null + }, + { + "name": "lora", + "shape": 7, + "type": "WANVIDLORA", + "link": null + }, + { + "name": "vram_management_args", + "shape": 7, + "type": "VRAM_MANAGEMENTARGS", + "link": null + }, + { + "name": "extra_model", + "shape": 7, + "type": "VACEPATH", + "link": null + }, + { + "name": "fantasytalking_model", + "shape": 7, + "type": "FANTASYTALKINGMODEL", + "link": null + }, + { + "name": "multitalk_model", + "shape": 7, + "type": "MULTITALKMODEL", + "link": null + }, + { + "name": "fantasyportrait_model", + "shape": 7, + "type": "FANTASYPORTRAITMODEL", + "link": null + }, + { + "name": "vace_model", + "shape": 7, + "type": "VACEPATH", + "link": null + } + ], + "outputs": [ + { + "name": "model", + "type": "WANVIDEOMODEL", + "slot_index": 0, + "links": [ + 471 + ] + } + ], + "title": "LOADER", + "properties": { + "aux_id": "kijai/ComfyUI-WanVideoWrapper", + "ver": "d9b1f4d1a5aea91d101ae97a54714a5861af3f50", + "Node name for S&R": "WanVideoModelLoader", + "cnr_id": "ComfyUI-WanVideoWrapper", + "ue_properties": { + "widget_ue_connectable": {}, + "version": "7.5.2", + "input_ue_unconnectable": {} + }, + "_cache": "d0852aacd9419dc54f287a17fb358b86e9cfc8d85e1404052f8892dd11538b80" + }, + "widgets_values": [ + "WanModel.safetensors", + "fp16", + "fp8_e4m3fn", + "offload_device", + "sdpa", + "default" + ], + "color": "#323", + "bgcolor": "#535" + }, + { + "id": 349, + "type": "WanVideoVAELoader", + "pos": [ + -9617.734453724594, + 6307.727659653327 + ], + "size": [ + 210, + 130 + ], + "flags": { + "collapsed": true + }, + "order": 22, + "mode": 0, + "inputs": [ + { + "name": "compile_args", + "shape": 7, + "type": "WANCOMPILEARGS", + "link": null + } + ], + "outputs": [ + { + "name": "vae", + "type": "WANVAE", + "slot_index": 0, + "links": [ + 460 + ] + } + ], + "title": "LOADER", + "properties": { + "aux_id": "kijai/ComfyUI-WanVideoWrapper", + "ver": "d9b1f4d1a5aea91d101ae97a54714a5861af3f50", + "Node name for S&R": "WanVideoVAELoader", + "cnr_id": "ComfyUI-WanVideoWrapper", + "ue_properties": { + "widget_ue_connectable": {}, + "version": "7.5.2", + "input_ue_unconnectable": {} + }, + "_cache": "d0852aacd9419dc54f287a17fb358b86e9cfc8d85e1404052f8892dd11538b80" + }, + "widgets_values": [ + "vae.safetensors", + "fp16", + false, + false + ], + "color": "#323", + "bgcolor": "#535" + }, + { + "id": 59, + "type": "CLIPVisionLoader", + "pos": [ + -9618.78092849691, + 6338.382258022964 + ], + "size": [ + 315, + 58 + ], + "flags": { + "collapsed": true + }, + "order": 23, + "mode": 0, + "inputs": [], + "outputs": [ + { + "name": "CLIP_VISION", + "type": "CLIP_VISION", + "links": [ + 169 + ] + } + ], + "title": "LOADER", + "properties": { + "cnr_id": "comfy-core", + "ver": "0.3.26", + "Node name for S&R": "CLIPVisionLoader", + "ue_properties": { + "widget_ue_connectable": {}, + "version": "7.5.2", + "input_ue_unconnectable": {} + }, + "_cache": "d0852aacd9419dc54f287a17fb358b86e9cfc8d85e1404052f8892dd11538b80" + }, + "widgets_values": [ + 
"klip_vision.safetensors" + ], + "color": "#323", + "bgcolor": "#535" + }, + { + "id": 106, + "type": "SetNode", + "pos": [ + -9616.378454276242, + 6369.621669860311 + ], + "size": [ + 210, + 60 + ], + "flags": { + "collapsed": true + }, + "order": 52, + "mode": 0, + "inputs": [ + { + "name": "CLIP_VISION", + "type": "CLIP_VISION", + "link": 169 + } + ], + "outputs": [ + { + "name": "*", + "type": "*", + "links": [] + } + ], + "title": "LOADER", + "properties": { + "previousName": "clip_vision", + "ue_properties": { + "widget_ue_connectable": {}, + "version": "7.5.2", + "input_ue_unconnectable": {} + }, + "_cache": "d0852aacd9419dc54f287a17fb358b86e9cfc8d85e1404052f8892dd11538b80" + }, + "widgets_values": [ + "clip_vision" + ] + }, + { + "id": 354, + "type": "WanVideoLoraSelectMulti", + "pos": [ + -9615.487545971542, + 6399.665014559637 + ], + "size": [ + 624.7625759098177, + 342 + ], + "flags": { + "collapsed": true + }, + "order": 24, + "mode": 0, + "inputs": [ + { + "name": "prev_lora", + "shape": 7, + "type": "WANVIDLORA", + "link": null + }, + { + "name": "blocks", + "shape": 7, + "type": "SELECTEDBLOCKS", + "link": null + } + ], + "outputs": [ + { + "name": "lora", + "type": "WANVIDLORA", + "links": [ + 463 + ] + } + ], + "title": "LOADER", + "properties": { + "aux_id": "kijai/ComfyUI-WanVideoWrapper", + "ver": "1.4.5", + "Node name for S&R": "WanVideoLoraSelectMulti", + "cnr_id": "comfyui-wanvideowrapper", + "_cache": "d0852aacd9419dc54f287a17fb358b86e9cfc8d85e1404052f8892dd11538b80", + "ue_properties": { + "widget_ue_connectable": {}, + "version": "7.7", + "input_ue_unconnectable": {} + } + }, + "widgets_values": [ + "light.safetensors", + 1, + "wan.reworked.safetensors", + 0.3, + "WanPusa.safetensors", + 0.9, + "WanFun.reworked.safetensors", + 0.5, + "none", + 0, + false, + false + ], + "color": "#323", + "bgcolor": "#535" + }, + { + "id": 78, + "type": "WanVideoSetLoRAs", + "pos": [ + -9612.957059127162, + 6432.090159882196 + ], + "size": [ + 178.5533203125, + 46 + ], + "flags": { + "collapsed": true + }, + "order": 53, + "mode": 0, + "inputs": [ + { + "name": "model", + "type": "WANVIDEOMODEL", + "link": 471 + }, + { + "name": "lora", + "shape": 7, + "type": "WANVIDLORA", + "link": 463 + } + ], + "outputs": [ + { + "name": "model", + "type": "WANVIDEOMODEL", + "links": [ + 437 + ] + } + ], + "title": "LOADER", + "properties": { + "aux_id": "kijai/ComfyUI-WanVideoWrapper", + "ver": "772642b4f132a077f655ff24d11cb99ef108aff5", + "Node name for S&R": "WanVideoSetLoRAs", + "cnr_id": "ComfyUI-WanVideoWrapper", + "ue_properties": { + "widget_ue_connectable": {}, + "version": "7.5.2", + "input_ue_unconnectable": {} + }, + "_cache": "d0852aacd9419dc54f287a17fb358b86e9cfc8d85e1404052f8892dd11538b80" + }, + "widgets_values": [], + "color": "#323", + "bgcolor": "#535" + }, + { + "id": 491, + "type": "CLIPLoader", + "pos": [ + -9614.178436364404, + 6464.112798787537 + ], + "size": [ + 369.9965894981069, + 106 + ], + "flags": { + "collapsed": true + }, + "order": 25, + "mode": 0, + "inputs": [], + "outputs": [ + { + "name": "CLIP", + "type": "CLIP", + "links": [ + 625 + ] + } + ], + "title": "LOADER", + "properties": { + "cnr_id": "comfy-core", + "ver": "0.3.59", + "Node name for S&R": "CLIPLoader", + "_cache": "d0852aacd9419dc54f287a17fb358b86e9cfc8d85e1404052f8892dd11538b80", + "ue_properties": { + "widget_ue_connectable": {}, + "version": "7.7", + "input_ue_unconnectable": {} + } + }, + "widgets_values": [ + "text_enc.safetensors", + "wan", + "default" + ], + "color": "#323", + "bgcolor": 
"#535" + }, + { + "id": 342, + "type": "SetNode", + "pos": [ + -9493.134394812247, + 6467.258501643219 + ], + "size": [ + 210, + 58 + ], + "flags": { + "collapsed": true + }, + "order": 54, + "mode": 0, + "inputs": [ + { + "name": "CLIP", + "type": "CLIP", + "link": 625 + } + ], + "outputs": [ + { + "name": "*", + "type": "*", + "links": null + } + ], + "title": "LOADER", + "properties": { + "previousName": "", + "_cache": "d0852aacd9419dc54f287a17fb358b86e9cfc8d85e1404052f8892dd11538b80", + "ue_properties": { + "widget_ue_connectable": {}, + "version": "7.7", + "input_ue_unconnectable": {} + } + }, + "widgets_values": [ + "clip" + ], + "color": "#432", + "bgcolor": "#653" + }, + { + "id": 348, + "type": "SetNode", + "pos": [ + -9613.460317929437, + 6496.242637452796 + ], + "size": [ + 210, + 58 + ], + "flags": { + "collapsed": true + }, + "order": 51, + "mode": 0, + "inputs": [ + { + "name": "WANVAE", + "type": "WANVAE", + "link": 460 + } + ], + "outputs": [ + { + "name": "*", + "type": "*", + "links": [] + } + ], + "title": "LOADER", + "properties": { + "previousName": "VAE", + "ue_properties": { + "widget_ue_connectable": {}, + "version": "7.5.2", + "input_ue_unconnectable": {} + }, + "_cache": "d0852aacd9419dc54f287a17fb358b86e9cfc8d85e1404052f8892dd11538b80" + }, + "widgets_values": [ + "VAE" + ], + "color": "#322", + "bgcolor": "#533" + }, + { + "id": 321, + "type": "SetNode", + "pos": [ + -9502.941735208487, + 6494.117277772619 + ], + "size": [ + 210, + 58 + ], + "flags": { + "collapsed": true + }, + "order": 66, + "mode": 0, + "inputs": [ + { + "name": "WANVIDEOMODEL", + "type": "WANVIDEOMODEL", + "link": 437 + } + ], + "outputs": [ + { + "name": "*", + "type": "*", + "links": [] + } + ], + "title": "LOADER", + "properties": { + "previousName": "model", + "_cache": "d0852aacd9419dc54f287a17fb358b86e9cfc8d85e1404052f8892dd11538b80", + "ue_properties": { + "widget_ue_connectable": {}, + "version": "7.7", + "input_ue_unconnectable": {} + } + }, + "widgets_values": [ + "model" + ] + }, + { + "id": 488, + "type": "CLIPTextEncode", + "pos": [ + -9654.934925848074, + 6999.958479007995 + ], + "size": [ + 1123.2634283020552, + 165.72944250813725 + ], + "flags": {}, + "order": 55, + "mode": 0, + "inputs": [ + { + "name": "clip", + "type": "CLIP", + "link": 623 + } + ], + "outputs": [ + { + "name": "CONDITIONING", + "type": "CONDITIONING", + "links": [ + 622 + ] + } + ], + "title": "NEGATIVE PROMPT", + "properties": { + "cnr_id": "comfy-core", + "ver": "0.3.59", + "Node name for S&R": "CLIPTextEncode", + "_cache": "d0852aacd9419dc54f287a17fb358b86e9cfc8d85e1404052f8892dd11538b80", + "ue_properties": { + "widget_ue_connectable": {}, + "version": "7.7", + "input_ue_unconnectable": {} + } + }, + "widgets_values": [ + "色调艳丽,过曝,静态,细节模糊不清,字幕,风格,作品,画作,画面,静止,整体发灰,最差质量,低质量,JPEG压缩残留,丑陋的,残缺的,多余的手指,画得不好的手部,画得不好的脸部,畸形的,毁容的,形态畸形的肢体,手指融合,静止不动的画面,杂乱的背景,三条腿,背景人很多,倒着走" + ], + "color": "#323", + "bgcolor": "#535" + }, + { + "id": 28, + "type": "WanVideoDecode", + "pos": [ + -6963.340697192938, + 6111.12578022894 + ], + "size": [ + 288.7800116170001, + 271.89269453390807 + ], + "flags": {}, + "order": 84, + "mode": 0, + "inputs": [ + { + "name": "vae", + "type": "WANVAE", + "link": 743 + }, + { + "name": "samples", + "type": "LATENT", + "link": 372 + } + ], + "outputs": [ + { + "name": "images", + "type": "IMAGE", + "slot_index": 0, + "links": [ + 242, + 752 + ] + } + ], + "title": "DC", + "properties": { + "aux_id": "kijai/ComfyUI-WanVideoWrapper", + "ver": "d9b1f4d1a5aea91d101ae97a54714a5861af3f50", + "Node 
name for S&R": "WanVideoDecode", + "cnr_id": "ComfyUI-WanVideoWrapper", + "ue_properties": { + "widget_ue_connectable": {}, + "version": "7.5.2", + "input_ue_unconnectable": {} + }, + "_cache": "d0852aacd9419dc54f287a17fb358b86e9cfc8d85e1404052f8892dd11538b80" + }, + "widgets_values": [ + false, + 272, + 272, + 144, + 128, + "default" + ], + "color": "#323", + "bgcolor": "#535" + }, + { + "id": 158, + "type": "ImageResizeKJv2", + "pos": [ + -6961.199939280084, + 6411.356496056187 + ], + "size": [ + 284.86436280241344, + 388.4758747868412 + ], + "flags": {}, + "order": 87, + "mode": 0, + "inputs": [ + { + "name": "image", + "type": "IMAGE", + "link": 242 + }, + { + "name": "mask", + "shape": 7, + "type": "MASK", + "link": null + } + ], + "outputs": [ + { + "name": "IMAGE", + "type": "IMAGE", + "links": [ + 783 + ] + }, + { + "name": "width", + "type": "INT", + "links": [] + }, + { + "name": "height", + "type": "INT", + "links": [] + }, + { + "name": "mask", + "type": "MASK", + "links": null + } + ], + "title": "IMAGE", + "properties": { + "aux_id": "kijai/ComfyUI-KJNodes", + "ver": "ad37ce656c13e9abea002b46e3a89be3dba32355", + "Node name for S&R": "ImageResizeKJv2", + "cnr_id": "comfyui-kjnodes", + "ue_properties": { + "widget_ue_connectable": { + "width": true, + "height": true + }, + "version": "7.5.2", + "input_ue_unconnectable": {} + }, + "_cache": "d0852aacd9419dc54f287a17fb358b86e9cfc8d85e1404052f8892dd11538b80" + }, + "widgets_values": [ + 720, + 1280, + "lanczos", + "crop", + "0, 0, 0", + "center", + 16, + "cpu" + ], + "color": "#323", + "bgcolor": "#535" + }, + { + "id": 314, + "type": "SetNode", + "pos": [ + -8541.268448614215, + 6470.629258338345 + ], + "size": [ + 210, + 60 + ], + "flags": { + "collapsed": true + }, + "order": 70, + "mode": 0, + "inputs": [ + { + "name": "IMAGE", + "type": "IMAGE", + "link": 422 + } + ], + "outputs": [ + { + "name": "*", + "type": "*", + "links": null + } + ], + "title": "SET", + "properties": { + "previousName": "face_images", + "_cache": "d0852aacd9419dc54f287a17fb358b86e9cfc8d85e1404052f8892dd11538b80", + "ue_properties": { + "widget_ue_connectable": {}, + "version": "7.7", + "input_ue_unconnectable": {} + } + }, + "widgets_values": [ + "face_images" + ], + "color": "#2a363b", + "bgcolor": "#3f5159" + }, + { + "id": 315, + "type": "GetNode", + "pos": [ + -8878.96390853805, + 6232.840887764706 + ], + "size": [ + 210, + 60 + ], + "flags": { + "collapsed": false + }, + "order": 26, + "mode": 0, + "inputs": [], + "outputs": [ + { + "name": "IMAGE", + "type": "IMAGE", + "links": [ + 424 + ] + } + ], + "title": "SET", + "properties": { + "_cache": "d0852aacd9419dc54f287a17fb358b86e9cfc8d85e1404052f8892dd11538b80", + "ue_properties": { + "widget_ue_connectable": {}, + "version": "7.7", + "input_ue_unconnectable": {} + } + }, + "widgets_values": [ + "face_images" + ], + "color": "#323", + "bgcolor": "#535" + }, + { + "id": 291, + "type": "GetNode", + "pos": [ + -8880.213692700387, + 6320.663465087024 + ], + "size": [ + 210, + 60 + ], + "flags": { + "collapsed": false + }, + "order": 27, + "mode": 0, + "inputs": [], + "outputs": [ + { + "name": "INT", + "type": "INT", + "links": [ + 393 + ] + } + ], + "title": "SET", + "properties": { + "_cache": "d0852aacd9419dc54f287a17fb358b86e9cfc8d85e1404052f8892dd11538b80", + "ue_properties": { + "widget_ue_connectable": {}, + "version": "7.7", + "input_ue_unconnectable": {} + } + }, + "widgets_values": [ + "num_frames" + ], + "color": "#323", + "bgcolor": "#535" + }, + { + "id": 128, + "type": "GetNode", + 
"pos": [ + -8888.712930309679, + 6417.644397305593 + ], + "size": [ + 210, + 58 + ], + "flags": { + "collapsed": true + }, + "order": 28, + "mode": 0, + "inputs": [], + "outputs": [ + { + "name": "CLIP_VISION", + "type": "CLIP_VISION", + "links": [ + 366 + ] + } + ], + "title": "SET", + "properties": { + "ue_properties": { + "widget_ue_connectable": {}, + "version": "7.5.2", + "input_ue_unconnectable": {} + }, + "_cache": "d0852aacd9419dc54f287a17fb358b86e9cfc8d85e1404052f8892dd11538b80" + }, + "widgets_values": [ + "clip_vision" + ], + "color": "#323", + "bgcolor": "#535" + }, + { + "id": 266, + "type": "GetNode", + "pos": [ + -8736.894852089747, + 6416.53237199065 + ], + "size": [ + 210, + 58 + ], + "flags": { + "collapsed": true + }, + "order": 29, + "mode": 0, + "inputs": [], + "outputs": [ + { + "name": "WANVAE", + "type": "WANVAE", + "links": [ + 346 + ] + } + ], + "title": "SET", + "properties": { + "_cache": "d0852aacd9419dc54f287a17fb358b86e9cfc8d85e1404052f8892dd11538b80", + "ue_properties": { + "widget_ue_connectable": {}, + "version": "7.7", + "input_ue_unconnectable": {} + } + }, + "widgets_values": [ + "VAE" + ], + "color": "#323", + "bgcolor": "#535" + }, + { + "id": 313, + "type": "GetNode", + "pos": [ + -8768.420180842317, + 6458.552129686968 + ], + "size": [ + 210, + 60 + ], + "flags": { + "collapsed": true + }, + "order": 30, + "mode": 0, + "inputs": [], + "outputs": [ + { + "name": "IMAGE", + "type": "IMAGE", + "links": [ + 423 + ] + } + ], + "title": "SET", + "properties": { + "_cache": "d0852aacd9419dc54f287a17fb358b86e9cfc8d85e1404052f8892dd11538b80", + "ue_properties": { + "widget_ue_connectable": {}, + "version": "7.7", + "input_ue_unconnectable": {} + } + }, + "widgets_values": [ + "pose_images" + ], + "color": "#323", + "bgcolor": "#535" + }, + { + "id": 102, + "type": "GetNode", + "pos": [ + -8893.295410824212, + 6487.923708144739 + ], + "size": [ + 210, + 58 + ], + "flags": { + "collapsed": false + }, + "order": 31, + "mode": 0, + "inputs": [], + "outputs": [ + { + "name": "INT", + "type": "INT", + "links": [ + 163, + 359 + ] + } + ], + "title": "SET", + "properties": { + "ue_properties": { + "widget_ue_connectable": {}, + "version": "7.5.2", + "input_ue_unconnectable": {} + }, + "_cache": "d0852aacd9419dc54f287a17fb358b86e9cfc8d85e1404052f8892dd11538b80" + }, + "widgets_values": [ + "width" + ], + "color": "#323", + "bgcolor": "#535" + }, + { + "id": 494, + "type": "GetNode", + "pos": [ + -8893.977248865776, + 6566.634782994484 + ], + "size": [ + 210, + 60 + ], + "flags": { + "collapsed": false + }, + "order": 32, + "mode": 0, + "inputs": [], + "outputs": [ + { + "name": "CLIP", + "type": "CLIP", + "links": [ + 623, + 624 + ] + } + ], + "title": "SET", + "properties": { + "_cache": "d0852aacd9419dc54f287a17fb358b86e9cfc8d85e1404052f8892dd11538b80", + "ue_properties": { + "widget_ue_connectable": {}, + "version": "7.7", + "input_ue_unconnectable": {} + } + }, + "widgets_values": [ + "clip" + ], + "color": "#323", + "bgcolor": "#535" + }, + { + "id": 309, + "type": "GetImageSizeAndCount", + "pos": [ + -8901.77574853287, + 6718.1120874367225 + ], + "size": [ + 220.59356935482677, + 87.35130570931028 + ], + "flags": {}, + "order": 60, + "mode": 0, + "inputs": [ + { + "name": "image", + "type": "IMAGE", + "link": 414 + } + ], + "outputs": [ + { + "name": "image", + "type": "IMAGE", + "links": [] + }, + { + "label": "720 width", + "name": "width", + "type": "INT", + "links": [ + 412 + ] + }, + { + "label": "1280 height", + "name": "height", + "type": "INT", + 
"links": [ + 413 + ] + }, + { + "label": "397 count", + "name": "count", + "type": "INT", + "links": [] + } + ], + "title": "SET", + "properties": { + "aux_id": "kijai/ComfyUI-KJNodes", + "ver": "f37df472dfb325f1f72c90e2df56e72f40f850f0", + "Node name for S&R": "GetImageSizeAndCount", + "cnr_id": "comfyui-kjnodes", + "ue_properties": { + "widget_ue_connectable": {}, + "version": "7.5.2", + "input_ue_unconnectable": {} + }, + "_cache": "d0852aacd9419dc54f287a17fb358b86e9cfc8d85e1404052f8892dd11538b80" + }, + "widgets_values": [], + "color": "#323", + "bgcolor": "#535" + }, + { + "id": 307, + "type": "SetNode", + "pos": [ + -8679.388605910492, + 6746.486585347318 + ], + "size": [ + 210, + 58 + ], + "flags": { + "collapsed": true + }, + "order": 72, + "mode": 0, + "inputs": [ + { + "name": "INT", + "type": "INT", + "link": 412 + } + ], + "outputs": [ + { + "name": "INT", + "type": "INT", + "links": [] + } + ], + "title": "SET", + "properties": { + "previousName": "width", + "ue_properties": { + "widget_ue_connectable": {}, + "version": "7.5.2", + "input_ue_unconnectable": {} + }, + "_cache": "d0852aacd9419dc54f287a17fb358b86e9cfc8d85e1404052f8892dd11538b80" + }, + "widgets_values": [ + "width" + ], + "color": "#1b4669", + "bgcolor": "#29699c" + }, + { + "id": 308, + "type": "SetNode", + "pos": [ + -8682.070038264128, + 6787.285530670832 + ], + "size": [ + 210, + 50 + ], + "flags": { + "collapsed": true + }, + "order": 73, + "mode": 0, + "inputs": [ + { + "name": "INT", + "type": "INT", + "link": 413 + } + ], + "outputs": [ + { + "name": "INT", + "type": "INT", + "links": [] + } + ], + "title": "SET", + "properties": { + "previousName": "height", + "ue_properties": { + "widget_ue_connectable": {}, + "version": "7.5.2", + "input_ue_unconnectable": {} + }, + "_cache": "d0852aacd9419dc54f287a17fb358b86e9cfc8d85e1404052f8892dd11538b80" + }, + "widgets_values": [ + "height" + ], + "color": "#1b4669", + "bgcolor": "#29699c" + }, + { + "id": 490, + "type": "WanVideoTextEmbedBridge", + "pos": [ + -7835.362313799308, + 6475.898359650881 + ], + "size": [ + 555.1503178151137, + 373.72746903138886 + ], + "flags": { + "collapsed": true + }, + "order": 67, + "mode": 0, + "inputs": [ + { + "name": "positive", + "type": "CONDITIONING", + "link": 616 + }, + { + "name": "negative", + "shape": 7, + "type": "CONDITIONING", + "link": 622 + } + ], + "outputs": [ + { + "name": "text_embeds", + "type": "WANVIDEOTEXTEMBEDS", + "links": [ + 620 + ] + } + ], + "title": "SET", + "properties": { + "aux_id": "kijai/ComfyUI-WanVideoWrapper", + "ver": "e4627466f501d48344620b4d4d385b8abedaea48", + "Node name for S&R": "WanVideoTextEmbedBridge", + "cnr_id": "ComfyUI-WanVideoWrapper", + "_cache": "d0852aacd9419dc54f287a17fb358b86e9cfc8d85e1404052f8892dd11538b80", + "ue_properties": { + "widget_ue_connectable": {}, + "version": "7.7", + "input_ue_unconnectable": {} + } + }, + "widgets_values": [], + "color": "#323", + "bgcolor": "#535" + }, + { + "id": 562, + "type": "TSColorMatch", + "pos": [ + -7835.420811413771, + 6178.037055486034 + ], + "size": [ + 553.8801506549271, + 301.12165844757874 + ], + "flags": {}, + "order": 88, + "mode": 0, + "inputs": [ + { + "name": "images", + "type": "IMAGE", + "link": 752 + } + ], + "outputs": [ + { + "name": "IMAGE", + "type": "IMAGE", + "links": [ + 785 + ] + } + ], + "title": "CHUNK", + "properties": { + "aux_id": "teskor-hub/NEW-UTILS", + "ver": "1e29548a5ff751b73c1a5a2a3f68d20bf2bd8bed", + "Node name for S&R": "TSColorMatch", + "ue_properties": { + "widget_ue_connectable": {}, + 
"version": "7.7", + "input_ue_unconnectable": {} + } + }, + "widgets_values": [ + 81 + ], + "color": "#323", + "bgcolor": "#535" + }, + { + "id": 109, + "type": "GetNode", + "pos": [ + -7835.2639076413225, + 6113.163676395045 + ], + "size": [ + 556.997285076427, + 58 + ], + "flags": { + "collapsed": false + }, + "order": 33, + "mode": 0, + "inputs": [], + "outputs": [ + { + "name": "WANVAE", + "type": "WANVAE", + "links": [ + 743, + 777 + ] + } + ], + "title": "MODEL", + "properties": { + "ue_properties": { + "widget_ue_connectable": {}, + "version": "7.5.2", + "input_ue_unconnectable": {} + }, + "_cache": "d0852aacd9419dc54f287a17fb358b86e9cfc8d85e1404052f8892dd11538b80" + }, + "widgets_values": [ + "VAE" + ], + "color": "#323", + "bgcolor": "#535" + }, + { + "id": 273, + "type": "WanVideoSampler", + "pos": [ + -7435.66754462583, + 6109.389667566945 + ], + "size": [ + 474.61097003611394, + 705.2555368887706 + ], + "flags": {}, + "order": 81, + "mode": 0, + "inputs": [ + { + "name": "model", + "type": "WANVIDEOMODEL", + "link": 730 + }, + { + "name": "image_embeds", + "type": "WANVIDIMAGE_EMBEDS", + "link": 731 + }, + { + "name": "text_embeds", + "shape": 7, + "type": "WANVIDEOTEXTEMBEDS", + "link": 620 + }, + { + "name": "samples", + "shape": 7, + "type": "LATENT", + "link": null + }, + { + "name": "feta_args", + "shape": 7, + "type": "FETAARGS", + "link": null + }, + { + "name": "context_options", + "shape": 7, + "type": "WANVIDCONTEXT", + "link": null + }, + { + "name": "cache_args", + "shape": 7, + "type": "CACHEARGS", + "link": null + }, + { + "name": "flowedit_args", + "shape": 7, + "type": "FLOWEDITARGS", + "link": null + }, + { + "name": "slg_args", + "shape": 7, + "type": "SLGARGS", + "link": null + }, + { + "name": "loop_args", + "shape": 7, + "type": "LOOPARGS", + "link": null + }, + { + "name": "experimental_args", + "shape": 7, + "type": "EXPERIMENTALARGS", + "link": null + }, + { + "name": "sigmas", + "shape": 7, + "type": "SIGMAS", + "link": null + }, + { + "name": "unianimate_poses", + "shape": 7, + "type": "UNIANIMATE_POSE", + "link": null + }, + { + "name": "fantasytalking_embeds", + "shape": 7, + "type": "FANTASYTALKING_EMBEDS", + "link": null + }, + { + "name": "uni3c_embeds", + "shape": 7, + "type": "UNI3C_EMBEDS", + "link": 729 + }, + { + "name": "multitalk_embeds", + "shape": 7, + "type": "MULTITALK_EMBEDS", + "link": null + }, + { + "name": "freeinit_args", + "shape": 7, + "type": "FREEINITARGS", + "link": null + } + ], + "outputs": [ + { + "name": "samples", + "type": "LATENT", + "slot_index": 0, + "links": [ + 372 + ] + }, + { + "name": "denoised_samples", + "type": "LATENT", + "links": null + } + ], + "title": "L40", + "properties": { + "aux_id": "kijai/ComfyUI-WanVideoWrapper", + "ver": "e5ef9752a7e846b232fc05fd993327a2e870a788", + "Node name for S&R": "WanVideoSampler", + "cnr_id": "ComfyUI-WanVideoWrapper", + "_cache": "d0852aacd9419dc54f287a17fb358b86e9cfc8d85e1404052f8892dd11538b80", + "ue_properties": { + "widget_ue_connectable": {}, + "version": "7.7", + "input_ue_unconnectable": {} + } + }, + "widgets_values": [ + 4, + 1, + 5, + 450251017970075, + "randomize", + true, + "dpm++_sde", + 0, + 0.75, + "", + "comfy", + 0, + -1, + false + ], + "color": "#323", + "bgcolor": "#535" + }, + { + "id": 533, + "type": "DrawViTPose", + "pos": [ + -8124.989659321877, + 6771.186017526201 + ], + "size": [ + 277.32624744481564, + 178 + ], + "flags": { + "collapsed": true + }, + "order": 69, + "mode": 0, + "inputs": [ + { + "name": "pose_data", + "type": "POSEDATA", + 
"link": 707 + }, + { + "name": "width", + "type": "INT", + "widget": { + "name": "width" + }, + "link": 701 + }, + { + "name": "height", + "type": "INT", + "widget": { + "name": "height" + }, + "link": 702 + } + ], + "outputs": [ + { + "name": "pose_images", + "type": "IMAGE", + "links": [] + } + ], + "title": "SET", + "properties": { + "aux_id": "kijai/ComfyUI-WanAnimatePreprocess", + "ver": "1a35b81a418bbba093356ad19b19bf2a76a24f4e", + "Node name for S&R": "DrawViTPose", + "cnr_id": "ComfyUI-WanAnimatePreprocess", + "ue_properties": { + "widget_ue_connectable": { + "width": true, + "height": true + }, + "version": "7.7", + "input_ue_unconnectable": {} + } + }, + "widgets_values": [ + 720, + 1280, + 16, + -1, + -1, + "True" + ], + "color": "#323", + "bgcolor": "#535" + }, + { + "id": 306, + "type": "GetNode", + "pos": [ + -8281.591047688427, + 6770.556559313393 + ], + "size": [ + 210, + 58 + ], + "flags": { + "collapsed": true + }, + "order": 34, + "mode": 0, + "inputs": [], + "outputs": [ + { + "name": "IMAGE", + "type": "IMAGE", + "links": [ + 411 + ] + } + ], + "title": "SET", + "properties": { + "_cache": "d0852aacd9419dc54f287a17fb358b86e9cfc8d85e1404052f8892dd11538b80", + "ue_properties": { + "widget_ue_connectable": {}, + "version": "7.7", + "input_ue_unconnectable": {} + } + }, + "widgets_values": [ + "upload video" + ], + "color": "#323", + "bgcolor": "#535" + }, + { + "id": 310, + "type": "GetNode", + "pos": [ + -8389.4841643922, + 6769.931014366005 + ], + "size": [ + 210, + 60 + ], + "flags": { + "collapsed": true + }, + "order": 35, + "mode": 0, + "inputs": [], + "outputs": [ + { + "name": "INT", + "type": "INT", + "links": [ + 418, + 701 + ] + } + ], + "title": "SET", + "properties": { + "_cache": "d0852aacd9419dc54f287a17fb358b86e9cfc8d85e1404052f8892dd11538b80", + "ue_properties": { + "widget_ue_connectable": {}, + "version": "7.7", + "input_ue_unconnectable": {} + } + }, + "widgets_values": [ + "width" + ], + "color": "#323", + "bgcolor": "#535" + }, + { + "id": 311, + "type": "GetNode", + "pos": [ + -8504.399500885105, + 6767.495300925393 + ], + "size": [ + 210, + 61.116781577941765 + ], + "flags": { + "collapsed": true + }, + "order": 36, + "mode": 0, + "inputs": [], + "outputs": [ + { + "name": "INT", + "type": "INT", + "links": [ + 419, + 702 + ] + } + ], + "title": "SET", + "properties": { + "_cache": "d0852aacd9419dc54f287a17fb358b86e9cfc8d85e1404052f8892dd11538b80", + "ue_properties": { + "widget_ue_connectable": {}, + "version": "7.7", + "input_ue_unconnectable": {} + } + }, + "widgets_values": [ + "height" + ], + "color": "#323", + "bgcolor": "#535" + }, + { + "id": 89, + "type": "PoseAndFaceDetection", + "pos": [ + -8550.906049694471, + 6628.790167182609 + ], + "size": [ + 368.1681814035719, + 186 + ], + "flags": {}, + "order": 57, + "mode": 0, + "inputs": [ + { + "name": "model", + "type": "POSEMODEL", + "link": 128 + }, + { + "name": "images", + "type": "IMAGE", + "link": 411 + }, + { + "name": "retarget_image", + "shape": 7, + "type": "IMAGE", + "link": null + }, + { + "name": "width", + "type": "INT", + "widget": { + "name": "width" + }, + "link": 418 + }, + { + "name": "height", + "type": "INT", + "widget": { + "name": "height" + }, + "link": 419 + } + ], + "outputs": [ + { + "name": "pose_data", + "type": "POSEDATA", + "links": [ + 696, + 707 + ] + }, + { + "name": "face_images", + "type": "IMAGE", + "links": [ + 422 + ] + }, + { + "name": "key_frame_body_points", + "type": "STRING", + "links": null + }, + { + "name": "bboxes", + "type": "BBOX", + "links": 
null + }, + { + "name": "face_bboxes", + "type": "BBOX,", + "links": [ + 773 + ] + } + ], + "title": "L20", + "properties": { + "aux_id": "kijai/ComfyUI-WanAnimatePreprocess", + "ver": "2fcbcae7eec637fdc712fdec18e6266feb8ba3a7", + "Node name for S&R": "PoseAndFaceDetection", + "cnr_id": "ComfyUI-WanAnimatePreprocess", + "ue_properties": { + "widget_ue_connectable": { + "width": true, + "height": true + }, + "version": "7.5.2", + "input_ue_unconnectable": {} + }, + "_cache": "d0852aacd9419dc54f287a17fb358b86e9cfc8d85e1404052f8892dd11538b80" + }, + "widgets_values": [ + 480, + 832, + 0 + ], + "color": "#323", + "bgcolor": "#535" + }, + { + "id": 113, + "type": "SetNode", + "pos": [ + -8299.65110467738, + 6569.290986090206 + ], + "size": [ + 210.2760254328939, + 60 + ], + "flags": { + "collapsed": true + }, + "order": 76, + "mode": 0, + "inputs": [ + { + "name": "IMAGE", + "type": "IMAGE", + "link": 699 + } + ], + "outputs": [ + { + "name": "*", + "type": "*", + "links": [] + } + ], + "title": "SET", + "properties": { + "previousName": "pose_images", + "ue_properties": { + "widget_ue_connectable": {}, + "version": "7.5.2", + "input_ue_unconnectable": {} + }, + "_cache": "d0852aacd9419dc54f287a17fb358b86e9cfc8d85e1404052f8892dd11538b80" + }, + "widgets_values": [ + "pose_images" + ], + "color": "#2a363b", + "bgcolor": "#3f5159" + }, + { + "id": 271, + "type": "WanVideoClipVisionEncode", + "pos": [ + -8339.126993583934, + 6112.795034640469 + ], + "size": [ + 225.9, + 405.6091024132711 + ], + "flags": { + "collapsed": false + }, + "order": 75, + "mode": 0, + "inputs": [ + { + "name": "clip_vision", + "type": "CLIP_VISION", + "link": 366 + }, + { + "name": "image_1", + "type": "IMAGE", + "link": 357 + }, + { + "name": "image_2", + "shape": 7, + "type": "IMAGE", + "link": null + }, + { + "name": "negative_image", + "shape": 7, + "type": "IMAGE", + "link": null + } + ], + "outputs": [ + { + "name": "image_embeds", + "type": "WANVIDIMAGE_CLIPEMBEDS", + "links": [ + 347 + ] + } + ], + "title": "CLIP", + "properties": { + "aux_id": "kijai/ComfyUI-WanVideoWrapper", + "ver": "2bdd81a10b03c14443c79bdf3b783b1feb3d1fa3", + "Node name for S&R": "WanVideoClipVisionEncode", + "cnr_id": "ComfyUI-WanVideoWrapper", + "_cache": "d0852aacd9419dc54f287a17fb358b86e9cfc8d85e1404052f8892dd11538b80", + "ue_properties": { + "widget_ue_connectable": {}, + "version": "7.7", + "input_ue_unconnectable": {} + } + }, + "widgets_values": [ + 1, + 1, + "center", + "average", + true, + 0, + 0.5 + ], + "color": "#323", + "bgcolor": "#535" + }, + { + "id": 270, + "type": "WanVideoAnimateEmbeds", + "pos": [ + -8114.256076384437, + 6112.319155791672 + ], + "size": [ + 279.5888137684278, + 405.2288396992153 + ], + "flags": {}, + "order": 79, + "mode": 0, + "inputs": [ + { + "name": "vae", + "type": "WANVAE", + "link": 346 + }, + { + "name": "clip_embeds", + "shape": 7, + "type": "WANVIDIMAGE_CLIPEMBEDS", + "link": 347 + }, + { + "name": "ref_images", + "shape": 7, + "type": "IMAGE", + "link": 358 + }, + { + "name": "pose_images", + "shape": 7, + "type": "IMAGE", + "link": 423 + }, + { + "name": "face_images", + "shape": 7, + "type": "IMAGE", + "link": 424 + }, + { + "name": "bg_images", + "shape": 7, + "type": "IMAGE", + "link": 775 + }, + { + "name": "mask", + "shape": 7, + "type": "MASK", + "link": 774 + }, + { + "name": "start_ref_image", + "shape": 7, + "type": "IMAGE", + "link": null + }, + { + "name": "width", + "type": "INT", + "widget": { + "name": "width" + }, + "link": 359 + }, + { + "name": "height", + "type": "INT", + 
"widget": { + "name": "height" + }, + "link": 360 + }, + { + "name": "num_frames", + "type": "INT", + "widget": { + "name": "num_frames" + }, + "link": 393 + } + ], + "outputs": [ + { + "name": "image_embeds", + "type": "WANVIDIMAGE_EMBEDS", + "links": [ + 731 + ] + } + ], + "title": "L30", + "properties": { + "aux_id": "kijai/ComfyUI-WanVideoWrapper", + "ver": "761b1d191e50d589465e31dc0d40ff7c59b1b7b0", + "Node name for S&R": "WanVideoAnimateEmbeds", + "cnr_id": "ComfyUI-WanVideoWrapper", + "_cache": "d0852aacd9419dc54f287a17fb358b86e9cfc8d85e1404052f8892dd11538b80", + "ue_properties": { + "widget_ue_connectable": { + "width": true, + "height": true, + "num_frames": true + }, + "version": "7.7", + "input_ue_unconnectable": {} + } + }, + "widgets_values": [ + 832, + 480, + 81, + false, + 81, + "disabled", + 0.7, + 0.6, + false + ], + "color": "#323", + "bgcolor": "#535" + }, + { + "id": 532, + "type": "TSPoseDataSmoother", + "pos": [ + -8182.764094369587, + 6541.291204141554 + ], + "size": [ + 341.254927612169, + 277.0270988749817 + ], + "flags": {}, + "order": 68, + "mode": 0, + "inputs": [ + { + "name": "pose_data", + "type": "POSEDATA", + "link": 696 + } + ], + "outputs": [ + { + "name": "IMAGE", + "type": "IMAGE", + "links": [ + 699, + 782 + ] + }, + { + "name": "pose_data", + "type": "POSEDATA", + "links": null + } + ], + "title": "SKELETON", + "properties": { + "aux_id": "teskor-hub/comfyui-teskors-utils", + "ver": "9c4d0507ccd1f1c5b18fd536f2796e216560ed43", + "Node name for S&R": "TSPoseDataSmoother", + "ue_properties": { + "widget_ue_connectable": {}, + "version": "7.7", + "input_ue_unconnectable": {} + } + }, + "widgets_values": [ + true, + 0.7, + 12, + 2, + 0.35, + 0.6 + ], + "color": "#323", + "bgcolor": "#535" + }, + { + "id": 68, + "type": "ImageResizeKJv2", + "pos": [ + -8554.206437216862, + 6112.336370671043 + ], + "size": [ + 218.0092031355216, + 365.5631984153547 + ], + "flags": { + "collapsed": false + }, + "order": 65, + "mode": 0, + "inputs": [ + { + "name": "image", + "type": "IMAGE", + "link": 477 + }, + { + "name": "mask", + "shape": 7, + "type": "MASK", + "link": null + }, + { + "name": "width", + "type": "INT", + "widget": { + "name": "width" + }, + "link": 163 + }, + { + "name": "height", + "type": "INT", + "widget": { + "name": "height" + }, + "link": 164 + } + ], + "outputs": [ + { + "name": "IMAGE", + "type": "IMAGE", + "links": [ + 357, + 358 + ] + }, + { + "name": "width", + "type": "INT", + "links": [] + }, + { + "name": "height", + "type": "INT", + "links": [] + }, + { + "name": "mask", + "type": "MASK", + "links": null + } + ], + "title": "IMAGE", + "properties": { + "aux_id": "kijai/ComfyUI-KJNodes", + "ver": "ad37ce656c13e9abea002b46e3a89be3dba32355", + "Node name for S&R": "ImageResizeKJv2", + "cnr_id": "comfyui-kjnodes", + "ue_properties": { + "widget_ue_connectable": { + "width": true, + "height": true + }, + "version": "7.5.2", + "input_ue_unconnectable": {} + }, + "_cache": "d0852aacd9419dc54f287a17fb358b86e9cfc8d85e1404052f8892dd11538b80" + }, + "widgets_values": [ + 480, + 832, + "lanczos", + "crop", + "0, 0, 0", + "center", + 16, + "cpu" + ], + "color": "#323", + "bgcolor": "#535" + }, + { + "id": 90, + "type": "OnnxDetectionModelLoader", + "pos": [ + -8551.780744367743, + 6506.186998078944 + ], + "size": [ + 370.3601847372395, + 106 + ], + "flags": {}, + "order": 37, + "mode": 0, + "inputs": [], + "outputs": [ + { + "name": "model", + "type": "POSEMODEL", + "links": [ + 128 + ] + } + ], + "title": "MODEL", + "properties": { + "aux_id": 
"kijai/ComfyUI-WanAnimatePreprocess", + "ver": "2fcbcae7eec637fdc712fdec18e6266feb8ba3a7", + "Node name for S&R": "OnnxDetectionModelLoader", + "cnr_id": "ComfyUI-WanAnimatePreprocess", + "ue_properties": { + "widget_ue_connectable": {}, + "version": "7.5.2", + "input_ue_unconnectable": {} + }, + "_cache": "d0852aacd9419dc54f287a17fb358b86e9cfc8d85e1404052f8892dd11538b80" + }, + "widgets_values": [ + "vitpose_h_wholebody_model.onnx", + "yolov10m.onnx", + "CUDAExecutionProvider" + ], + "color": "#323", + "bgcolor": "#535" + }, + { + "id": 582, + "type": "Label (rgthree)", + "pos": [ + -8556.024948412149, + 5882.386282081778 + ], + "size": [ + 472.8349609375, + 245 + ], + "flags": { + "pinned": true, + "allow_interaction": false + }, + "order": 38, + "mode": 0, + "inputs": [], + "outputs": [], + "title": " \n \n \n \n ▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒ \n ▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▓▓▒▒▒▒▒▒▒▒▒▓▓▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒ \n ▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒███▒▒▒▒▒▒▒████▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒ \n ▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒████▒▒▒▒▒▒████▓▒▒▓▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒ \n ▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒████▒▒▒▒▒▒▓███▒▒▒▓██▓▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒ \n ▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▓███▓▒▒▒▒▒▒▓███▒▒▒▓███▒▒▒▒▒▒▒██▓▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒ \n ▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▓██▓▒▒▒▒▒▒▒███▓▒▒▒▓███▒▒▒▒▒▒▒▒██▓▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒ \n ▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▓▒▒▒▒▒▒▒▒▓███▒▒▒▒███████▓▒▒▒▒▒███▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒ \n ▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒█▓▒▒████▓▒▒▒▒████████████████▓▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒ \n ▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒████▒▒▒▒▒▒▒▒▒████▓▒▒▒▒▒▒▒▒▓████▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒ \n ▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▓████▒▒▓█▒▒▒▒▓█████████▓▒▒▒▒▒▒███▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒ \n ▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒████▓▒▒▒▒▒███████▓▒▒▓█████████▓▓██▓▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒ \n ▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒█▓▒▒███▒▒▒▒█████▓▒▒▒▒▒▒▒▒▒▒▒▒▒▒████▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒ \n ▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒███▒▒▓█████████████▓▒▒▒▒▒███▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒ \n ▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▓██▓▒███▓▒▒▒▒▒▒▒▓▓████████▓▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒ \n ▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒███▓▒███▒▒▒▒▒▒▒▒▒▒▒▓███▓▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒ \n ▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▓███▒▒▓███▓▒▒▒▒▒▓██████▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒ \n ▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▓███▒▒▒▒▓██████████▓▓████▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒ \n ▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒███▒█████▓▓▒▒▒▒▒▒▒▒██████▓▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒ \n ▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒███▒▒▒▒▓████████████████▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒ \n ▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▓██▒▒▒▒▒▒▒▒▒▒▒▒████████▒▒▒▒▒▒▒▒▒▒▓██▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒ \n ▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▓██▓▒██▓▒▒▒▒▒▒▒▓████████▓▒▒▒▒▒▒▓███▓▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒ \n ▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒██▒▒▒▒███▓▒▒▒▒▓▓▒▒▒▒▒████▒▒▒▓████▓▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒ \n ▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▓███▓▒▒▒▒▒▒▒▒▒▒█████████▓▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒ \n ▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▓█████████████████▓▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒ \n ▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▓█████▓▓▓▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒ \n ▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒ \n \n \n \n ", + "properties": { + "fontSize": 7, + "fontFamily": "Arial", + "fontColor": "#892C8B82", + "textAlign": "left", + 
"backgroundColor": "Black", + "padding": 0, + "borderRadius": 0, + "angle": 0, + "ue_properties": { + "widget_ue_connectable": {}, + "version": "7.7", + "input_ue_unconnectable": {} + } + }, + "color": "#fff0", + "bgcolor": "#fff0" + }, + { + "id": 683, + "type": "Label (rgthree)", + "pos": [ + -8081.11319027215, + 5882.376288696122 + ], + "size": [ + 472.8349609375, + 245 + ], + "flags": { + "pinned": true, + "allow_interaction": false + }, + "order": 39, + "mode": 0, + "inputs": [], + "outputs": [], + "title": " \n \n \n \n ▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒ \n ▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▓▓▒▒▒▒▒▒▒▒▒▓▓▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒ \n ▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒███▒▒▒▒▒▒▒████▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒ \n ▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒████▒▒▒▒▒▒████▓▒▒▓▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒ \n ▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒████▒▒▒▒▒▒▓███▒▒▒▓██▓▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒ \n ▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▓███▓▒▒▒▒▒▒▓███▒▒▒▓███▒▒▒▒▒▒▒██▓▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒ \n ▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▓██▓▒▒▒▒▒▒▒███▓▒▒▒▓███▒▒▒▒▒▒▒▒██▓▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒ \n ▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▓▒▒▒▒▒▒▒▒▓███▒▒▒▒███████▓▒▒▒▒▒███▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒ \n ▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒█▓▒▒████▓▒▒▒▒████████████████▓▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒ \n ▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒████▒▒▒▒▒▒▒▒▒████▓▒▒▒▒▒▒▒▒▓████▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒ \n ▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▓████▒▒▓█▒▒▒▒▓█████████▓▒▒▒▒▒▒███▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒ \n ▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒████▓▒▒▒▒▒███████▓▒▒▓█████████▓▓██▓▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒ \n ▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒█▓▒▒███▒▒▒▒█████▓▒▒▒▒▒▒▒▒▒▒▒▒▒▒████▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒ \n ▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒███▒▒▓█████████████▓▒▒▒▒▒███▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒ \n ▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▓██▓▒███▓▒▒▒▒▒▒▒▓▓████████▓▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒ \n ▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒███▓▒███▒▒▒▒▒▒▒▒▒▒▒▓███▓▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒ \n ▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▓███▒▒▓███▓▒▒▒▒▒▓██████▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒ \n ▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▓███▒▒▒▒▓██████████▓▓████▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒ \n ▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒███▒█████▓▓▒▒▒▒▒▒▒▒██████▓▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒ \n ▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒███▒▒▒▒▓████████████████▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒ \n ▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▓██▒▒▒▒▒▒▒▒▒▒▒▒████████▒▒▒▒▒▒▒▒▒▒▓██▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒ \n ▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▓██▓▒██▓▒▒▒▒▒▒▒▓████████▓▒▒▒▒▒▒▓███▓▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒ \n ▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒██▒▒▒▒███▓▒▒▒▒▓▓▒▒▒▒▒████▒▒▒▓████▓▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒ \n ▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▓███▓▒▒▒▒▒▒▒▒▒▒█████████▓▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒ \n ▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▓█████████████████▓▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒ \n ▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▓█████▓▓▓▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒ \n ▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒ \n \n \n \n ", + "properties": { + "fontSize": 7, + "fontFamily": "Arial", + "fontColor": "#892C8B82", + "textAlign": "left", + "backgroundColor": "Black", + "padding": 0, + "borderRadius": 0, + "angle": 0, + "ue_properties": { + "widget_ue_connectable": {}, + "version": "7.7", + "input_ue_unconnectable": {} + } + }, + "color": "#fff0", + "bgcolor": "#fff0" + }, + { + "id": 684, + "type": "Label (rgthree)", + "pos": [ + 
-7606.04795800517, + 5882.394758435825 + ], + "size": [ + 472.8349609375, + 245 + ], + "flags": { + "pinned": true, + "allow_interaction": false + }, + "order": 40, + "mode": 0, + "inputs": [], + "outputs": [], + "title": " \n \n \n \n ▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒ \n ▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▓▓▒▒▒▒▒▒▒▒▒▓▓▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒ \n ▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒███▒▒▒▒▒▒▒████▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒ \n ▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒████▒▒▒▒▒▒████▓▒▒▓▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒ \n ▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒████▒▒▒▒▒▒▓███▒▒▒▓██▓▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒ \n ▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▓███▓▒▒▒▒▒▒▓███▒▒▒▓███▒▒▒▒▒▒▒██▓▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒ \n ▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▓██▓▒▒▒▒▒▒▒███▓▒▒▒▓███▒▒▒▒▒▒▒▒██▓▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒ \n ▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▓▒▒▒▒▒▒▒▒▓███▒▒▒▒███████▓▒▒▒▒▒███▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒ \n ▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒█▓▒▒████▓▒▒▒▒████████████████▓▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒ \n ▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒████▒▒▒▒▒▒▒▒▒████▓▒▒▒▒▒▒▒▒▓████▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒ \n ▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▓████▒▒▓█▒▒▒▒▓█████████▓▒▒▒▒▒▒███▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒ \n ▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒████▓▒▒▒▒▒███████▓▒▒▓█████████▓▓██▓▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒ \n ▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒█▓▒▒███▒▒▒▒█████▓▒▒▒▒▒▒▒▒▒▒▒▒▒▒████▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒ \n ▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒███▒▒▓█████████████▓▒▒▒▒▒███▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒ \n ▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▓██▓▒███▓▒▒▒▒▒▒▒▓▓████████▓▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒ \n ▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒███▓▒███▒▒▒▒▒▒▒▒▒▒▒▓███▓▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒ \n ▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▓███▒▒▓███▓▒▒▒▒▒▓██████▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒ \n ▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▓███▒▒▒▒▓██████████▓▓████▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒ \n ▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒███▒█████▓▓▒▒▒▒▒▒▒▒██████▓▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒ \n ▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒███▒▒▒▒▓████████████████▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒ \n ▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▓██▒▒▒▒▒▒▒▒▒▒▒▒████████▒▒▒▒▒▒▒▒▒▒▓██▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒ \n ▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▓██▓▒██▓▒▒▒▒▒▒▒▓████████▓▒▒▒▒▒▒▓███▓▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒ \n ▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒██▒▒▒▒███▓▒▒▒▒▓▓▒▒▒▒▒████▒▒▒▓████▓▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒ \n ▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▓███▓▒▒▒▒▒▒▒▒▒▒█████████▓▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒ \n ▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▓█████████████████▓▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒ \n ▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▓█████▓▓▓▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒ \n ▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒ \n \n \n \n ", + "properties": { + "fontSize": 7, + "fontFamily": "Arial", + "fontColor": "#892C8B82", + "textAlign": "left", + "backgroundColor": "Black", + "padding": 0, + "borderRadius": 0, + "angle": 0, + "ue_properties": { + "widget_ue_connectable": {}, + "version": "7.7", + "input_ue_unconnectable": {} + } + }, + "color": "#fff0", + "bgcolor": "#fff0" + }, + { + "id": 685, + "type": "Label (rgthree)", + "pos": [ + -7131.136199865171, + 5882.384765050169 + ], + "size": [ + 472.8349609375, + 245 + ], + "flags": { + "pinned": true, + "allow_interaction": false + }, + "order": 41, + "mode": 0, + "inputs": [], + "outputs": [], + "title": " \n \n \n \n 
▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒ \n ▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▓▓▒▒▒▒▒▒▒▒▒▓▓▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒ \n ▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒███▒▒▒▒▒▒▒████▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒ \n ▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒████▒▒▒▒▒▒████▓▒▒▓▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒ \n ▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒████▒▒▒▒▒▒▓███▒▒▒▓██▓▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒ \n ▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▓███▓▒▒▒▒▒▒▓███▒▒▒▓███▒▒▒▒▒▒▒██▓▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒ \n ▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▓██▓▒▒▒▒▒▒▒███▓▒▒▒▓███▒▒▒▒▒▒▒▒██▓▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒ \n ▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▓▒▒▒▒▒▒▒▒▓███▒▒▒▒███████▓▒▒▒▒▒███▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒ \n ▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒█▓▒▒████▓▒▒▒▒████████████████▓▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒ \n ▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒████▒▒▒▒▒▒▒▒▒████▓▒▒▒▒▒▒▒▒▓████▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒ \n ▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▓████▒▒▓█▒▒▒▒▓█████████▓▒▒▒▒▒▒███▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒ \n ▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒████▓▒▒▒▒▒███████▓▒▒▓█████████▓▓██▓▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒ \n ▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒█▓▒▒███▒▒▒▒█████▓▒▒▒▒▒▒▒▒▒▒▒▒▒▒████▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒ \n ▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒███▒▒▓█████████████▓▒▒▒▒▒███▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒ \n ▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▓██▓▒███▓▒▒▒▒▒▒▒▓▓████████▓▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒ \n ▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒███▓▒███▒▒▒▒▒▒▒▒▒▒▒▓███▓▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒ \n ▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▓███▒▒▓███▓▒▒▒▒▒▓██████▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒ \n ▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▓███▒▒▒▒▓██████████▓▓████▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒ \n ▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒███▒█████▓▓▒▒▒▒▒▒▒▒██████▓▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒ \n ▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒███▒▒▒▒▓████████████████▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒ \n ▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▓██▒▒▒▒▒▒▒▒▒▒▒▒████████▒▒▒▒▒▒▒▒▒▒▓██▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒ \n ▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▓██▓▒██▓▒▒▒▒▒▒▒▓████████▓▒▒▒▒▒▒▓███▓▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒ \n ▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒██▒▒▒▒███▓▒▒▒▒▓▓▒▒▒▒▒████▒▒▒▓████▓▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒ \n ▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▓███▓▒▒▒▒▒▒▒▒▒▒█████████▓▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒ \n ▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▓█████████████████▓▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒ \n ▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▓█████▓▓▓▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒ \n ▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒ \n \n \n \n ", + "properties": { + "fontSize": 7, + "fontFamily": "Arial", + "fontColor": "#892C8B82", + "textAlign": "left", + "backgroundColor": "Black", + "padding": 0, + "borderRadius": 0, + "angle": 0, + "ue_properties": { + "widget_ue_connectable": {}, + "version": "7.7", + "input_ue_unconnectable": {} + } + }, + "color": "#fff0", + "bgcolor": "#fff0" + }, + { + "id": 702, + "type": "TSVideoCombineNoMetadata", + "pos": [ + -8528.147282464117, + 6880.125132736853 + ], + "size": [ + 384.3740209883139, + 238 + ], + "flags": {}, + "order": 77, + "mode": 0, + "inputs": [ + { + "name": "images", + "type": "IMAGE", + "link": 782 + }, + { + "name": "audio", + "shape": 7, + "type": "AUDIO", + "link": null + }, + { + "name": "meta_batch", + "shape": 7, + "type": "VHS_BatchManager", + "link": null + }, + { + "name": "vae", + "shape": 7, + "type": "VAE", + "link": null + } + ], + "outputs": [ + { + "name": "Filenames", + "type": "VHS_FILENAMES", + "links": 
null + } + ], + "title": "VIDEO", + "properties": { + "aux_id": "teskor-hub/comfyui-teskors-utils", + "ver": "cba89dd597152e08257e0bac588d59024196d49c", + "Node name for S&R": "TSVideoCombineNoMetadata", + "ue_properties": { + "widget_ue_connectable": {}, + "version": "7.7", + "input_ue_unconnectable": {} + } + }, + "widgets_values": [ + 30, + 0, + "POSE", + "video/h264-mp4", + false, + true, + { + "hidden": false, + "paused": false, + "params": { + "filename": "POSE_00001.mp4", + "subfolder": "", + "type": "output", + "format": "video/h264-mp4", + "frame_rate": 30, + "workflow": "POSE_00001.png", + "fullpath": "/workspace/ComfyUI/output/POSE_00001.mp4" + }, + "muted": true + } + ], + "color": "#323", + "bgcolor": "#535" + }, + { + "id": 703, + "type": "TSVideoCombineNoMetadata", + "pos": [ + -6576.041791362008, + 5965.4821465815085 + ], + "size": [ + 729.8685917595589, + 329.7855250670482 + ], + "flags": {}, + "order": 90, + "mode": 0, + "inputs": [ + { + "name": "images", + "type": "IMAGE", + "link": 783 + }, + { + "name": "audio", + "shape": 7, + "type": "AUDIO", + "link": 784 + }, + { + "name": "meta_batch", + "shape": 7, + "type": "VHS_BatchManager", + "link": null + }, + { + "name": "vae", + "shape": 7, + "type": "VAE", + "link": null + } + ], + "outputs": [ + { + "name": "Filenames", + "type": "VHS_FILENAMES", + "links": null + } + ], + "title": "VIDEO", + "properties": { + "aux_id": "teskor-hub/comfyui-teskors-utils", + "ver": "cba89dd597152e08257e0bac588d59024196d49c", + "Node name for S&R": "TSVideoCombineNoMetadata", + "ue_properties": { + "widget_ue_connectable": {}, + "version": "7.7", + "input_ue_unconnectable": {} + } + }, + "widgets_values": [ + 30, + 0, + "POSE", + "video/h264-mp4", + false, + true, + { + "hidden": false, + "paused": false, + "params": {}, + "muted": true + } + ], + "color": "#323", + "bgcolor": "#535" + }, + { + "id": 704, + "type": "TSVideoCombineNoMetadata", + "pos": [ + -5751.948462528763, + 5962.264044778359 + ], + "size": [ + 1087.0095334001594, + 340.5400177211295 + ], + "flags": {}, + "order": 91, + "mode": 0, + "inputs": [ + { + "name": "images", + "type": "IMAGE", + "link": 785 + }, + { + "name": "audio", + "shape": 7, + "type": "AUDIO", + "link": 786 + }, + { + "name": "meta_batch", + "shape": 7, + "type": "VHS_BatchManager", + "link": null + }, + { + "name": "vae", + "shape": 7, + "type": "VAE", + "link": null + } + ], + "outputs": [ + { + "name": "Filenames", + "type": "VHS_FILENAMES", + "links": null + } + ], + "title": "SAVE VIDEO", + "properties": { + "aux_id": "teskor-hub/comfyui-teskors-utils", + "ver": "cba89dd597152e08257e0bac588d59024196d49c", + "Node name for S&R": "TSVideoCombineNoMetadata", + "ue_properties": { + "widget_ue_connectable": {}, + "version": "7.7", + "input_ue_unconnectable": {} + } + }, + "widgets_values": [ + 30, + 0, + "POSE", + "video/h264-mp4", + false, + true, + { + "hidden": false, + "paused": false, + "params": {}, + "muted": true + } + ], + "color": "#323", + "bgcolor": "#535" + }, + { + "id": 568, + "type": "ImageFromBatch", + "pos": [ + -6979.210805125209, + 6883.8434628354635 + ], + "size": [ + 270, + 82 + ], + "flags": { + "collapsed": true + }, + "order": 48, + "mode": 4, + "inputs": [ + { + "name": "image", + "type": "IMAGE", + "link": 765 + } + ], + "outputs": [ + { + "name": "IMAGE", + "type": "IMAGE", + "links": [ + 788 + ] + } + ], + "properties": { + "cnr_id": "comfy-core", + "ver": "0.3.60", + "Node name for S&R": "ImageFromBatch", + "ue_properties": { + "widget_ue_connectable": { + "batch_index": 
true, + "length": true + }, + "version": "7.1", + "input_ue_unconnectable": {} + } + }, + "widgets_values": [ + 0, + 1 + ], + "color": "#323", + "bgcolor": "#535" + }, + { + "id": 564, + "type": "ImageFromBatch", + "pos": [ + -6979.608485214749, + 6912.801871652865 + ], + "size": [ + 270, + 82 + ], + "flags": { + "collapsed": true + }, + "order": 85, + "mode": 4, + "inputs": [ + { + "name": "image", + "type": "IMAGE", + "link": 759 + } + ], + "outputs": [ + { + "name": "IMAGE", + "type": "IMAGE", + "links": [ + 787 + ] + } + ], + "properties": { + "cnr_id": "comfy-core", + "ver": "0.3.60", + "Node name for S&R": "ImageFromBatch", + "ue_properties": { + "widget_ue_connectable": { + "batch_index": true, + "length": true + }, + "version": "7.1", + "input_ue_unconnectable": {} + } + }, + "widgets_values": [ + 0, + 1 + ], + "color": "#323", + "bgcolor": "#535" + }, + { + "id": 705, + "type": "TSPreviewImageNoMetadata", + "pos": [ + -8109.299683995111, + 6877.691710991631 + ], + "size": [ + 249.9, + 26 + ], + "flags": {}, + "order": 89, + "mode": 0, + "inputs": [ + { + "name": "image", + "type": "IMAGE", + "link": 787 + } + ], + "outputs": [ + { + "name": "IMAGE", + "type": "IMAGE", + "links": null + } + ], + "title": "IMAGE", + "properties": { + "aux_id": "teskor-hub/comfyui-teskors-utils", + "ver": "cba89dd597152e08257e0bac588d59024196d49c", + "Node name for S&R": "TSPreviewImageNoMetadata", + "ue_properties": { + "widget_ue_connectable": {}, + "version": "7.7", + "input_ue_unconnectable": {} + } + }, + "widgets_values": [], + "color": "#323", + "bgcolor": "#535" + }, + { + "id": 706, + "type": "TSPreviewImageNoMetadata", + "pos": [ + -7840.7489895810395, + 6876.720224665691 + ], + "size": [ + 249.9, + 26 + ], + "flags": {}, + "order": 64, + "mode": 0, + "inputs": [ + { + "name": "image", + "type": "IMAGE", + "link": 788 + } + ], + "outputs": [ + { + "name": "IMAGE", + "type": "IMAGE", + "links": null + } + ], + "title": "IMAGE", + "properties": { + "aux_id": "teskor-hub/comfyui-teskors-utils", + "ver": "cba89dd597152e08257e0bac588d59024196d49c", + "Node name for S&R": "TSPreviewImageNoMetadata", + "ue_properties": { + "widget_ue_connectable": {}, + "version": "7.7", + "input_ue_unconnectable": {} + } + }, + "widgets_values": [], + "color": "#323", + "bgcolor": "#535" + }, + { + "id": 493, + "type": "CLIPTextEncode", + "pos": [ + -9655.1816274107, + 6843.895227036905 + ], + "size": [ + 1124.7612388907737, + 127.11197819902372 + ], + "flags": {}, + "order": 56, + "mode": 0, + "inputs": [ + { + "name": "clip", + "type": "CLIP", + "link": 624 + } + ], + "outputs": [ + { + "name": "CONDITIONING", + "type": "CONDITIONING", + "links": [ + 616 + ] + } + ], + "title": "POSITIVE PROMPT", + "properties": { + "cnr_id": "comfy-core", + "ver": "0.3.59", + "Node name for S&R": "CLIPTextEncode", + "_cache": "d0852aacd9419dc54f287a17fb358b86e9cfc8d85e1404052f8892dd11538b80", + "ue_properties": { + "widget_ue_connectable": {}, + "version": "7.7", + "input_ue_unconnectable": {} + } + }, + "widgets_values": [ + "woman dancing, static locked-off shot" + ], + "color": "#323", + "bgcolor": "#535" + }, + { + "id": 536, + "type": "Label (rgthree)", + "pos": [ + -9715.580970551535, + 5633.141389586933 + ], + "size": [ + 1882.20703125, + 130 + ], + "flags": { + "allow_interaction": true + }, + "order": 42, + "mode": 0, + "inputs": [], + "outputs": [], + "title": "https://t.me/+l1nCXAoNBudhZjZi", + "properties": { + "fontSize": 130, + "fontFamily": "Arial", + "fontColor": "#892C8B82", + "textAlign": "left", + 
"backgroundColor": "transparent", + "padding": 0, + "borderRadius": 0, + "angle": 0, + "ue_properties": { + "widget_ue_connectable": {}, + "version": "7.7", + "input_ue_unconnectable": {} + } + }, + "color": "#fff0", + "bgcolor": "#fff0" + }, + { + "id": 660, + "type": "FancyTimerNode", + "pos": [ + -8903.312325436662, + 5914.3207023371415 + ], + "size": [ + 347.88470122079525, + 892.6634972420788 + ], + "flags": {}, + "order": 43, + "mode": 0, + "inputs": [], + "outputs": [], + "title": "Execution Timer", + "properties": { + "cnr_id": "crt-nodes", + "ver": "2.1.7", + "elapsed_time_str": "00:23:730", + "ue_properties": { + "widget_ue_connectable": {}, + "version": "7.7", + "input_ue_unconnectable": {} + } + }, + "widgets_values": [ + "" + ], + "color": "#323", + "bgcolor": "#535" + }, + { + "id": 75, + "type": "VHS_LoadVideo", + "pos": [ + -9648.968480695623, + 5918.080847771493 + ], + "size": [ + 323.21207580606324, + 310 + ], + "flags": { + "collapsed": false + }, + "order": 44, + "mode": 0, + "inputs": [ + { + "name": "meta_batch", + "shape": 7, + "type": "VHS_BatchManager", + "link": null + }, + { + "name": "vae", + "shape": 7, + "type": "VAE", + "link": null + } + ], + "outputs": [ + { + "name": "IMAGE", + "type": "IMAGE", + "links": [ + 325, + 409, + 414, + 776 + ] + }, + { + "name": "frame_count", + "type": "INT", + "links": [ + 391 + ] + }, + { + "name": "audio", + "type": "AUDIO", + "links": [ + 211, + 786 + ] + }, + { + "name": "video_info", + "type": "VHS_VIDEOINFO", + "links": null + } + ], + "properties": { + "aux_id": "Kosinkadink/ComfyUI-VideoHelperSuite", + "ver": "8550981384301e9bc5bfea83e5c2c75258102593", + "Node name for S&R": "VHS_LoadVideo", + "cnr_id": "comfyui-videohelpersuite", + "ue_properties": { + "widget_ue_connectable": {}, + "version": "7.5.2", + "input_ue_unconnectable": {} + }, + "_cache": "d0852aacd9419dc54f287a17fb358b86e9cfc8d85e1404052f8892dd11538b80" + }, + "widgets_values": { + "video": "tiktok_ev1amp1_7541508700087880982.mp4", + "force_rate": 30, + "custom_width": 720, + "custom_height": 1280, + "frame_load_cap": 0, + "skip_first_frames": 0, + "select_every_nth": 1, + "format": "Wan", + "videopreview": { + "hidden": false, + "paused": false, + "params": { + "filename": "tiktok_ev1amp1_7541508700087880982.mp4", + "type": "input", + "format": "video/mp4", + "force_rate": 30, + "custom_width": 720, + "custom_height": 1280, + "frame_load_cap": 0, + "skip_first_frames": 0, + "select_every_nth": 1 + } + } + }, + "color": "#323", + "bgcolor": "#535" + }, + { + "id": 701, + "type": "Label (rgthree)", + "pos": [ + -9650.33490984981, + 7223.711187983238 + ], + "size": [ + 869.248046875, + 130 + ], + "flags": { + "allow_interaction": true + }, + "order": 45, + "mode": 0, + "inputs": [], + "outputs": [], + "title": "ANIMATOR V2", + "properties": { + "fontSize": 130, + "fontFamily": "Arial", + "fontColor": "#892C8B82", + "textAlign": "left", + "backgroundColor": "transparent", + "padding": 0, + "borderRadius": 0, + "angle": 0, + "ue_properties": { + "widget_ue_connectable": {}, + "version": "7.7", + "input_ue_unconnectable": {} + } + }, + "color": "#fff0", + "bgcolor": "#fff0" + }, + { + "id": 707, + "type": "Label (rgthree)", + "pos": [ + -9061.21286514673, + 7300.442902071386 + ], + "size": [ + 869.248046875, + 130 + ], + "flags": { + "allow_interaction": true + }, + "order": 46, + "mode": 0, + "inputs": [], + "outputs": [], + "title": "ANIMATOR V2", + "properties": { + "fontSize": 130, + "fontFamily": "Arial", + "fontColor": "#892C8B82", + "textAlign": "left", 
+ "backgroundColor": "transparent", + "padding": 0, + "borderRadius": 0, + "angle": 0, + "ue_properties": { + "widget_ue_connectable": {}, + "version": "7.7", + "input_ue_unconnectable": {} + } + }, + "color": "#fff0", + "bgcolor": "#fff0" + }, + { + "id": 708, + "type": "Label (rgthree)", + "pos": [ + -9712.957719554255, + 5510.317035298682 + ], + "size": [ + 1222.177734375, + 130 + ], + "flags": { + "allow_interaction": true + }, + "order": 47, + "mode": 0, + "inputs": [], + "outputs": [], + "title": "AI SYNDICATE [18+]", + "properties": { + "fontSize": 130, + "fontFamily": "Arial", + "fontColor": "#892C8B82", + "textAlign": "left", + "backgroundColor": "transparent", + "padding": 0, + "borderRadius": 0, + "angle": 0, + "ue_properties": { + "widget_ue_connectable": {}, + "version": "7.7", + "input_ue_unconnectable": {} + } + }, + "color": "#fff0", + "bgcolor": "#fff0" + } + ], + "links": [ + [ + 128, + 90, + 0, + 89, + 0, + "POSEMODEL" + ], + [ + 163, + 102, + 0, + 68, + 2, + "INT" + ], + [ + 164, + 103, + 0, + 68, + 3, + "INT" + ], + [ + 169, + 59, + 0, + 106, + 0, + "*" + ], + [ + 211, + 75, + 2, + 130, + 0, + "*" + ], + [ + 242, + 28, + 0, + 158, + 0, + "IMAGE" + ], + [ + 325, + 75, + 0, + 246, + 0, + "IMAGE" + ], + [ + 346, + 266, + 0, + 270, + 0, + "WANVAE" + ], + [ + 347, + 271, + 0, + 270, + 1, + "WANVIDIMAGE_CLIPEMBEDS" + ], + [ + 357, + 68, + 0, + 271, + 1, + "IMAGE" + ], + [ + 358, + 68, + 0, + 270, + 2, + "IMAGE" + ], + [ + 359, + 102, + 0, + 270, + 8, + "INT" + ], + [ + 360, + 103, + 0, + 270, + 9, + "INT" + ], + [ + 366, + 128, + 0, + 271, + 0, + "CLIP_VISION" + ], + [ + 372, + 273, + 0, + 28, + 1, + "LATENT" + ], + [ + 391, + 75, + 1, + 290, + 0, + "INT" + ], + [ + 393, + 291, + 0, + 270, + 10, + "INT" + ], + [ + 409, + 75, + 0, + 304, + 0, + "IMAGE" + ], + [ + 411, + 306, + 0, + 89, + 1, + "IMAGE" + ], + [ + 412, + 309, + 1, + 307, + 0, + "INT" + ], + [ + 413, + 309, + 2, + 308, + 0, + "INT" + ], + [ + 414, + 75, + 0, + 309, + 0, + "IMAGE" + ], + [ + 418, + 310, + 0, + 89, + 3, + "INT" + ], + [ + 419, + 311, + 0, + 89, + 4, + "INT" + ], + [ + 422, + 89, + 1, + 314, + 0, + "IMAGE" + ], + [ + 423, + 313, + 0, + 270, + 3, + "IMAGE" + ], + [ + 424, + 315, + 0, + 270, + 4, + "IMAGE" + ], + [ + 437, + 78, + 0, + 321, + 0, + "WANVIDEOMODEL" + ], + [ + 460, + 349, + 0, + 348, + 0, + "WANVAE" + ], + [ + 463, + 354, + 0, + 78, + 1, + "WANVIDLORA" + ], + [ + 471, + 22, + 0, + 78, + 0, + "WANVIDEOMODEL" + ], + [ + 477, + 298, + 0, + 68, + 0, + "IMAGE" + ], + [ + 616, + 493, + 0, + 490, + 0, + "CONDITIONING" + ], + [ + 620, + 490, + 0, + 273, + 2, + "WANVIDEOTEXTEMBEDS" + ], + [ + 622, + 488, + 0, + 490, + 1, + "CONDITIONING" + ], + [ + 623, + 494, + 0, + 488, + 0, + "CLIP" + ], + [ + 624, + 494, + 0, + 493, + 0, + "CLIP" + ], + [ + 625, + 491, + 0, + 342, + 0, + "CLIP" + ], + [ + 626, + 76, + 0, + 298, + 0, + "IMAGE" + ], + [ + 696, + 89, + 0, + 532, + 0, + "POSEDATA" + ], + [ + 699, + 532, + 0, + 113, + 0, + "IMAGE" + ], + [ + 701, + 310, + 0, + 533, + 1, + "INT" + ], + [ + 702, + 311, + 0, + 533, + 2, + "INT" + ], + [ + 707, + 89, + 0, + 533, + 0, + "POSEDATA" + ], + [ + 725, + 552, + 0, + 553, + 0, + "WANVIDEOCONTROLNET" + ], + [ + 729, + 553, + 0, + 273, + 14, + "UNI3C_EMBEDS" + ], + [ + 730, + 322, + 0, + 273, + 0, + "WANVIDEOMODEL" + ], + [ + 731, + 270, + 0, + 273, + 1, + "WANVIDIMAGE_EMBEDS" + ], + [ + 743, + 109, + 0, + 28, + 0, + "WANVAE" + ], + [ + 752, + 28, + 0, + 562, + 0, + "IMAGE" + ], + [ + 759, + 565, + 0, + 564, + 0, + "IMAGE" + ], + [ + 760, + 570, + 0, + 565, + 0, 
+ "IMAGE" + ], + [ + 761, + 566, + 0, + 565, + 1, + "MASK" + ], + [ + 762, + 574, + 0, + 566, + 0, + "MASK" + ], + [ + 763, + 571, + 0, + 567, + 0, + "SAM2MODEL" + ], + [ + 764, + 570, + 0, + 567, + 1, + "IMAGE" + ], + [ + 765, + 570, + 0, + 568, + 0, + "IMAGE" + ], + [ + 768, + 566, + 0, + 572, + 0, + "MASK" + ], + [ + 769, + 565, + 0, + 573, + 0, + "IMAGE" + ], + [ + 770, + 567, + 0, + 574, + 0, + "MASK" + ], + [ + 771, + 576, + 0, + 574, + 1, + "INT" + ], + [ + 772, + 575, + 0, + 576, + 0, + "FLOAT" + ], + [ + 773, + 89, + 4, + 567, + 4, + "BBOX" + ], + [ + 774, + 580, + 0, + 270, + 6, + "MASK" + ], + [ + 775, + 578, + 0, + 270, + 5, + "IMAGE" + ], + [ + 776, + 75, + 0, + 581, + 1, + "IMAGE" + ], + [ + 777, + 109, + 0, + 581, + 0, + "WANVAE" + ], + [ + 778, + 581, + 0, + 553, + 1, + "LATENT" + ], + [ + 782, + 532, + 0, + 702, + 0, + "IMAGE" + ], + [ + 783, + 158, + 0, + 703, + 0, + "IMAGE" + ], + [ + 784, + 131, + 0, + 703, + 1, + "AUDIO" + ], + [ + 785, + 562, + 0, + 704, + 0, + "IMAGE" + ], + [ + 786, + 75, + 2, + 704, + 1, + "AUDIO" + ], + [ + 787, + 564, + 0, + 705, + 0, + "IMAGE" + ], + [ + 788, + 568, + 0, + 706, + 0, + "IMAGE" + ] + ], + "groups": [ + { + "id": 17, + "title": "UPLOAD FILES", + "bounding": [ + -9647.339798447581, + 5851.729255368294, + 1093.0209526884282, + 959.4033916191966 + ], + "color": "#a1309b", + "font_size": 24, + "flags": {} + }, + { + "id": 21, + "title": "SAMPLING", + "bounding": [ + -8553.740071023583, + 5851.31292932664, + 1878.7563613864213, + 960.2215127352238 + ], + "color": "#a1309b", + "font_size": 24, + "flags": {} + }, + { + "id": 26, + "title": "POSE", + "bounding": [ + -8532.741793536821, + 6812.105196510173, + 1861.7103276699054, + 608.9447583100164 + ], + "color": "#a1309b", + "font_size": 24, + "flags": {} + }, + { + "id": 27, + "title": "LOAD MODELS", + "bounding": [ + -7545.982406363826, + 6815.511644320396, + 806.5767444438889, + 605.9039349668546 + ], + "color": "#a1309b", + "font_size": 24, + "flags": {} + } + ], + "config": {}, + "extra": { + "ds": { + "scale": 0.4177248169415712, + "offset": [ + 10559.202776970425, + -5009.370988724576 + ] + }, + "frontendVersion": "1.38.14", + "workflowRendererVersion": "LG", + "node_versions": { + "ComfyUI-WanVideoWrapper": "5a2383621a05825d0d0437781afcb8552d9590fd", + "comfy-core": "0.3.26", + "ComfyUI-KJNodes": "a5bd3c86c8ed6b83c55c2d0e7a59515b15a0137f", + "ComfyUI-VideoHelperSuite": "0a75c7958fe320efcb052f1d9f8451fd20c730a8" + }, + "VHS_latentpreview": false, + "VHS_latentpreviewrate": 0, + "VHS_MetadataImage": true, + "VHS_KeepIntermediate": true, + "ue_links": [], + "links_added_by_ue": [], + "workflowHash": "d0852aacd9419dc54f287a17fb358b86e9cfc8d85e1404052f8892dd11538b80" + }, + "version": 0.4 +} \ No newline at end of file diff --git a/zavodik/nodes/comfyui-animator-nodes/README.md b/zavodik/nodes/comfyui-animator-nodes/README.md new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/zavodik/nodes/comfyui-animator-nodes/__init__.py b/zavodik/nodes/comfyui-animator-nodes/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..8d3ce640495fe0bab5477dc73484c3f241da1a90 --- /dev/null +++ b/zavodik/nodes/comfyui-animator-nodes/__init__.py @@ -0,0 +1,20 @@ +from .save_load_pose import TSSavePoseDataAsPickle, TSLoadPoseDataPickle +from .openpose_smoother import KPSSmoothPoseDataAndRender +from .load_video_batch import LoadVideoBatchListFromDir +from .rename_files import RenameFilesInDir + 
+NODE_CLASS_MAPPINGS = { + "TSSavePoseDataAsPickle": TSSavePoseDataAsPickle, + "TSLoadPoseDataPickle": TSLoadPoseDataPickle, + "TSPoseDataSmoother": KPSSmoothPoseDataAndRender, + "TSLoadVideoBatchListFromDir": LoadVideoBatchListFromDir, + "TSRenameFilesInDir": RenameFilesInDir, +} + +NODE_DISPLAY_NAME_MAPPINGS = { + "TSSavePoseDataAsPickle": "TS Save Pose Data (PKL)", + "TSLoadPoseDataPickle": "TS Load Pose Data (PKL)", + "TSPoseDataSmoother": "TS Pose Data Smoother", + "TSLoadVideoBatchListFromDir": "TS Load Video Batch List From Dir", + "TSRenameFilesInDir": "TS Rename Files In Dir", +} diff --git a/zavodik/nodes/comfyui-animator-nodes/__pycache__/__init__.cpython-313.pyc b/zavodik/nodes/comfyui-animator-nodes/__pycache__/__init__.cpython-313.pyc new file mode 100644 index 0000000000000000000000000000000000000000..a79a758c5ba70633ec67a18516d9b4cb436e8878 Binary files /dev/null and b/zavodik/nodes/comfyui-animator-nodes/__pycache__/__init__.cpython-313.pyc differ diff --git a/zavodik/nodes/comfyui-animator-nodes/__pycache__/load_video_batch.cpython-313.pyc b/zavodik/nodes/comfyui-animator-nodes/__pycache__/load_video_batch.cpython-313.pyc new file mode 100644 index 0000000000000000000000000000000000000000..1bf2506db7b31a9e061fdfb9c0d42b432fb03c53 Binary files /dev/null and b/zavodik/nodes/comfyui-animator-nodes/__pycache__/load_video_batch.cpython-313.pyc differ diff --git a/zavodik/nodes/comfyui-animator-nodes/__pycache__/openpose_smoother.cpython-313.pyc b/zavodik/nodes/comfyui-animator-nodes/__pycache__/openpose_smoother.cpython-313.pyc new file mode 100644 index 0000000000000000000000000000000000000000..b8919079c5ca1cfe5df195c39e873edb82857421 Binary files /dev/null and b/zavodik/nodes/comfyui-animator-nodes/__pycache__/openpose_smoother.cpython-313.pyc differ diff --git a/zavodik/nodes/comfyui-animator-nodes/__pycache__/rename_files.cpython-313.pyc b/zavodik/nodes/comfyui-animator-nodes/__pycache__/rename_files.cpython-313.pyc new file mode 100644 index 0000000000000000000000000000000000000000..d7af9c447f0b584ec27cd5bfa70bf62aeb876631 Binary files /dev/null and b/zavodik/nodes/comfyui-animator-nodes/__pycache__/rename_files.cpython-313.pyc differ diff --git a/zavodik/nodes/comfyui-animator-nodes/__pycache__/save_load_pose.cpython-313.pyc b/zavodik/nodes/comfyui-animator-nodes/__pycache__/save_load_pose.cpython-313.pyc new file mode 100644 index 0000000000000000000000000000000000000000..ad9a7970f77d03b9985937302c9fc01529b73add Binary files /dev/null and b/zavodik/nodes/comfyui-animator-nodes/__pycache__/save_load_pose.cpython-313.pyc differ diff --git a/zavodik/nodes/comfyui-animator-nodes/__pycache__/utils.cpython-313.pyc b/zavodik/nodes/comfyui-animator-nodes/__pycache__/utils.cpython-313.pyc new file mode 100644 index 0000000000000000000000000000000000000000..84e0546e3643220b3d5a2cdfd2fce01224dab8b9 Binary files /dev/null and b/zavodik/nodes/comfyui-animator-nodes/__pycache__/utils.cpython-313.pyc differ diff --git a/zavodik/nodes/comfyui-animator-nodes/load_video_batch.py b/zavodik/nodes/comfyui-animator-nodes/load_video_batch.py new file mode 100644 index 0000000000000000000000000000000000000000..c6ae5835bf342610aa1d53107bfb4c8b84ebf8fb --- /dev/null +++ b/zavodik/nodes/comfyui-animator-nodes/load_video_batch.py @@ -0,0 +1,351 @@ +import os +import re +import shutil +import subprocess +import time +from collections.abc import Mapping + +import torch +import numpy as np + +# OpenCV for video decoding +try: + import cv2 + + _has_cv2 = True +except Exception: + _has_cv2 = False + + 
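+# NOTE: cv2 is imported behind a guard so this module can still be imported when
+# opencv-python is missing; _read_frames_vhs_like() raises a clear RuntimeError
+# at call time instead of breaking the whole custom-node import.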
+# =========================
+# AUDIO (inlined from utils)
+# =========================
+ENCODE_ARGS = ("utf-8", "backslashreplace")
+
+
+def _pick_ffmpeg_path():
+    # 1) env override (same as VHS)
+    if "VHS_FORCE_FFMPEG_PATH" in os.environ:
+        p = os.environ.get("VHS_FORCE_FFMPEG_PATH")
+        if p:
+            return p
+
+    # 2) system ffmpeg
+    system_ffmpeg = shutil.which("ffmpeg")
+    if system_ffmpeg is not None:
+        return system_ffmpeg
+
+    # 3) local binary next to the working directory
+    if os.path.isfile("ffmpeg"):
+        return os.path.abspath("ffmpeg")
+    if os.path.isfile("ffmpeg.exe"):
+        return os.path.abspath("ffmpeg.exe")
+
+    return None
+
+
+ffmpeg_path = _pick_ffmpeg_path()
+
+
+def get_audio(file, start_time=0, duration=0):
+    if ffmpeg_path is None:
+        raise Exception("ffmpeg not found. Put ffmpeg in PATH, or set VHS_FORCE_FFMPEG_PATH env var.")
+
+    args = [ffmpeg_path, "-i", file]
+    if start_time > 0:
+        args += ["-ss", str(start_time)]
+    if duration > 0:
+        args += ["-t", str(duration)]
+
+    try:
+        # as in utils: dump raw f32le samples to stdout
+        res = subprocess.run(args + ["-f", "f32le", "-"], capture_output=True, check=True)
+        audio = torch.frombuffer(bytearray(res.stdout), dtype=torch.float32)
+        match = re.search(r", (\d+) Hz, (\w+), ", res.stderr.decode(*ENCODE_ARGS))
+    except subprocess.CalledProcessError as e:
+        raise Exception(f"Failed to extract audio from {file}:\n" + e.stderr.decode(*ENCODE_ARGS))
+
+    if match:
+        ar = int(match.group(1))
+        ac = {"mono": 1, "stereo": 2}.get(match.group(2), 2)
+    else:
+        ar = 44100
+        ac = 2
+
+    # reshape as in utils: (-1, channels) -> (channels, samples) -> (1, channels, samples)
+    if audio.numel() == 0:
+        # video without an audio track: return an empty but correctly shaped audio buffer
+        empty = torch.zeros((1, 1, 0), dtype=torch.float32)
+        return {"waveform": empty, "sample_rate": ar}
+
+    audio = audio.reshape((-1, ac)).transpose(0, 1).unsqueeze(0)
+    return {"waveform": audio, "sample_rate": ar}
+
+
+class LazyAudioMap(Mapping):
+    def __init__(self, file, start_time, duration):
+        self.file = file
+        self.start_time = start_time
+        self.duration = duration
+        self._dict = None
+
+    def _ensure(self):
+        if self._dict is None:
+            self._dict = get_audio(self.file, self.start_time, self.duration)
+
+    def __getitem__(self, key):
+        self._ensure()
+        return self._dict[key]
+
+    def __iter__(self):
+        self._ensure()
+        return iter(self._dict)
+
+    def __len__(self):
+        self._ensure()
+        return len(self._dict)
+
+
+def lazy_get_audio(file, start_time=0, duration=0, **kwargs):
+    return LazyAudioMap(file, start_time, duration)
+
+
+# =========================
+# the rest of the node code
+# =========================
+
+
+def extract_first_number(s):
+    match = re.search(r"\d+", s)
+    return int(match.group()) if match else float("inf")
+
+
+sort_methods = [
+    "None",
+    "Alphabetical (ASC)",
+    "Alphabetical (DESC)",
+    "Numerical (ASC)",
+    "Numerical (DESC)",
+    "Datetime (ASC)",
+    "Datetime (DESC)",
+]
+
+
+def sort_by(items, base_path=".", method=None):
+    def fullpath(x):
+        return os.path.join(base_path, x)
+
+    def get_timestamp(path):
+        try:
+            return os.path.getmtime(path)
+        except FileNotFoundError:
+            return float("-inf")
+
+    if method == "Alphabetical (ASC)":
+        return sorted(items)
+    elif method == "Alphabetical (DESC)":
+        return sorted(items, reverse=True)
+    elif method == "Numerical (ASC)":
+        return sorted(items, key=lambda x: extract_first_number(os.path.splitext(x)[0]))
+    elif method == "Numerical (DESC)":
+        return sorted(items, key=lambda x: extract_first_number(os.path.splitext(x)[0]), reverse=True)
+    elif method == "Datetime 
(ASC)": + return sorted(items, key=lambda x: get_timestamp(fullpath(x))) + elif method == "Datetime (DESC)": + return sorted(items, key=lambda x: get_timestamp(fullpath(x)), reverse=True) + else: + return items + + +def target_size(width, height, custom_width, custom_height, downscale_ratio=8): + if downscale_ratio is None: + downscale_ratio = 8 + + if custom_width == 0 and custom_height == 0: + new_w, new_h = width, height + elif custom_height == 0: + new_h = int(height * (custom_width / width)) + new_w = int(custom_width) + elif custom_width == 0: + new_w = int(width * (custom_height / height)) + new_h = int(custom_height) + else: + new_w, new_h = int(custom_width), int(custom_height) + + new_w = int(new_w / downscale_ratio + 0.5) * downscale_ratio + new_h = int(new_h / downscale_ratio + 0.5) * downscale_ratio + return new_w, new_h + + +def _read_frames_vhs_like( + video_path: str, + force_rate: float = 0, + custom_width: int = 0, + custom_height: int = 0, + downscale_ratio: int = 8, + frame_load_cap: int = 0, +): + if not _has_cv2: + raise RuntimeError("OpenCV (cv2) not available. Install opencv-python.") + + cap = cv2.VideoCapture(video_path) + if not cap.isOpened() or not cap.grab(): + raise FileNotFoundError(f"Cannot open video: {video_path}") + + fps = cap.get(cv2.CAP_PROP_FPS) + if fps is None or fps <= 0: + fps = 30.0 + + width = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH)) + height = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT)) + + ok0, frame0 = cap.retrieve() + if not ok0 or frame0 is None: + cap.release() + raise RuntimeError(f"Cannot retrieve first frame from: {video_path}") + + if width <= 0 or height <= 0: + height, width = frame0.shape[:2] + + base_dt = 1.0 / float(fps) + target_dt = base_dt if force_rate == 0 else (1.0 / float(force_rate)) + loaded_fps = 1.0 / target_dt if target_dt > 0 else float(fps) + + new_w, new_h = target_size(width, height, custom_width, custom_height, downscale_ratio) + do_resize = (new_w != width) or (new_h != height) + + frames = [] + time_offset = target_dt + + def _process_frame(bgr): + rgb = cv2.cvtColor(bgr, cv2.COLOR_BGR2RGB) + if do_resize: + rgb = cv2.resize(rgb, (new_w, new_h), interpolation=cv2.INTER_LANCZOS4) + return rgb + + frames.append(_process_frame(frame0)) + if frame_load_cap > 0 and len(frames) >= frame_load_cap: + cap.release() + arr = np.stack(frames, axis=0).astype(np.float32) / 255.0 + t = torch.from_numpy(arr) + return t, float(fps), float(loaded_fps), float(len(t) * target_dt), 0.0 + + time_offset -= target_dt + + while cap.isOpened(): + if time_offset < target_dt: + ok = cap.grab() + if not ok: + break + time_offset += base_dt + continue + + ok, frame_bgr = cap.retrieve() + if not ok or frame_bgr is None: + break + + frames.append(_process_frame(frame_bgr)) + + if frame_load_cap > 0 and len(frames) >= frame_load_cap: + break + + time_offset -= target_dt + + cap.release() + + if len(frames) == 0: + raise RuntimeError(f"No frames could be read from: {video_path}") + + arr = np.stack(frames, axis=0).astype(np.float32) / 255.0 + t = torch.from_numpy(arr) + loaded_duration = float(len(t) * target_dt) + return t, float(fps), float(loaded_fps), loaded_duration, 0.0 + + +class LoadVideoBatchListFromDir: + @classmethod + def INPUT_TYPES(s): + return { + "required": { + "directory": ("STRING", {"default": ""}), + "force_rate": ("FLOAT", {"default": 0, "min": 0, "max": 120, "step": 1}), + "width": ("INT", {"default": 720, "min": 0, "max": 8192, "step": 1}), + "height": ("INT", {"default": 1280, "min": 0, "max": 8192, "step": 1}), + }, + 
"optional": { + "video_load_cap": ("INT", {"default": 0, "min": 0, "step": 1}), + "frame_load_cap": ("INT", {"default": 0, "min": 0, "step": 1}), + "start_index": ("INT", {"default": 0, "min": 0, "max": 0xFFFFFFFFFFFFFFFF, "step": 1}), + "load_always": ("BOOLEAN", {"default": False, "label_on": "enabled", "label_off": "disabled"}), + "sort_method": (sort_methods,), + }, + } + + RETURN_TYPES = ("IMAGE", "AUDIO", "INT") + RETURN_NAMES = ("IMAGE", "audio", "COUNT") + OUTPUT_IS_LIST = (True, True, False) + + FUNCTION = "load_videos" + CATEGORY = "video" + + @classmethod + def IS_CHANGED(cls, **kwargs): + if kwargs.get("load_always"): + return float("NaN") + return hash(frozenset(kwargs.items())) + + def load_videos( + self, + directory: str, + force_rate: float = 0, + width: int = 0, + height: int = 0, + video_load_cap: int = 0, + frame_load_cap: int = 0, + start_index: int = 0, + load_always: bool = False, + sort_method=None, + ): + if not os.path.isdir(directory): + raise FileNotFoundError(f"Directory '{directory}' cannot be found.") + + files = os.listdir(directory) + if len(files) == 0: + raise FileNotFoundError(f"No files in directory '{directory}'.") + + valid_ext = {".mp4", ".mov", ".mkv", ".webm", ".avi", ".m4v"} + files = [ + f + for f in files + if os.path.isfile(os.path.join(directory, f)) and os.path.splitext(f)[1].lower() in valid_ext + ] + if len(files) == 0: + raise FileNotFoundError(f"No video files in directory '{directory}' (expected: {sorted(valid_ext)}).") + + files = sort_by(files, directory, sort_method) + files = files[start_index:] + if video_load_cap > 0: + files = files[:video_load_cap] + + images_list = [] + audios_list = [] + + for fname in files: + path = os.path.join(directory, fname) + + vid, source_fps, loaded_fps, loaded_duration, start_time = _read_frames_vhs_like( + path, + force_rate=force_rate, + custom_width=width, + custom_height=height, + downscale_ratio=8, + frame_load_cap=frame_load_cap, + ) + + images_list.append(vid) + + # duration based on loaded frames/time + audio = lazy_get_audio(path, start_time, loaded_duration) + audios_list.append(audio) + + return (images_list, audios_list, len(images_list)) diff --git a/zavodik/nodes/comfyui-animator-nodes/openpose_smoother.py b/zavodik/nodes/comfyui-animator-nodes/openpose_smoother.py new file mode 100644 index 0000000000000000000000000000000000000000..80a63a5ac823fad26a37f8b6b3a086d69bfc3c9a --- /dev/null +++ b/zavodik/nodes/comfyui-animator-nodes/openpose_smoother.py @@ -0,0 +1,2481 @@ +from __future__ import annotations + +import copy +import math +import pickle +import threading +from dataclasses import dataclass +from typing import Any, Dict, List, Optional, Tuple, Union + +import numpy as np +import cv2 +import torch + + +# ============================================================ +# ComfyUI Node (pose_data + PKL) +# ============================================================ + +_GLOBAL_LOCK = threading.Lock() + + +class KPSSmoothPoseDataAndRender: + """ + Сглаживание + рендер позы. + Вход: POSEDATA (как объект/dict; обычно приходит из TSLoadPoseDataPickle). + Выход: IMAGE (torch [T,H,W,3] float 0..1), POSEDATA (в том же формате, но сглаженный). 
+ """ + + @classmethod + def INPUT_TYPES(cls): + return { + "required": { + "pose_data": ("POSEDATA",), # <-- ВАЖНО: именно POSEDATA + "filter_extra_people": ("BOOLEAN", {"default": True}), + # общий набор параметров сглаживания (вместо body + face_hands) + "smooth_alpha": ("FLOAT", {"default": 0.7, "min": 0.01, "max": 0.99, "step": 0.01}), + "gap_frames": ("INT", {"default": 12, "min": 0, "max": 100, "step": 1}), + "min_run_frames": ("INT", {"default": 2, "min": 1, "max": 60, "step": 1}), + # пороги отрисовки (в инпут добавляем body/hands, face НЕ добавляем) + "conf_thresh_body": ("FLOAT", {"default": 0.20, "min": 0.0, "max": 1.0, "step": 0.01}), + "conf_thresh_hands": ("FLOAT", {"default": 0.50, "min": 0.0, "max": 1.0, "step": 0.01}), + } + } + + RETURN_TYPES = ("IMAGE", "POSEDATA") # <-- ВАЖНО: именно POSEDATA + RETURN_NAMES = ("IMAGE", "pose_data") + FUNCTION = "run" + CATEGORY = "posedata" + + def run(self, pose_data, **kwargs): + filter_extra_people = bool(kwargs.get("filter_extra_people", True)) + + # общий набор + smooth_alpha = float(kwargs.get("smooth_alpha", 0.7)) + gap_frames = int(kwargs.get("gap_frames", 12)) + min_run_frames = int(kwargs.get("min_run_frames", 2)) + + # пороги рендера + conf_thresh_body = float(kwargs.get("conf_thresh_body", 0.20)) + conf_thresh_hands = float(kwargs.get("conf_thresh_hands", 0.50)) + conf_thresh_face = 0.20 # <- НЕ добавляем в INPUT, но фиксируем как ты просил + + force_body_18 = bool(kwargs.get("force_body_18", False)) + + pose_data = _coerce_pose_data_to_obj(pose_data) + + # pose_data -> frames_json_like + frames_json_like, meta_ref = _pose_data_to_kps_frames(pose_data, force_body_18=force_body_18) + + with _GLOBAL_LOCK: + old = _snapshot_tunable_globals() + try: + # BODY + globals()["ALPHA_BODY"] = smooth_alpha + globals()["SUPER_SMOOTH_ALPHA"] = smooth_alpha + globals()["MAX_GAP_FRAMES"] = gap_frames + globals()["MIN_RUN_FRAMES"] = min_run_frames + + # FACE+HANDS (dense) тоже от общего набора + globals()["DENSE_SUPER_SMOOTH_ALPHA"] = smooth_alpha + globals()["DENSE_MAX_GAP_FRAMES"] = gap_frames + globals()["DENSE_MIN_RUN_FRAMES"] = min_run_frames + + globals()["FILTER_EXTRA_PEOPLE"] = filter_extra_people + + smoothed_frames = smooth_KPS_json_obj( + frames_json_like, + keep_face_untouched=False, + keep_hands_untouched=False, + filter_extra_people=filter_extra_people, + ) + finally: + _restore_tunable_globals(old) + + # frames_json_like -> pose_data (обратно в pose_metas) + out_pose_data = _kps_frames_to_pose_data(pose_data, smoothed_frames, meta_ref, force_body_18=force_body_18) + + # render + w, h = _extract_canvas_wh(smoothed_frames, default_w=720, default_h=1280) + frames_np = [] + for fr in smoothed_frames: + if isinstance(fr, dict) and fr.get("people"): + img = _draw_pose_frame_full( + w, + h, + fr["people"][0], + conf_thresh_body=conf_thresh_body, + conf_thresh_hands=conf_thresh_hands, + conf_thresh_face=conf_thresh_face, + ) + else: + img = np.zeros((h, w, 3), dtype=np.uint8) + frames_np.append(img) + + frames_t = torch.from_numpy(np.stack(frames_np, axis=0)).float() / 255.0 + return (frames_t, out_pose_data) + + +# ============================================================ +# PKL / pose_data IO +# ============================================================ + + +class _PoseDummyObj: + def __init__(self, *a, **k): + pass + + def __setstate__(self, state): + # поддержка dict и (dict, slotstate) + if isinstance(state, dict): + self.__dict__.update(state) + elif isinstance(state, (list, tuple)) and len(state) == 2 and 
isinstance(state[0], dict):
+            self.__dict__.update(state[0])
+            if isinstance(state[1], dict):
+                self.__dict__.update(state[1])
+            else:
+                self.__dict__["_slotstate"] = state[1]
+        else:
+            self.__dict__["_state"] = state
+
+
+class _SafeUnpickler(pickle.Unpickler):
+    """
+    Safely load PKL files inside the ComfyUI environment:
+    - remap numpy._core -> numpy.core
+    - unknown classes (WanAnimatePreprocess.*) become plain objects carrying a __dict__
+    """
+
+    def find_class(self, module, name):
+        # remap numpy-internal module paths (a common cross-version issue)
+        if module.startswith("numpy._core"):
+            module = module.replace("numpy._core", "numpy.core", 1)
+        if module.startswith("numpy._globals"):
+            module = module.replace("numpy._globals", "numpy", 1)
+
+        # specific metadata classes (when present)
+        if name in {"AAPoseMeta"}:
+            return _PoseDummyObj
+
+        try:
+            return super().find_class(module, name)
+        except Exception:
+            return _PoseDummyObj
+
+
+def _load_pose_data_pkl(path: str) -> Any:
+    with open(path, "rb") as f:
+        return _SafeUnpickler(f).load()
+
+
+def _coerce_pose_data_to_obj(pd: Any) -> Any:
+    """
+    Accepts:
+    - dict pose_data
+    - object with attributes like .pose_metas (AAPoseMeta-like)
+    - str path to .pkl
+    - dict wrapper with 'pose_data'
+    """
+    if isinstance(pd, str):
+        obj = _load_pose_data_pkl(pd)
+        return obj
+
+    if isinstance(pd, dict) and "pose_data" in pd:
+        return pd["pose_data"]
+
+    return pd
+
+
+# ============================================================
+# pose_data <-> JSON-like KPS frames
+# ============================================================
+
+
+def _as_attr(x: Any, key: str, default=None):
+    if isinstance(x, dict):
+        return x.get(key, default)
+    return getattr(x, key, default)
+
+
+def _set_attr(x: Any, key: str, value: Any):
+    if isinstance(x, dict):
+        x[key] = value
+    else:
+        setattr(x, key, value)
+
+
+def _xy_p_to_flat(xy: Optional[np.ndarray], p: Optional[np.ndarray]) -> Optional[List[float]]:
+    if xy is None:
+        return None
+    arr = np.asarray(xy)
+    if arr.ndim != 2 or arr.shape[1] < 2:
+        return None
+    N = arr.shape[0]
+    if p is None:
+        pp = np.ones((N,), dtype=np.float32)
+    else:
+        pp = np.asarray(p).reshape(-1)
+    if pp.shape[0] != N:
+        # lengths unexpectedly mismatched: play it safe with full confidence
+        pp = np.ones((N,), dtype=np.float32)
+
+    out: List[float] = []
+    for i in range(N):
+        out.extend([float(arr[i, 0]), float(arr[i, 1]), float(pp[i])])
+    return out
+
+
+def _flat_to_xy_p(flat: Optional[List[float]]) -> Tuple[Optional[np.ndarray], Optional[np.ndarray]]:
+    if not isinstance(flat, list) or len(flat) % 3 != 0:
+        return None, None
+    N = len(flat) // 3
+    xy = np.zeros((N, 2), dtype=np.float32)
+    p = np.zeros((N,), dtype=np.float32)
+    for i in range(N):
+        xy[i, 0] = float(flat[3 * i + 0])
+        xy[i, 1] = float(flat[3 * i + 1])
+        p[i] = float(flat[3 * i + 2])
+    return xy, p
+
+
+def _pose_data_to_kps_frames(pose_data: Any, *, force_body_18: bool) -> Tuple[List[Dict[str, Any]], Dict[str, Any]]:
+    """
+    Builds a JSON-like list of frames:
+    frame = {"people": [{pose_keypoints_2d, face_keypoints_2d, hand_left_keypoints_2d, hand_right_keypoints_2d}],
+             "canvas_width": W, "canvas_height": H}
+    meta_ref: references to pose_metas plus type/access info, so results can be written back correctly.
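+    Each *_keypoints_2d value is a flat list [x0, y0, c0, x1, y1, c1, ...] with
+    one (x, y, confidence) triple per joint, mirroring the OpenPose JSON layout.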
+ """ + pose_metas = _as_attr(pose_data, "pose_metas", None) + if pose_metas is None: + # иногда называют иначе + pose_metas = _as_attr(pose_data, "frames", None) + + if pose_metas is None or not isinstance(pose_metas, list): + raise ValueError("pose_data does not contain 'pose_metas' list.") + + frames: List[Dict[str, Any]] = [] + for meta in pose_metas: + h = _as_attr(meta, "height", 1280) + w = _as_attr(meta, "width", 720) + + kps_body = _as_attr(meta, "kps_body", None) + kps_body_p = _as_attr(meta, "kps_body_p", None) + + kps_face = _as_attr(meta, "kps_face", None) + kps_face_p = _as_attr(meta, "kps_face_p", None) + + kps_lhand = _as_attr(meta, "kps_lhand", None) + kps_lhand_p = _as_attr(meta, "kps_lhand_p", None) + + kps_rhand = _as_attr(meta, "kps_rhand", None) + kps_rhand_p = _as_attr(meta, "kps_rhand_p", None) + + # to flat + pose_flat = _xy_p_to_flat(kps_body, kps_body_p) + face_flat = _xy_p_to_flat(kps_face, kps_face_p) + lh_flat = _xy_p_to_flat(kps_lhand, kps_lhand_p) + rh_flat = _xy_p_to_flat(kps_rhand, kps_rhand_p) + + if force_body_18 and isinstance(pose_flat, list) and len(pose_flat) >= 18 * 3: + pose_flat = pose_flat[: 18 * 3] + + person = { + "pose_keypoints_2d": pose_flat if pose_flat is not None else [], + "face_keypoints_2d": face_flat if face_flat is not None else [], + "hand_left_keypoints_2d": lh_flat, + "hand_right_keypoints_2d": rh_flat, + } + + frame = {"people": [person], "canvas_height": int(h), "canvas_width": int(w)} + frames.append(frame) + + meta_ref = { + "pose_metas": pose_metas, + "len": len(pose_metas), + } + return frames, meta_ref + + +def _kps_frames_to_pose_data( + pose_data_in: Any, + frames_kps: List[Dict[str, Any]], + meta_ref: Dict[str, Any], + *, + force_body_18: bool, +) -> Any: + """ + Записывает обратно сглаженные keypoints в pose_metas[*].kps_* / kps_*_p. + Остальные поля pose_data сохраняем. 
+ """ + out_pd = copy.deepcopy(pose_data_in) + pose_metas_out = _as_attr(out_pd, "pose_metas", None) + if pose_metas_out is None: + # fallback: вдруг другой ключ + pose_metas_out = meta_ref.get("pose_metas") + + if pose_metas_out is None or not isinstance(pose_metas_out, list): + raise ValueError("Failed to locate pose_metas in output pose_data.") + + T = min(len(pose_metas_out), len(frames_kps)) + for t in range(T): + meta = pose_metas_out[t] + fr = frames_kps[t] + people = fr.get("people", []) if isinstance(fr, dict) else [] + p0 = people[0] if people else None + if not isinstance(p0, dict): + continue + + pose_flat = p0.get("pose_keypoints_2d") + face_flat = p0.get("face_keypoints_2d") + lh_flat = p0.get("hand_left_keypoints_2d") + rh_flat = p0.get("hand_right_keypoints_2d") + + if force_body_18 and isinstance(pose_flat, list) and len(pose_flat) >= 18 * 3: + pose_flat = pose_flat[: 18 * 3] + + body_xy, body_p = _flat_to_xy_p(pose_flat if isinstance(pose_flat, list) else None) + face_xy, face_p = _flat_to_xy_p(face_flat if isinstance(face_flat, list) else None) + lh_xy, lh_p = _flat_to_xy_p(lh_flat if isinstance(lh_flat, list) else None) + rh_xy, rh_p = _flat_to_xy_p(rh_flat if isinstance(rh_flat, list) else None) + + if body_xy is not None and body_p is not None: + _set_attr(meta, "kps_body", body_xy.astype(np.float32, copy=False)) + _set_attr(meta, "kps_body_p", body_p.astype(np.float32, copy=False)) + + if face_xy is not None and face_p is not None: + _set_attr(meta, "kps_face", face_xy.astype(np.float32, copy=False)) + _set_attr(meta, "kps_face_p", face_p.astype(np.float32, copy=False)) + + if lh_xy is not None and lh_p is not None: + _set_attr(meta, "kps_lhand", lh_xy.astype(np.float32, copy=False)) + _set_attr(meta, "kps_lhand_p", lh_p.astype(np.float32, copy=False)) + + if rh_xy is not None and rh_p is not None: + _set_attr(meta, "kps_rhand", rh_xy.astype(np.float32, copy=False)) + _set_attr(meta, "kps_rhand_p", rh_p.astype(np.float32, copy=False)) + + # обновим width/height если нужно + if isinstance(fr, dict): + if "canvas_width" in fr: + _set_attr(meta, "width", int(fr["canvas_width"])) + if "canvas_height" in fr: + _set_attr(meta, "height", int(fr["canvas_height"])) + + # обязательно положим pose_metas обратно + _set_attr(out_pd, "pose_metas", pose_metas_out) + return out_pd + + +def _extract_canvas_wh(data: Any, default_w: int, default_h: int) -> Tuple[int, int]: + w, h = int(default_w), int(default_h) + if isinstance(data, list): + for fr in data: + if isinstance(fr, dict) and "canvas_width" in fr and "canvas_height" in fr: + try: + w = int(fr["canvas_width"]) + h = int(fr["canvas_height"]) + break + except Exception: + pass + return w, h + + +# ============================================================ +# === START: smooth_KPS_json.py logic (ported as-is) +# ============================================================ + +# --- Root+Scale carry (when torso disappears on close-up) --- +ROOTSCALE_CARRY_ENABLED = True +CARRY_MAX_FRAMES = 48 +CARRY_MIN_ANCHORS = 2 +CARRY_ANCHOR_JOINTS = [0, 1, 2, 5, 3, 6, 4, 7] +CARRY_CONF_GATE = 0.20 + +# --- Main person selection / multi-person filtering --- +FILTER_EXTRA_PEOPLE = True +MAIN_PERSON_MODE = "longest_track" +TRACK_MATCH_MIN_PX = 80.0 +TRACK_MATCH_FACTOR = 3.0 +TRACK_MAX_FRAME_GAP = 32 + +# --- Spatial outlier suppression --- +SPATIAL_OUTLIER_FIX = True +BONE_MAX_FACTOR = 2.3 +TORSO_RADIUS_FACTOR = 4.0 + +# EMA smoothing for BODY only (online) +ALPHA_BODY = 0.70 +MAX_STEP_BODY = 60.0 +VEL_ALPHA = 0.45 +EPS = 0.3 +CONF_GATE_BODY 
= 0.20 +CONF_FLOOR_BODY = 0.00 + +TRACK_DIST_PENALTY = 1.5 +FACE_WEIGHT_IN_SCORE = 0.15 +HAND_WEIGHT_IN_SCORE = 0.35 + +ALLOW_DISAPPEAR_JOINTS = {3, 4, 6, 7} + +GAP_FILL_ENABLED = True +MAX_GAP_FRAMES = 12 +MIN_RUN_FRAMES = 2 + +TORSO_SYNC_ENABLED = True +TORSO_JOINTS = {1, 2, 5, 8, 11} +TORSO_LOOKAHEAD_FRAMES = 32 + +SUPER_SMOOTH_ENABLED = True +SUPER_SMOOTH_ALPHA = 0.7 +SUPER_SMOOTH_MIN_CONF = 0.20 + +MEDIAN3_ENABLED = True + +FACE_SMOOTH_ENABLED = True +HANDS_SMOOTH_ENABLED = False + +CONF_GATE_FACE = 0.20 +CONF_GATE_HAND = 0.50 + +HAND_MIN_POINTS_PRESENT = 7 +MIN_HAND_RUN_FRAMES = 6 + +DENSE_GAP_FILL_ENABLED = False +DENSE_MAX_GAP_FRAMES = 8 +DENSE_MIN_RUN_FRAMES = 2 + +DENSE_MEDIAN3_ENABLED = False +DENSE_SUPER_SMOOTH_ENABLED = False +DENSE_SUPER_SMOOTH_ALPHA = 0.7 + + +def _snapshot_tunable_globals() -> Dict[str, Any]: + keys = [ + "FILTER_EXTRA_PEOPLE", + "SUPER_SMOOTH_ALPHA", + "MAX_GAP_FRAMES", + "MIN_RUN_FRAMES", + "DENSE_SUPER_SMOOTH_ALPHA", + "DENSE_MAX_GAP_FRAMES", + "DENSE_MIN_RUN_FRAMES", + ] + return {k: globals().get(k) for k in keys} + + +def _restore_tunable_globals(old: Dict[str, Any]) -> None: + for k, v in old.items(): + globals()[k] = v + + +def _is_valid_xyc(x: float, y: float, c: float) -> bool: + if c is None: + return False + if c <= 0: + return False + if x == 0 and y == 0: + return False + if math.isnan(x) or math.isnan(y) or math.isnan(c): + return False + return True + + +def _reshape_keypoints_2d(arr: List[float]) -> List[Tuple[float, float, float]]: + if arr is None: + return [] + if len(arr) % 3 != 0: + raise ValueError(f"keypoints length not multiple of 3: {len(arr)}") + out = [] + for i in range(0, len(arr), 3): + out.append((float(arr[i]), float(arr[i + 1]), float(arr[i + 2]))) + return out + + +def _flatten_keypoints_2d(kps: List[Tuple[float, float, float]]) -> List[float]: + out: List[float] = [] + for x, y, c in kps: + out.extend([float(x), float(y), float(c)]) + return out + + +def _sum_conf(arr: Optional[List[float]], sample_step: int = 1) -> float: + if not arr: + return 0.0 + s = 0.0 + for i in range(2, len(arr), 3 * sample_step): + try: + c = float(arr[i]) + except Exception: + c = 0.0 + if c > 0: + s += c + return s + + +def _body_center_from_pose(pose_arr: Optional[List[float]]) -> Optional[Tuple[float, float]]: + if not pose_arr: + return None + kps = _reshape_keypoints_2d(pose_arr) + idxs = [2, 5, 8, 11, 1] + pts = [] + for idx in idxs: + if idx < len(kps): + x, y, c = kps[idx] + if _is_valid_xyc(x, y, c): + pts.append((x, y)) + if not pts: + for x, y, c in kps: + if _is_valid_xyc(x, y, c): + pts.append((x, y)) + if not pts: + return None + cx = sum(p[0] for p in pts) / len(pts) + cy = sum(p[1] for p in pts) / len(pts) + return (cx, cy) + + +def _dist(a: Tuple[float, float], b: Tuple[float, float]) -> float: + return math.hypot(a[0] - b[0], a[1] - b[1]) + + +def _choose_single_person( + people: List[Dict[str, Any]], prev_center: Optional[Tuple[float, float]] +) -> Optional[Dict[str, Any]]: + if not people: + return None + best = None + best_score = -1e18 + + for p in people: + pose = p.get("pose_keypoints_2d") + face = p.get("face_keypoints_2d") + lh = p.get("hand_left_keypoints_2d") + rh = p.get("hand_right_keypoints_2d") + + score = _sum_conf(pose) + score += FACE_WEIGHT_IN_SCORE * _sum_conf(face, sample_step=4) + score += HAND_WEIGHT_IN_SCORE * (_sum_conf(lh, sample_step=2) + _sum_conf(rh, sample_step=2)) + + center = _body_center_from_pose(pose) + if prev_center is not None and center is not None: + score -= TRACK_DIST_PENALTY * 
_dist(prev_center, center) + + if score > best_score: + best_score = score + best = p + + return best + + +@dataclass +class _Track: + frames: Dict[int, Dict[str, Any]] + centers: Dict[int, Tuple[float, float]] + last_t: int + last_center: Tuple[float, float] + + +def _estimate_torso_scale(pose: List[Tuple[float, float, float]]) -> Optional[float]: + def dist(i, k) -> Optional[float]: + if i >= len(pose) or k >= len(pose): + return None + xi, yi, ci = pose[i] + xk, yk, ck = pose[k] + if not _is_valid_xyc(xi, yi, ci) or not _is_valid_xyc(xk, yk, ck): + return None + return math.hypot(xi - xk, yi - yk) + + cand = [dist(2, 5), dist(8, 11), dist(1, 8), dist(1, 11)] + cand = [c for c in cand if c is not None and c > 1e-3] + if not cand: + return None + return float(sum(cand) / len(cand)) + + +def _track_match_threshold_from_pose(pose_arr: Optional[List[float]]) -> float: + if isinstance(pose_arr, list): + pose = _reshape_keypoints_2d(pose_arr) + s = _estimate_torso_scale(pose) + if s is not None: + return max(float(TRACK_MATCH_MIN_PX), float(TRACK_MATCH_FACTOR) * float(s)) + return float(max(TRACK_MATCH_MIN_PX, 120.0)) + + +def _build_tracks_over_video(frames_data: List[Any]) -> List[_Track]: + tracks: List[_Track] = [] + + for t, frame in enumerate(frames_data): + if not isinstance(frame, dict): + continue + people = frame.get("people", []) + if not isinstance(people, list) or not people: + continue + + cand: List[Tuple[int, Dict[str, Any], Tuple[float, float]]] = [] + for i, p in enumerate(people): + if not isinstance(p, dict): + continue + pose = p.get("pose_keypoints_2d") + c = _body_center_from_pose(pose) + if c is None: + continue + cand.append((i, p, c)) + + if not cand: + continue + + used = set() + track_order = sorted(range(len(tracks)), key=lambda k: tracks[k].last_t, reverse=True) + + for k in track_order: + tr = tracks[k] + age = t - tr.last_t + if age > int(TRACK_MAX_FRAME_GAP): + continue + + best_idx = None + best_d = 1e18 + + for i, p, cc in cand: + if i in used: + continue + + thr = _track_match_threshold_from_pose(p.get("pose_keypoints_2d")) + d = _dist(tr.last_center, cc) + if d <= thr and d < best_d: + best_d = d + best_idx = i + + if best_idx is not None: + i, p, cc = next(x for x in cand if x[0] == best_idx) + used.add(i) + tr.frames[t] = p + tr.centers[t] = cc + tr.last_t = t + tr.last_center = cc + + for i, p, cc in cand: + if i in used: + continue + tracks.append(_Track(frames={t: p}, centers={t: cc}, last_t=t, last_center=cc)) + + return tracks + + +def _track_presence_score(tr: _Track) -> Tuple[int, float, float]: + frames_count = len(tr.frames) + face_sum = 0.0 + body_sum = 0.0 + for p in tr.frames.values(): + face_sum += _sum_conf(p.get("face_keypoints_2d"), sample_step=4) + body_sum += _sum_conf(p.get("pose_keypoints_2d"), sample_step=1) + return (frames_count, face_sum, body_sum) + + +def _pick_main_track(tracks: List[_Track]) -> Optional[_Track]: + if not tracks: + return None + best = None + best_key = (-1, -1e18, -1e18) + for tr in tracks: + key = _track_presence_score(tr) + if key > best_key: + best_key = key + best = tr + return best + + +@dataclass +class BodyState: + last_xy: List[Optional[Tuple[float, float]]] + last_v: List[Tuple[float, float]] + + def __init__(self, joints: int): + self.last_xy = [None] * joints + self.last_v = [(0.0, 0.0)] * joints + + +def _smooth_body_pose(pose_arr: Optional[List[float]], state: BodyState) -> Optional[List[float]]: + if pose_arr is None: + return None + + kps = _reshape_keypoints_2d(pose_arr) + J = len(kps) + if 
len(state.last_xy) != J: + state.last_xy = [None] * J + state.last_v = [(0.0, 0.0)] * J + + out: List[Tuple[float, float, float]] = [] + + for j in range(J): + x, y, c = kps[j] + last = state.last_xy[j] + vx_last, vy_last = state.last_v[j] + + valid_in = _is_valid_xyc(x, y, c) and (c >= CONF_GATE_BODY) + + if valid_in: + if last is None: + nx, ny = x, y + state.last_xy[j] = (nx, ny) + state.last_v[j] = (0.0, 0.0) + out.append((nx, ny, float(c))) + continue + + dx_raw = x - last[0] + dy_raw = y - last[1] + if abs(dx_raw) < EPS: + dx_raw = 0.0 + if abs(dy_raw) < EPS: + dy_raw = 0.0 + + vx = VEL_ALPHA * dx_raw + (1.0 - VEL_ALPHA) * vx_last + vy = VEL_ALPHA * dy_raw + (1.0 - VEL_ALPHA) * vy_last + + px = last[0] + vx + py = last[1] + vy + + nx = ALPHA_BODY * x + (1.0 - ALPHA_BODY) * px + ny = ALPHA_BODY * y + (1.0 - ALPHA_BODY) * py + + ddx = nx - last[0] + ddy = ny - last[1] + d = math.hypot(ddx, ddy) + if d > MAX_STEP_BODY and d > 1e-6: + scale = MAX_STEP_BODY / d + nx = last[0] + ddx * scale + ny = last[1] + ddy * scale + vx = nx - last[0] + vy = ny - last[1] + + state.last_xy[j] = (nx, ny) + state.last_v[j] = (vx, vy) + + out.append((nx, ny, float(c))) + else: + out.append((float(x), float(y), float(c))) + + return _flatten_keypoints_2d(out) + + +COCO18_EDGES = [ + (1, 2), + (2, 3), + (3, 4), + (1, 5), + (5, 6), + (6, 7), + (1, 8), + (8, 9), + (9, 10), + (1, 11), + (11, 12), + (12, 13), + (8, 11), + (1, 0), + (0, 14), + (14, 16), + (0, 15), + (15, 17), +] + +HAND21_EDGES = [ + (0, 1), + (1, 2), + (2, 3), + (3, 4), + (0, 5), + (5, 6), + (6, 7), + (7, 8), + (0, 9), + (9, 10), + (10, 11), + (11, 12), + (0, 13), + (13, 14), + (14, 15), + (15, 16), + (0, 17), + (17, 18), + (18, 19), + (19, 20), +] + +_NEIGHBORS = None + + +def _build_neighbors(): + global _NEIGHBORS + if _NEIGHBORS is not None: + return + neigh = {} + for a, b in COCO18_EDGES: + neigh.setdefault(a, set()).add(b) + neigh.setdefault(b, set()).add(a) + _NEIGHBORS = neigh + + +def _suppress_spatial_outliers_in_pose_arr( + pose_arr: Optional[List[float]], *, conf_gate: float +) -> Optional[List[float]]: + if not isinstance(pose_arr, list) or len(pose_arr) % 3 != 0: + return pose_arr + + pose = _reshape_keypoints_2d(pose_arr) + J = len(pose) + + center = _body_center_from_pose(pose_arr) + scale = _estimate_torso_scale(pose) + if center is None or scale is None: + return pose_arr + + cx, cy = center + max_r = TORSO_RADIUS_FACTOR * scale + max_bone = BONE_MAX_FACTOR * scale + + out = [list(p) for p in pose] + + def visible(j: int) -> bool: + if j >= J: + return False + x, y, c = out[j] + return (c >= conf_gate) and not (x == 0 and y == 0) + + for j in range(J): + x, y, c = out[j] + if c >= conf_gate and not (x == 0 and y == 0): + if math.hypot(x - cx, y - cy) > max_r: + out[j] = [0.0, 0.0, 0.0] + + for a, b in COCO18_EDGES: + if a >= J or b >= J: + continue + if not visible(a) or not visible(b): + continue + ax, ay, ac = out[a] + bx, by, bc = out[b] + d = math.hypot(ax - bx, ay - by) + if d > max_bone: + if ac <= bc: + out[a] = [0.0, 0.0, 0.0] + else: + out[b] = [0.0, 0.0, 0.0] + + flat: List[float] = [] + for x, y, c in out: + flat.extend([float(x), float(y), float(c)]) + return flat + + +def _suppress_isolated_joints_in_pose_arr( + pose_arr: Optional[List[float]], *, conf_gate: float, keep: set[int] = None +) -> Optional[List[float]]: + if not isinstance(pose_arr, list) or len(pose_arr) % 3 != 0: + return pose_arr + + _build_neighbors() + pose = _reshape_keypoints_2d(pose_arr) + J = len(pose) + out = [list(p) for p in pose] + + if 
keep is None: + keep = set() + + def vis(j: int) -> bool: + if j >= J: + return False + x, y, c = out[j] + return (c >= conf_gate) and not (x == 0 and y == 0) + + for j in range(J): + if j in keep: + continue + if not vis(j): + continue + neighs = _NEIGHBORS.get(j, set()) + if not any((n < J and vis(n)) for n in neighs): + out[j] = [0.0, 0.0, 0.0] + + flat = [] + for x, y, c in out: + flat.extend([float(x), float(y), float(c)]) + return flat + + +def _denoise_and_fill_gaps_pose_seq( + pose_arr_seq: List[Optional[List[float]]], + *, + conf_gate: float, + min_run: int, + max_gap: int, +) -> List[Optional[List[float]]]: + if not pose_arr_seq: + return pose_arr_seq + + J = None + for arr in pose_arr_seq: + if isinstance(arr, list) and len(arr) % 3 == 0 and len(arr) > 0: + J = len(arr) // 3 + break + if J is None: + return pose_arr_seq + + T = len(pose_arr_seq) + out_seq: List[Optional[List[float]]] = [] + for arr in pose_arr_seq: + if isinstance(arr, list) and len(arr) == J * 3: + out_seq.append(list(arr)) + else: + out_seq.append(arr) + + def is_vis(arr: List[float], j: int) -> bool: + x = float(arr[3 * j + 0]) + y = float(arr[3 * j + 1]) + c = float(arr[3 * j + 2]) + return (c >= conf_gate) and not (x == 0 and y == 0) + + # 1) remove short flashes + for j in range(J): + start = None + for t in range(T + 1): + cur = False + if t < T and isinstance(out_seq[t], list): + cur = is_vis(out_seq[t], j) + if cur and start is None: + start = t + if (not cur) and start is not None: + run_len = t - start + if run_len < min_run: + for k in range(start, t): + if not isinstance(out_seq[k], list): + continue + out_seq[k][3 * j + 0] = 0.0 + out_seq[k][3 * j + 1] = 0.0 + out_seq[k][3 * j + 2] = 0.0 + start = None + + # 2) gap fill only if returns + for j in range(J): + last_vis_t = None + t = 0 + while t < T: + arr = out_seq[t] + if not isinstance(arr, list): + t += 1 + continue + + cur_vis = is_vis(arr, j) + if cur_vis: + last_vis_t = t + t += 1 + continue + + if last_vis_t is None: + t += 1 + continue + + gap_start = t + t2 = t + while t2 < T: + arr2 = out_seq[t2] + if isinstance(arr2, list) and is_vis(arr2, j): + break + t2 += 1 + + if t2 >= T: + break + + gap_len = t2 - gap_start + if gap_len <= 0: + t = t2 + continue + + if gap_len <= max_gap: + a = out_seq[last_vis_t] + b = out_seq[t2] + if isinstance(a, list) and isinstance(b, list): + ax, ay, ac = float(a[3 * j + 0]), float(a[3 * j + 1]), float(a[3 * j + 2]) + bx, by, bc = float(b[3 * j + 0]), float(b[3 * j + 1]), float(b[3 * j + 2]) + if not (ax == 0 and ay == 0) and not (bx == 0 and by == 0): + conf_fill = min(ac, bc) + for k in range(gap_len): + tt = gap_start + k + if not isinstance(out_seq[tt], list): + continue + r = (k + 1) / (gap_len + 1) + x = ax + (bx - ax) * r + y = ay + (by - ay) * r + out_seq[tt][3 * j + 0] = float(x) + out_seq[tt][3 * j + 1] = float(y) + out_seq[tt][3 * j + 2] = float(conf_fill) + + t = t2 + + return out_seq + + +def _zero_lag_ema_pose_seq( + pose_seq: List[Optional[List[float]]], *, alpha: float, conf_gate: float +) -> List[Optional[List[float]]]: + if not pose_seq: + return pose_seq + + J = None + for arr in pose_seq: + if isinstance(arr, list) and len(arr) % 3 == 0 and len(arr) > 0: + J = len(arr) // 3 + break + if J is None: + return pose_seq + + T = len(pose_seq) + + def is_vis(arr: List[float], j: int) -> bool: + x = float(arr[3 * j + 0]) + y = float(arr[3 * j + 1]) + c = float(arr[3 * j + 2]) + return (c >= conf_gate) and not (x == 0 and y == 0) + + fwd = [None] * T + last = [None] * J + for t in range(T): + arr = 
pose_seq[t] + if not isinstance(arr, list) or len(arr) != J * 3: + fwd[t] = arr + continue + out = list(arr) + for j in range(J): + if is_vis(arr, j): + x = float(arr[3 * j + 0]) + y = float(arr[3 * j + 1]) + if last[j] is None: + sx, sy = x, y + else: + sx = alpha * x + (1 - alpha) * last[j][0] + sy = alpha * y + (1 - alpha) * last[j][1] + last[j] = (sx, sy) + out[3 * j + 0] = float(sx) + out[3 * j + 1] = float(sy) + fwd[t] = out + + bwd = [None] * T + last = [None] * J + for t in range(T - 1, -1, -1): + arr = fwd[t] + if not isinstance(arr, list) or len(arr) != J * 3: + bwd[t] = arr + continue + out = list(arr) + for j in range(J): + if is_vis(arr, j): + x = float(arr[3 * j + 0]) + y = float(arr[3 * j + 1]) + if last[j] is None: + sx, sy = x, y + else: + sx = alpha * x + (1 - alpha) * last[j][0] + sy = alpha * y + (1 - alpha) * last[j][1] + last[j] = (sx, sy) + out[3 * j + 0] = float(sx) + out[3 * j + 1] = float(sy) + bwd[t] = out + + return bwd + + +def _apply_root_scale( + pose_arr: Optional[List[float]], + *, + src_root: Tuple[float, float], + src_scale: float, + dst_root: Tuple[float, float], + dst_scale: float, +) -> Optional[List[float]]: + if not isinstance(pose_arr, list) or len(pose_arr) % 3 != 0: + return pose_arr + if src_scale <= 1e-6 or dst_scale <= 1e-6: + return pose_arr + + kps = _reshape_keypoints_2d(pose_arr) + out = [] + s = dst_scale / src_scale + + for x, y, c in kps: + if c <= 0 or (x == 0 and y == 0): + out.append((x, y, c)) + continue + nx = dst_root[0] + (x - src_root[0]) * s + ny = dst_root[1] + (y - src_root[1]) * s + out.append((nx, ny, c)) + + return _flatten_keypoints_2d(out) + + +def _carry_pose_when_torso_missing( + pose_seq: List[Optional[List[float]]], + *, + conf_gate: float, + max_carry: int, + anchor_joints: List[int], + min_anchors: int, +) -> List[Optional[List[float]]]: + if not pose_seq: + return pose_seq + + J = None + for arr in pose_seq: + if isinstance(arr, list) and len(arr) % 3 == 0 and len(arr) > 0: + J = len(arr) // 3 + break + if J is None: + return pose_seq + + out = [a if a is None else list(a) for a in pose_seq] + + FILL_JOINTS = {1, 8, 9, 10, 11, 12, 13} + FILL_JOINTS -= set(ALLOW_DISAPPEAR_JOINTS) + + def is_vis_flat(arr: List[float], j: int) -> bool: + x = float(arr[3 * j + 0]) + y = float(arr[3 * j + 1]) + c = float(arr[3 * j + 2]) + return (c >= conf_gate) and not (x == 0 and y == 0) + + def count_visible(arr: List[float], joints: List[int]) -> int: + c = 0 + for j in joints: + if j < J and is_vis_flat(arr, j): + c += 1 + return c + + def root_scale_from_anchors(arr: List[float]) -> Optional[Tuple[Tuple[float, float], float]]: + pts = [] + for j in anchor_joints: + if j >= J: + continue + if is_vis_flat(arr, j): + x = float(arr[3 * j + 0]) + y = float(arr[3 * j + 1]) + pts.append((x, y)) + if len(pts) < min_anchors: + return None + + rx = sum(p[0] for p in pts) / len(pts) + ry = sum(p[1] for p in pts) / len(pts) + + xs = [p[0] for p in pts] + ys = [p[1] for p in pts] + scale = max(max(xs) - min(xs), max(ys) - min(ys)) + if scale <= 1e-3: + return None + + return (rx, ry), float(scale) + + last_good: Optional[List[float]] = None + last_good_rs: Optional[Tuple[Tuple[float, float], float]] = None + carry_left = 0 + + for t in range(len(out)): + arr = out[t] + if not isinstance(arr, list) or len(arr) != J * 3: + continue + + anchors_ok = count_visible(arr, anchor_joints) >= min_anchors + fill_vis = sum(1 for j in FILL_JOINTS if j < J and is_vis_flat(arr, j)) + rs = root_scale_from_anchors(arr) + + if anchors_ok and rs is not None and 
fill_vis >= 2: + last_good = list(arr) + last_good_rs = rs + carry_left = max_carry + continue + + if anchors_ok and rs is not None and last_good is not None and last_good_rs is not None and carry_left > 0: + dst_root, dst_scale = rs + src_root, src_scale = last_good_rs + + carried_full = _apply_root_scale( + last_good, + src_root=src_root, + src_scale=src_scale, + dst_root=dst_root, + dst_scale=dst_scale, + ) + if isinstance(carried_full, list) and len(carried_full) == J * 3: + for j in FILL_JOINTS: + if j >= J: + continue + if is_vis_flat(arr, j): + continue + + cx = float(carried_full[3 * j + 0]) + cy = float(carried_full[3 * j + 1]) + cc = float(carried_full[3 * j + 2]) + + if (cx == 0 and cy == 0) or cc <= 0: + continue + + arr[3 * j + 0] = cx + arr[3 * j + 1] = cy + arr[3 * j + 2] = max(min(cc, 0.60), conf_gate) + + out[t] = arr + carry_left -= 1 + continue + + carry_left = max(carry_left - 1, 0) + + return out + + +def _force_full_torso_pair( + pose_seq: List[Optional[List[float]]], + *, + conf_gate: float, + anchor_joints: List[int], + min_anchors: int, + max_lookback: int = 240, + fill_legs_with_hip: bool = True, + always_fill_if_one_hip: bool = True, +) -> List[Optional[List[float]]]: + if not pose_seq: + return pose_seq + + J = None + for arr in pose_seq: + if isinstance(arr, list) and len(arr) % 3 == 0 and len(arr) > 0: + J = len(arr) // 3 + break + if J is None: + return pose_seq + + out = [a if a is None else list(a) for a in pose_seq] + + R_HIP, R_KNEE, R_ANK = 8, 9, 10 + L_HIP, L_KNEE, L_ANK = 11, 12, 13 + + def is_vis(arr: List[float], j: int) -> bool: + if j >= J: + return False + x = float(arr[3 * j + 0]) + y = float(arr[3 * j + 1]) + c = float(arr[3 * j + 2]) + return (c >= conf_gate) and not (x == 0 and y == 0) + + def count_visible(arr: List[float], joints: List[int]) -> int: + c = 0 + for j in joints: + if is_vis(arr, j): + c += 1 + return c + + def root_scale_from_anchors(arr: List[float]) -> Optional[Tuple[Tuple[float, float], float]]: + pts = [] + for j in anchor_joints: + if j >= J: + continue + if is_vis(arr, j): + pts.append((float(arr[3 * j + 0]), float(arr[3 * j + 1]))) + if len(pts) < min_anchors: + return None + + rx = sum(p[0] for p in pts) / len(pts) + ry = sum(p[1] for p in pts) / len(pts) + + xs = [p[0] for p in pts] + ys = [p[1] for p in pts] + scale = max(max(xs) - min(xs), max(ys) - min(ys)) + if scale <= 1e-3: + return None + return (rx, ry), float(scale) + + last_full_idx = None + last_full = None + last_full_rs = None + + for t in range(len(out)): + arr = out[t] + if not isinstance(arr, list) or len(arr) != J * 3: + continue + + rs = root_scale_from_anchors(arr) + + r_ok = is_vis(arr, R_HIP) + l_ok = is_vis(arr, L_HIP) + + anchors_ok = count_visible(arr, anchor_joints) >= min_anchors + + if anchors_ok and rs is not None and r_ok and l_ok: + last_full_idx = t + last_full = list(arr) + last_full_rs = rs + continue + + if last_full is None or last_full_rs is None or last_full_idx is None: + continue + if (t - last_full_idx) > max_lookback: + continue + if not (r_ok or l_ok): + continue + if r_ok and l_ok: + continue + if not always_fill_if_one_hip: + continue + if rs is None: + continue + + dst_root, dst_scale = rs + src_root, src_scale = last_full_rs + + carried = _apply_root_scale( + last_full, + src_root=src_root, + src_scale=src_scale, + dst_root=dst_root, + dst_scale=dst_scale, + ) + if not (isinstance(carried, list) and len(carried) == J * 3): + continue + + def copy_joint(j: int): + if j >= J: + return + if is_vis(arr, j): + return + cx = 
float(carried[3 * j + 0]) + cy = float(carried[3 * j + 1]) + cc = float(carried[3 * j + 2]) + if (cx == 0 and cy == 0) or cc <= 0: + return + arr[3 * j + 0] = cx + arr[3 * j + 1] = cy + arr[3 * j + 2] = max(min(cc, 0.60), conf_gate) + + if not r_ok: + copy_joint(R_HIP) + if fill_legs_with_hip: + copy_joint(R_KNEE) + copy_joint(R_ANK) + + if not l_ok: + copy_joint(L_HIP) + if fill_legs_with_hip: + copy_joint(L_KNEE) + copy_joint(L_ANK) + + out[t] = arr + + return out + + +def _median3_pose_seq(pose_seq: List[Optional[List[float]]], *, conf_gate: float) -> List[Optional[List[float]]]: + if not pose_seq: + return pose_seq + + J = None + for arr in pose_seq: + if isinstance(arr, list) and len(arr) % 3 == 0 and len(arr) > 0: + J = len(arr) // 3 + break + if J is None: + return pose_seq + + T = len(pose_seq) + + def is_vis(arr: List[float], j: int) -> bool: + x = float(arr[3 * j + 0]) + y = float(arr[3 * j + 1]) + c = float(arr[3 * j + 2]) + return (c >= conf_gate) and not (x == 0 and y == 0) + + out_seq: List[Optional[List[float]]] = [] + for t in range(T): + arr = pose_seq[t] + if not isinstance(arr, list) or len(arr) != J * 3: + out_seq.append(arr) + continue + + out = list(arr) + t0 = max(0, t - 1) + t1 = t + t2 = min(T - 1, t + 1) + + a0 = pose_seq[t0] + a1 = pose_seq[t1] + a2 = pose_seq[t2] + + for j in range(J): + if not is_vis(arr, j): + continue + + xs, ys = [], [] + for aa in (a0, a1, a2): + if isinstance(aa, list) and len(aa) == J * 3 and is_vis(aa, j): + xs.append(float(aa[3 * j + 0])) + ys.append(float(aa[3 * j + 1])) + + if len(xs) >= 2: + xs.sort() + ys.sort() + out[3 * j + 0] = float(xs[len(xs) // 2]) + out[3 * j + 1] = float(ys[len(ys) // 2]) + + out_seq.append(out) + + return out_seq + + +def _sync_group_appearances( + pose_arr_seq: List[Optional[List[float]]], + *, + group: set[int], + conf_gate: float, + lookahead: int, +) -> List[Optional[List[float]]]: + if not pose_arr_seq: + return pose_arr_seq + + J = None + for arr in pose_arr_seq: + if isinstance(arr, list) and len(arr) % 3 == 0 and len(arr) > 0: + J = len(arr) // 3 + break + if J is None: + return pose_arr_seq + + T = len(pose_arr_seq) + out_seq: List[Optional[List[float]]] = [] + for arr in pose_arr_seq: + if isinstance(arr, list) and len(arr) == J * 3: + out_seq.append(list(arr)) + else: + out_seq.append(arr) + + def is_vis(arr: List[float], j: int) -> bool: + x = float(arr[3 * j + 0]) + y = float(arr[3 * j + 1]) + c = float(arr[3 * j + 2]) + return (c >= conf_gate) and not (x == 0 and y == 0) + + for t in range(T): + arr = out_seq[t] + if not isinstance(arr, list): + continue + + vis = {j for j in group if j < J and is_vis(arr, j)} + if not vis: + continue + + missing = {j for j in group if j < J and j not in vis} + if not missing: + continue + + appear_t: dict[int, int] = {} + for j in list(missing): + t2 = t + 1 + while t2 < T and t2 <= t + lookahead: + arr2 = out_seq[t2] + if isinstance(arr2, list) and is_vis(arr2, j): + appear_t[j] = t2 + break + t2 += 1 + + if not appear_t: + continue + + for j, t2 in appear_t.items(): + last_t = None + for tb in range(t - 1, -1, -1): + arrb = out_seq[tb] + if isinstance(arrb, list) and is_vis(arrb, j): + last_t = tb + break + + if last_t is None: + b = out_seq[t2] + if not isinstance(b, list): + continue + bx, by, bc = float(b[3 * j + 0]), float(b[3 * j + 1]), float(b[3 * j + 2]) + for k in range(t, t2): + a = out_seq[k] + if not isinstance(a, list): + continue + a[3 * j + 0] = bx + a[3 * j + 1] = by + a[3 * j + 2] = bc + continue + + a0 = out_seq[last_t] + b0 = out_seq[t2] + 
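+            # Interpolate the joint linearly between its last visible frame and
+            # its reappearance, carrying the weaker of the two confidences so
+            # the filled frames stay conservative for downstream gates.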
if not (isinstance(a0, list) and isinstance(b0, list)): + continue + + ax, ay, ac = float(a0[3 * j + 0]), float(a0[3 * j + 1]), float(a0[3 * j + 2]) + bx, by, bc = float(b0[3 * j + 0]), float(b0[3 * j + 1]), float(b0[3 * j + 2]) + + if (ax == 0 and ay == 0) or (bx == 0 and by == 0): + continue + + conf_fill = min(ac, bc) + total = t2 - last_t + if total <= 0: + continue + + for tt in range(t, t2): + a = out_seq[tt] + if not isinstance(a, list): + continue + r = (tt - last_t) / total + x = ax + (bx - ax) * r + y = ay + (by - ay) * r + a[3 * j + 0] = float(x) + a[3 * j + 1] = float(y) + a[3 * j + 2] = float(conf_fill) + + return out_seq + + +def _count_valid_points(arr: Optional[List[float]], *, conf_gate: float) -> int: + if not isinstance(arr, list) or len(arr) % 3 != 0: + return 0 + cnt = 0 + for i in range(0, len(arr), 3): + x, y, c = float(arr[i]), float(arr[i + 1]), float(arr[i + 2]) + if c >= conf_gate and not (x == 0 and y == 0): + cnt += 1 + return cnt + + +def _zero_out_kps(arr: Optional[List[float]]) -> Optional[List[float]]: + if not isinstance(arr, list) or len(arr) % 3 != 0: + return arr + out = list(arr) + for i in range(0, len(out), 3): + out[i + 0] = 0.0 + out[i + 1] = 0.0 + out[i + 2] = 0.0 + return out + + +def _pin_body_wrist_to_hand( + p_out: Dict[str, Any], + *, + side: str, + conf_gate_body: float = 0.2, + conf_gate_hand: float = 0.2, + blend: float = 1.0, +) -> None: + if side == "right": + bw = 4 + hk = "hand_right_keypoints_2d" + else: + bw = 7 + hk = "hand_left_keypoints_2d" + + pose = p_out.get("pose_keypoints_2d") + hand = p_out.get(hk) + + if not (isinstance(pose, list) and isinstance(hand, list)): + return + if len(pose) < (bw * 3 + 3): + return + if len(hand) < 3: + return + + hx, hy, hc = float(hand[0]), float(hand[1]), float(hand[2]) + if hc < conf_gate_hand or (hx == 0.0 and hy == 0.0): + return + + bx, by, bc = float(pose[bw * 3 + 0]), float(pose[bw * 3 + 1]), float(pose[bw * 3 + 2]) + + if bc < conf_gate_body or (bx == 0.0 and by == 0.0): + pose[bw * 3 + 0] = hx + pose[bw * 3 + 1] = hy + pose[bw * 3 + 2] = float(max(bc, min(hc, 0.9))) + else: + nx = bx * (1.0 - blend) + hx * blend + ny = by * (1.0 - blend) + hy * blend + pose[bw * 3 + 0] = nx + pose[bw * 3 + 1] = ny + pose[bw * 3 + 2] = float(min(bc, hc)) + + p_out["pose_keypoints_2d"] = pose + + +def _fix_elbow_using_wrist(p_out: Dict[str, Any], *, side: str, conf_gate: float = 0.2) -> None: + pose = p_out.get("pose_keypoints_2d") + if not isinstance(pose, list) or len(pose) % 3 != 0: + return + + if side == "right": + sh, el, wr = 2, 3, 4 + else: + sh, el, wr = 5, 6, 7 + + def get(j): + return float(pose[3 * j + 0]), float(pose[3 * j + 1]), float(pose[3 * j + 2]) + + def vis(x, y, c): + return c >= conf_gate and not (x == 0.0 and y == 0.0) + + sx, sy, sc = get(sh) + ex, ey, ec = get(el) + wx, wy, wc = get(wr) + + if not (vis(sx, sy, sc) and vis(wx, wy, wc)): + return + + if vis(ex, ey, ec): + Lse = math.hypot(ex - sx, ey - sy) + Lew = math.hypot(wx - ex, wy - ey) + else: + dsw = math.hypot(wx - sx, wy - sy) + if dsw < 1e-3: + return + Lse = 0.55 * dsw + Lew = 0.45 * dsw + + dx = wx - sx + dy = wy - sy + d = math.hypot(dx, dy) + if d < 1e-6: + return + + d2 = max(min(d, (Lse + Lew) - 1e-3), abs(Lse - Lew) + 1e-3) + + a = (Lse * Lse - Lew * Lew + d2 * d2) / (2.0 * d2) + h2 = max(Lse * Lse - a * a, 0.0) + h = math.sqrt(h2) + + ux = dx / d + uy = dy / d + px = sx + a * ux + py = sy + a * uy + + rx = -uy + ry = ux + + e1x, e1y = px + h * rx, py + h * ry + e2x, e2y = px - h * rx, py - h * ry + + if vis(ex, 
ey, ec): + if math.hypot(e1x - ex, e1y - ey) <= math.hypot(e2x - ex, e2y - ey): + nx, ny = e1x, e1y + else: + nx, ny = e2x, e2y + else: + nx, ny = e1x, e1y + + pose[3 * el + 0] = float(nx) + pose[3 * el + 1] = float(ny) + pose[3 * el + 2] = float(max(min(ec, 0.8), conf_gate)) + + p_out["pose_keypoints_2d"] = pose + + +def _remove_short_presence_runs_kps_seq( + seq: List[Optional[List[float]]], + *, + conf_gate: float, + min_points_present: int, + min_run: int, +) -> List[Optional[List[float]]]: + if not seq: + return seq + + present = [(_count_valid_points(a, conf_gate=conf_gate) >= min_points_present) for a in seq] + out = [None if a is None else list(a) for a in seq] + + start = None + for t in range(len(seq) + 1): + cur = present[t] if t < len(seq) else False + if cur and start is None: + start = t + if (not cur) and start is not None: + run_len = t - start + if run_len < min_run: + for k in range(start, t): + out[k] = _zero_out_kps(out[k]) + start = None + + return out + + +def _zero_sparse_frames_kps_seq( + seq: List[Optional[List[float]]], *, conf_gate: float, min_points_present: int +) -> List[Optional[List[float]]]: + if not seq: + return seq + + out: List[Optional[List[float]]] = [] + for a in seq: + if not isinstance(a, list): + out.append(a) + continue + if _count_valid_points(a, conf_gate=conf_gate) < min_points_present: + out.append(_zero_out_kps(a)) + else: + out.append(a) + return out + + +def _suppress_spatial_outliers_in_hand_arr( + hand_arr: Optional[List[float]], *, conf_gate: float, max_bone_factor: float = 3.0 +) -> Optional[List[float]]: + if not isinstance(hand_arr, list) or len(hand_arr) % 3 != 0: + return hand_arr + pts = _reshape_keypoints_2d(hand_arr) + J = len(pts) + if J < 21: + return hand_arr + + out = [list(p) for p in pts] + + def vis(j: int) -> bool: + x, y, c = out[j] + return c >= conf_gate and not (x == 0 and y == 0) + + vv = [(x, y) for (x, y, c) in out if c >= conf_gate and not (x == 0 and y == 0)] + if len(vv) < 6: + return hand_arr + xs = [p[0] for p in vv] + ys = [p[1] for p in vv] + scale = max(max(xs) - min(xs), max(ys) - min(ys)) + if scale <= 1e-3: + return hand_arr + max_bone = max_bone_factor * scale + + for a, b in HAND21_EDGES: + if a >= J or b >= J: + continue + if not vis(a) or not vis(b): + continue + ax, ay, ac = out[a] + bx, by, bc = out[b] + d = math.hypot(ax - bx, ay - by) + if d > max_bone: + if ac <= bc: + out[a] = [0.0, 0.0, 0.0] + else: + out[b] = [0.0, 0.0, 0.0] + + return _flatten_keypoints_2d([(x, y, c) for x, y, c in out]) + + +def _body_head_root_scale_from_pose( + pose_arr: Optional[List[float]], *, conf_gate: float +) -> Optional[Tuple[Tuple[float, float], float]]: + if not isinstance(pose_arr, list) or len(pose_arr) % 3 != 0: + return None + kps = _reshape_keypoints_2d(pose_arr) + + def vis(j: int) -> Optional[Tuple[float, float]]: + if j >= len(kps): + return None + x, y, c = kps[j] + if c >= conf_gate and not (x == 0 and y == 0): + return (float(x), float(y)) + return None + + pts = [] + for j in [0, 1, 14, 15, 16, 17]: + p = vis(j) + if p is not None: + pts.append(p) + + if not pts: + return None + + rx = sum(p[0] for p in pts) / len(pts) + ry = sum(p[1] for p in pts) / len(pts) + root = (rx, ry) + + def dist(a: int, b: int) -> Optional[float]: + pa, pb = vis(a), vis(b) + if pa is None or pb is None: + return None + d = math.hypot(pa[0] - pb[0], pa[1] - pb[1]) + return d if d > 1e-3 else None + + cands = [dist(14, 15), dist(16, 17), dist(2, 5)] + cands = [c for c in cands if c is not None] + if not cands: + return None 
+ + scale = float(sum(cands) / len(cands)) + return root, scale + + +def _body_wrist_root_scale_from_pose( + pose_arr: Optional[List[float]], *, side: str, conf_gate: float +) -> Optional[Tuple[Tuple[float, float], float]]: + if not isinstance(pose_arr, list) or len(pose_arr) % 3 != 0: + return None + kps = _reshape_keypoints_2d(pose_arr) + + if side == "right": + w, e = 4, 3 + else: + w, e = 7, 6 + + def vis(j: int) -> Optional[Tuple[float, float]]: + if j >= len(kps): + return None + x, y, c = kps[j] + if c >= conf_gate and not (x == 0 and y == 0): + return (float(x), float(y)) + return None + + pw = vis(w) + if pw is None: + return None + root = pw + + pe = vis(e) + scale = None + if pe is not None: + d = math.hypot(pw[0] - pe[0], pw[1] - pe[1]) + if d > 1e-3: + scale = d + + if scale is None: + p2 = vis(2) + p5 = vis(5) + if p2 is not None and p5 is not None: + d = math.hypot(p2[0] - p5[0], p2[1] - p5[1]) + if d > 1e-3: + scale = d + + if scale is None: + return None + + return root, float(scale) + + +def _smooth_dense_seq_anchored_to_body( + dense_seq: List[Optional[List[float]]], + body_pose_seq: List[Optional[List[float]]], + *, + kind: str, + conf_gate_dense: float, + conf_gate_body: float, + median3: bool, + zero_lag_alpha: float, +) -> List[Optional[List[float]]]: + if not dense_seq: + return dense_seq + + Jd = None + for a in dense_seq: + if isinstance(a, list) and len(a) % 3 == 0 and len(a) > 0: + Jd = len(a) // 3 + break + if Jd is None: + return dense_seq + + T = len(dense_seq) + out = [None if a is None else list(a) for a in dense_seq] + + norm_seq: List[Optional[List[float]]] = [None] * T + + for t in range(T): + arr = out[t] + body = body_pose_seq[t] if t < len(body_pose_seq) else None + if not isinstance(arr, list) or len(arr) != Jd * 3 or not isinstance(body, list): + norm_seq[t] = arr + continue + + if kind == "face": + rs = _body_head_root_scale_from_pose(body, conf_gate=conf_gate_body) + elif kind == "hand_left": + rs = _body_wrist_root_scale_from_pose(body, side="left", conf_gate=conf_gate_body) + else: + rs = _body_wrist_root_scale_from_pose(body, side="right", conf_gate=conf_gate_body) + + if rs is None: + norm_seq[t] = arr + continue + + (rx, ry), s = rs + if s <= 1e-6: + norm_seq[t] = arr + continue + + nn = list(arr) + for j in range(Jd): + x = float(arr[3 * j + 0]) + y = float(arr[3 * j + 1]) + c = float(arr[3 * j + 2]) + if c >= conf_gate_dense and not (x == 0 and y == 0): + nn[3 * j + 0] = (x - rx) / s + nn[3 * j + 1] = (y - ry) / s + norm_seq[t] = nn + + if median3: + norm_seq = _median3_pose_seq(norm_seq, conf_gate=conf_gate_dense) + + norm_seq = _zero_lag_ema_pose_seq(norm_seq, alpha=zero_lag_alpha, conf_gate=conf_gate_dense) + + for t in range(T): + arrn = norm_seq[t] + body = body_pose_seq[t] if t < len(body_pose_seq) else None + if not isinstance(arrn, list) or len(arrn) != Jd * 3 or not isinstance(body, list): + continue + + if kind == "face": + rs = _body_head_root_scale_from_pose(body, conf_gate=conf_gate_body) + elif kind == "hand_left": + rs = _body_wrist_root_scale_from_pose(body, side="left", conf_gate=conf_gate_body) + else: + rs = _body_wrist_root_scale_from_pose(body, side="right", conf_gate=conf_gate_body) + + if rs is None: + continue + + (rx, ry), s = rs + if s <= 1e-6: + continue + + orig = out[t] + for j in range(Jd): + x = float(arrn[3 * j + 0]) + y = float(arrn[3 * j + 1]) + c = float(arrn[3 * j + 2]) + + ox = float(orig[3 * j + 0]) + oy = float(orig[3 * j + 1]) + oc = float(orig[3 * j + 2]) + + if oc >= conf_gate_dense and not (ox == 0 
and oy == 0) and c >= conf_gate_dense: + orig[3 * j + 0] = rx + x * s + orig[3 * j + 1] = ry + y * s + + out[t] = orig + + return out + + +def smooth_KPS_json_obj( + data: Any, + *, + keep_face_untouched: bool = True, + keep_hands_untouched: bool = True, + filter_extra_people: Optional[bool] = None, +) -> Any: + if not isinstance(data, list): + raise ValueError("Expected top-level JSON to be a list of frames.") + + if filter_extra_people is None: + filter_extra_people = bool(FILTER_EXTRA_PEOPLE) + + chosen_people: List[Optional[Dict[str, Any]]] = [None] * len(data) + + if MAIN_PERSON_MODE == "longest_track": + tracks = _build_tracks_over_video(data) + main_tr = _pick_main_track(tracks) + + if main_tr is not None: + for t in range(len(data)): + if t in main_tr.frames: + chosen_people[t] = main_tr.frames[t] + else: + prev_center: Optional[Tuple[float, float]] = None + for i, frame in enumerate(data): + if not isinstance(frame, dict): + continue + people = frame.get("people", []) + if not isinstance(people, list) or len(people) == 0: + continue + chosen = _choose_single_person(people, prev_center) + chosen_people[i] = chosen + if chosen is not None: + c = _body_center_from_pose(chosen.get("pose_keypoints_2d")) + if c is not None: + prev_center = c + else: + prev_center: Optional[Tuple[float, float]] = None + for i, frame in enumerate(data): + if not isinstance(frame, dict): + continue + people = frame.get("people", []) + if not isinstance(people, list) or len(people) == 0: + continue + chosen = _choose_single_person(people, prev_center) + chosen_people[i] = chosen + if chosen is not None: + c = _body_center_from_pose(chosen.get("pose_keypoints_2d")) + if c is not None: + prev_center = c + + pose_seq: List[Optional[List[float]]] = [] + for p in chosen_people: + pose_seq.append(p.get("pose_keypoints_2d") if isinstance(p, dict) else None) + + if SPATIAL_OUTLIER_FIX: + pose_seq = [ + _suppress_spatial_outliers_in_pose_arr(arr, conf_gate=CONF_GATE_BODY) if arr is not None else None + for arr in pose_seq + ] + + if GAP_FILL_ENABLED: + pose_seq = _denoise_and_fill_gaps_pose_seq( + pose_seq, + conf_gate=CONF_GATE_BODY, + min_run=MIN_RUN_FRAMES, + max_gap=MAX_GAP_FRAMES, + ) + + if TORSO_SYNC_ENABLED: + pose_seq = _sync_group_appearances( + pose_seq, + group=TORSO_JOINTS, + conf_gate=CONF_GATE_BODY, + lookahead=TORSO_LOOKAHEAD_FRAMES, + ) + + pose_seq = [ + ( + _suppress_isolated_joints_in_pose_arr(arr, conf_gate=CONF_GATE_BODY, keep=TORSO_JOINTS) + if arr is not None + else None + ) + for arr in pose_seq + ] + + if MEDIAN3_ENABLED: + pose_seq = _median3_pose_seq(pose_seq, conf_gate=CONF_GATE_BODY) + + if SUPER_SMOOTH_ENABLED: + pose_seq = _zero_lag_ema_pose_seq(pose_seq, alpha=SUPER_SMOOTH_ALPHA, conf_gate=SUPER_SMOOTH_MIN_CONF) + + if ROOTSCALE_CARRY_ENABLED: + pose_seq = _carry_pose_when_torso_missing( + pose_seq, + conf_gate=CARRY_CONF_GATE, + max_carry=CARRY_MAX_FRAMES, + anchor_joints=CARRY_ANCHOR_JOINTS, + min_anchors=CARRY_MIN_ANCHORS, + ) + + pose_seq = _force_full_torso_pair( + pose_seq, + conf_gate=CARRY_CONF_GATE, + anchor_joints=CARRY_ANCHOR_JOINTS, + min_anchors=CARRY_MIN_ANCHORS, + max_lookback=240, + fill_legs_with_hip=True, + always_fill_if_one_hip=True, + ) + + face_seq: List[Optional[List[float]]] = [] + lh_seq: List[Optional[List[float]]] = [] + rh_seq: List[Optional[List[float]]] = [] + + for p in chosen_people: + if isinstance(p, dict): + face_seq.append(p.get("face_keypoints_2d")) + lh_seq.append(p.get("hand_left_keypoints_2d")) + rh_seq.append(p.get("hand_right_keypoints_2d")) 
+ else: + face_seq.append(None) + lh_seq.append(None) + rh_seq.append(None) + + if HANDS_SMOOTH_ENABLED and (not keep_hands_untouched): + lh_seq = [ + _suppress_spatial_outliers_in_hand_arr(a, conf_gate=CONF_GATE_HAND) if a is not None else None + for a in lh_seq + ] + rh_seq = [ + _suppress_spatial_outliers_in_hand_arr(a, conf_gate=CONF_GATE_HAND) if a is not None else None + for a in rh_seq + ] + + lh_seq = _remove_short_presence_runs_kps_seq( + lh_seq, conf_gate=CONF_GATE_HAND, min_points_present=HAND_MIN_POINTS_PRESENT, min_run=MIN_HAND_RUN_FRAMES + ) + rh_seq = _remove_short_presence_runs_kps_seq( + rh_seq, conf_gate=CONF_GATE_HAND, min_points_present=HAND_MIN_POINTS_PRESENT, min_run=MIN_HAND_RUN_FRAMES + ) + + lh_seq = _zero_sparse_frames_kps_seq( + lh_seq, conf_gate=CONF_GATE_HAND, min_points_present=HAND_MIN_POINTS_PRESENT + ) + rh_seq = _zero_sparse_frames_kps_seq( + rh_seq, conf_gate=CONF_GATE_HAND, min_points_present=HAND_MIN_POINTS_PRESENT + ) + + if DENSE_GAP_FILL_ENABLED: + lh_seq = _denoise_and_fill_gaps_pose_seq( + lh_seq, conf_gate=CONF_GATE_HAND, min_run=DENSE_MIN_RUN_FRAMES, max_gap=DENSE_MAX_GAP_FRAMES + ) + rh_seq = _denoise_and_fill_gaps_pose_seq( + rh_seq, conf_gate=CONF_GATE_HAND, min_run=DENSE_MIN_RUN_FRAMES, max_gap=DENSE_MAX_GAP_FRAMES + ) + + if FACE_SMOOTH_ENABLED and (not keep_face_untouched): + if DENSE_GAP_FILL_ENABLED: + face_seq = _denoise_and_fill_gaps_pose_seq( + face_seq, conf_gate=CONF_GATE_FACE, min_run=DENSE_MIN_RUN_FRAMES, max_gap=DENSE_MAX_GAP_FRAMES + ) + + if FACE_SMOOTH_ENABLED and (not keep_face_untouched): + face_seq = _smooth_dense_seq_anchored_to_body( + face_seq, + pose_seq, + kind="face", + conf_gate_dense=CONF_GATE_FACE, + conf_gate_body=CONF_GATE_BODY, + median3=DENSE_MEDIAN3_ENABLED, + zero_lag_alpha=DENSE_SUPER_SMOOTH_ALPHA, + ) + + if HANDS_SMOOTH_ENABLED and (not keep_hands_untouched): + lh_seq = _smooth_dense_seq_anchored_to_body( + lh_seq, + pose_seq, + kind="hand_left", + conf_gate_dense=CONF_GATE_HAND, + conf_gate_body=CONF_GATE_BODY, + median3=DENSE_MEDIAN3_ENABLED, + zero_lag_alpha=DENSE_SUPER_SMOOTH_ALPHA, + ) + rh_seq = _smooth_dense_seq_anchored_to_body( + rh_seq, + pose_seq, + kind="hand_right", + conf_gate_dense=CONF_GATE_HAND, + conf_gate_body=CONF_GATE_BODY, + median3=DENSE_MEDIAN3_ENABLED, + zero_lag_alpha=DENSE_SUPER_SMOOTH_ALPHA, + ) + + out_frames = [] + body_state: Optional[BodyState] = None + + for i, frame in enumerate(data): + if not isinstance(frame, dict): + out_frames.append(frame) + continue + + frame_out = copy.deepcopy(frame) + chosen = chosen_people[i] + + if chosen is None: + if filter_extra_people: + frame_out["people"] = [] + out_frames.append(frame_out) + continue + + p_out = copy.deepcopy(chosen) + p_out["pose_keypoints_2d"] = pose_seq[i] + + pose_arr = p_out.get("pose_keypoints_2d") + joints = (len(pose_arr) // 3) if isinstance(pose_arr, list) else 0 + if body_state is None: + body_state = BodyState(joints if joints > 0 else 18) + + p_out["pose_keypoints_2d"] = _smooth_body_pose(p_out.get("pose_keypoints_2d"), body_state) + + if FACE_SMOOTH_ENABLED and (not keep_face_untouched): + p_out["face_keypoints_2d"] = face_seq[i] + else: + p_out["face_keypoints_2d"] = chosen.get("face_keypoints_2d", p_out.get("face_keypoints_2d")) + + if HANDS_SMOOTH_ENABLED and (not keep_hands_untouched): + p_out["hand_left_keypoints_2d"] = lh_seq[i] + p_out["hand_right_keypoints_2d"] = rh_seq[i] + else: + p_out["hand_left_keypoints_2d"] = chosen.get("hand_left_keypoints_2d", p_out.get("hand_left_keypoints_2d")) + 
p_out["hand_right_keypoints_2d"] = chosen.get( + "hand_right_keypoints_2d", p_out.get("hand_right_keypoints_2d") + ) + + _pin_body_wrist_to_hand( + p_out, side="left", conf_gate_body=CONF_GATE_BODY, conf_gate_hand=CONF_GATE_HAND, blend=1.0 + ) + _pin_body_wrist_to_hand( + p_out, side="right", conf_gate_body=CONF_GATE_BODY, conf_gate_hand=CONF_GATE_HAND, blend=1.0 + ) + + _fix_elbow_using_wrist(p_out, side="left", conf_gate=CONF_GATE_BODY) + _fix_elbow_using_wrist(p_out, side="right", conf_gate=CONF_GATE_BODY) + + if filter_extra_people: + frame_out["people"] = [p_out] + else: + orig_people = frame.get("people", []) + if not isinstance(orig_people, list): + frame_out["people"] = [p_out] + else: + replaced = False + new_people = [] + for op in orig_people: + if (not replaced) and (op is chosen): + new_people.append(p_out) + replaced = True + else: + new_people.append(copy.deepcopy(op)) + if not replaced: + new_people = [p_out] + [copy.deepcopy(op) for op in orig_people] + frame_out["people"] = new_people + + out_frames.append(frame_out) + + return out_frames + + +# ============================================================ +# === END: smooth_KPS_json.py logic +# ============================================================ + + +# ============================================================ +# === START: render_pose_video.py logic (ported to frame render) +# ============================================================ + +OP_COLORS: List[Tuple[int, int, int]] = [ + (255, 0, 0), + (255, 85, 0), + (255, 170, 0), + (255, 255, 0), + (170, 255, 0), + (85, 255, 0), + (0, 255, 0), + (0, 255, 85), + (0, 255, 170), + (0, 255, 255), + (0, 170, 255), + (0, 85, 255), + (0, 0, 255), + (85, 0, 255), + (170, 0, 255), + (255, 0, 255), + (255, 0, 170), + (255, 0, 85), +] + +BODY_EDGES: List[Tuple[int, int]] = [ + (1, 2), + (1, 5), + (2, 3), + (3, 4), + (5, 6), + (6, 7), + (1, 8), + (8, 9), + (9, 10), + (1, 11), + (11, 12), + (12, 13), + (1, 0), + (0, 14), + (14, 16), + (0, 15), + (15, 17), +] + +BODY_EDGE_COLORS = OP_COLORS[: len(BODY_EDGES)] +BODY_JOINT_COLORS = OP_COLORS + +HAND_EDGES: List[Tuple[int, int]] = [ + (0, 1), + (1, 2), + (2, 3), + (3, 4), + (0, 5), + (5, 6), + (6, 7), + (7, 8), + (0, 9), + (9, 10), + (10, 11), + (11, 12), + (0, 13), + (13, 14), + (14, 15), + (15, 16), + (0, 17), + (17, 18), + (18, 19), + (19, 20), +] + + +def _valid_pt(x: float, y: float, c: float, conf_thresh: float) -> bool: + return (c is not None) and (c >= conf_thresh) and not (x == 0 and y == 0) + + +def _hsv_to_bgr(h: float, s: float, v: float) -> Tuple[int, int, int]: + H = int(np.clip(h, 0.0, 1.0) * 179.0) + S = int(np.clip(s, 0.0, 1.0) * 255.0) + V = int(np.clip(v, 0.0, 1.0) * 255.0) + hsv = np.uint8([[[H, S, V]]]) + bgr = cv2.cvtColor(hsv, cv2.COLOR_HSV2BGR)[0, 0] + return int(bgr[0]), int(bgr[1]), int(bgr[2]) + + +def _looks_normalized(points: List[Tuple[float, float, float]], conf_thresh: float) -> bool: + valid = [(x, y, c) for (x, y, c) in points if _valid_pt(x, y, c, conf_thresh)] + if not valid: + return False + in01 = sum(1 for (x, y, _) in valid if 0.0 <= x <= 1.0 and 0.0 <= y <= 1.0) + return (in01 / float(len(valid))) >= 0.7 + + +def _draw_body( + canvas: np.ndarray, pose: List[Tuple[float, float, float]], conf_thresh: float, xinsr_stick_scaling: bool = False +) -> None: + CH, CW = canvas.shape[:2] + stickwidth = 2 + + valid = [(x, y, c) for (x, y, c) in pose if _valid_pt(x, y, c, conf_thresh)] + norm = False + if valid: + in01 = sum(1 for (x, y, _) in valid if 0.0 <= x <= 1.0 and 0.0 <= y <= 1.0) + norm = 
(in01 / float(len(valid))) >= 0.7 + + def to_px(x: float, y: float) -> Tuple[float, float]: + if norm: + return x * CW, y * CH + return x, y + + max_side = max(CW, CH) + if xinsr_stick_scaling: + stick_scale = 1 if max_side < 500 else min(2 + (max_side // 1000), 7) + else: + stick_scale = 1 + + for idx, (a, b) in enumerate(BODY_EDGES): + if a >= len(pose) or b >= len(pose): + continue + + ax, ay, ac = pose[a] + bx, by, bc = pose[b] + if not (_valid_pt(ax, ay, ac, conf_thresh) and _valid_pt(bx, by, bc, conf_thresh)): + continue + + ax, ay = to_px(ax, ay) + bx, by = to_px(bx, by) + + base = BODY_EDGE_COLORS[idx] if idx < len(BODY_EDGE_COLORS) else (255, 255, 255) + + X = np.array([ay, by], dtype=np.float32) + Y = np.array([ax, bx], dtype=np.float32) + + mX = float(np.mean(X)) + mY = float(np.mean(Y)) + length = float(np.hypot(X[0] - X[1], Y[0] - Y[1])) + if length < 1.0: + continue + + angle = math.degrees(math.atan2(X[0] - X[1], Y[0] - Y[1])) + + polygon = cv2.ellipse2Poly( + (int(mY), int(mX)), + (int(length / 2), int(stickwidth * stick_scale)), + int(angle), + 0, + 360, + 1, + ) + + cv2.fillConvexPoly( + canvas, + polygon, + (int(base[0] * 0.6), int(base[1] * 0.6), int(base[2] * 0.6)), + ) + + for j, (x, y, c) in enumerate(pose): + if not _valid_pt(x, y, c, conf_thresh): + continue + x, y = to_px(x, y) + col = BODY_JOINT_COLORS[j] if j < len(BODY_JOINT_COLORS) else (255, 255, 255) + cv2.circle(canvas, (int(x), int(y)), 2, col, thickness=-1) + + +def _draw_hand(canvas: np.ndarray, hand: List[Tuple[float, float, float]], conf_thresh: float) -> None: + if not hand or len(hand) < 21: + return + + CH, CW = canvas.shape[:2] + norm = _looks_normalized(hand, conf_thresh) + + def to_px(x: float, y: float) -> Tuple[float, float]: + return (x * CW, y * CH) if norm else (x, y) + + n_edges = len(HAND_EDGES) + for i, (a, b) in enumerate(HAND_EDGES): + x1, y1, c1 = hand[a] + x2, y2, c2 = hand[b] + if _valid_pt(x1, y1, c1, conf_thresh) and _valid_pt(x2, y2, c2, conf_thresh): + x1, y1 = to_px(x1, y1) + x2, y2 = to_px(x2, y2) + bgr = _hsv_to_bgr(i / float(n_edges), 1.0, 1.0) + cv2.line(canvas, (int(x1), int(y1)), (int(x2), int(y2)), bgr, 1, cv2.LINE_AA) + + for x, y, c in hand: + if _valid_pt(x, y, c, conf_thresh): + x, y = to_px(x, y) + cv2.circle(canvas, (int(x), int(y)), 1, (0, 0, 255), -1, cv2.LINE_AA) + + +def _draw_face(canvas: np.ndarray, face: List[Tuple[float, float, float]], conf_thresh: float) -> None: + if not face: + return + + CH, CW = canvas.shape[:2] + norm = _looks_normalized(face, conf_thresh) + + def to_px(x: float, y: float) -> Tuple[float, float]: + return (x * CW, y * CH) if norm else (x, y) + + for x, y, c in face: + if _valid_pt(x, y, c, conf_thresh): + x, y = to_px(x, y) + cv2.circle(canvas, (int(x), int(y)), 0, (255, 255, 255), -1, cv2.LINE_AA) + + +def _draw_pose_frame_full( + w: int, + h: int, + person: Dict[str, Any], + conf_thresh_body: float = 0.10, + conf_thresh_hands: float = 0.10, + conf_thresh_face: float = 0.10, +) -> np.ndarray: + img = np.zeros((h, w, 3), dtype=np.uint8) + + pose = _reshape_keypoints_2d(person.get("pose_keypoints_2d") or []) + face = _reshape_keypoints_2d(person.get("face_keypoints_2d") or []) + hand_l = _reshape_keypoints_2d(person.get("hand_left_keypoints_2d") or []) + hand_r = _reshape_keypoints_2d(person.get("hand_right_keypoints_2d") or []) + + if pose: + _draw_body(img, pose, conf_thresh_body) + if hand_l: + _draw_hand(img, hand_l, conf_thresh_hands) + if hand_r: + _draw_hand(img, hand_r, conf_thresh_hands) + if face: + _draw_face(img, face, 
conf_thresh_face)
+
+    return img
+
+
+# ============================================================
+# === END: render_pose_video.py logic
+# ============================================================
+
+
+# ============================================================
+# ComfyUI mappings
+# ============================================================
+
+NODE_CLASS_MAPPINGS = {
+    "TSPoseDataSmoother": KPSSmoothPoseDataAndRender,
+}
+
+NODE_DISPLAY_NAME_MAPPINGS = {
+    "TSPoseDataSmoother": "KPS: Smooth + Render (pose_data/PKL)",
+}
diff --git a/zavodik/nodes/comfyui-animator-nodes/rename_files.py b/zavodik/nodes/comfyui-animator-nodes/rename_files.py
new file mode 100644
index 0000000000000000000000000000000000000000..8bf2b1cb59fd84f23606cec91304fe07ce87fcbc
--- /dev/null
+++ b/zavodik/nodes/comfyui-animator-nodes/rename_files.py
@@ -0,0 +1,200 @@
+import os
+import re
+import uuid
+import shutil
+
+
+def extract_first_number(s: str):
+    match = re.search(r"\d+", s)
+    return int(match.group()) if match else float("inf")
+
+
+sort_methods = [
+    "None",
+    "Alphabetical (ASC)",
+    "Alphabetical (DESC)",
+    "Numerical (ASC)",
+    "Numerical (DESC)",
+    "Datetime (ASC)",
+    "Datetime (DESC)",
+]
+
+
+def sort_by(items, base_path=".", method=None):
+    def fullpath(x):
+        return os.path.join(base_path, x)
+
+    def get_timestamp(path):
+        try:
+            return os.path.getmtime(path)
+        except FileNotFoundError:
+            return float("-inf")
+
+    if method == "Alphabetical (ASC)":
+        return sorted(items)
+    elif method == "Alphabetical (DESC)":
+        return sorted(items, reverse=True)
+    elif method == "Numerical (ASC)":
+        return sorted(items, key=lambda x: extract_first_number(os.path.splitext(x)[0]))
+    elif method == "Numerical (DESC)":
+        return sorted(items, key=lambda x: extract_first_number(os.path.splitext(x)[0]), reverse=True)
+    elif method == "Datetime (ASC)":
+        return sorted(items, key=lambda x: get_timestamp(fullpath(x)))
+    elif method == "Datetime (DESC)":
+        return sorted(items, key=lambda x: get_timestamp(fullpath(x)), reverse=True)
+    else:
+        return items
+
+
+def _safe_list_files(directory: str):
+    return [f for f in os.listdir(directory) if os.path.isfile(os.path.join(directory, f))]
+
+
+def _format_name(index: int, digits: int, prefix: str, ext: str):
+    """
+    ext is expected with its leading dot, e.g. ".png"/".jpg"/".jpeg".
+    IMPORTANT: the underscore after the number is ALWAYS kept, followed by the extension as-is.
+    Example: prefix_0001_.png
+    """
+    num = str(index).zfill(digits)
+    left = f"{prefix}_" if prefix else ""
+    return f"{left}{num}_{ext}"
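+
+
+# Illustrative examples of the naming scheme above (added note, not part of the
+# original file; values assume the node's default digits=4):
+#   _format_name(1, 4, "frame", ".png") -> "frame_0001_.png"
+#   _format_name(7, 4, "", ".jpg")      -> "0007_.jpg"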
+ """ + num = str(index).zfill(digits) + left = f"{prefix}_" if prefix else "" + start = f"{left}{num}_" + + try: + entries = os.listdir(directory) + except FileNotFoundError: + return False + + for f in entries: + p = os.path.join(directory, f) + if os.path.isfile(p) and f.startswith(start): + return True + return False + + +def _find_next_free_index(directory: str, digits: int, prefix: str, start_from: int = 1) -> int: + idx = max(1, int(start_from)) + while _index_taken(directory, digits, prefix, idx): + idx += 1 + return idx + + +class RenameFilesInDir: + OUTPUT_NODE = True + + @classmethod + def INPUT_TYPES(cls): + return { + "required": { + "directory": ("STRING", {"default": ""}), + }, + "optional": { + "output_directory": ("STRING", {"default": ""}), + "sort_method": (sort_methods,), + "start_index": ("INT", {"default": 0, "min": 0, "max": 0xFFFFFFFFFFFFFFFF, "step": 1}), + "files_load_cap": ("INT", {"default": 0, "min": 0, "step": 1}), + "prefix": ("STRING", {"default": ""}), + "digits": ("INT", {"default": 4, "min": 1, "max": 16, "step": 1}), + }, + } + + RETURN_TYPES = ("INT",) + RETURN_NAMES = ("COUNT",) + FUNCTION = "run" + CATEGORY = "InspirePack/files" + + @classmethod + def IS_CHANGED(cls, **kwargs): + return float("NaN") + + def run( + self, + directory: str, + output_directory: str = "", + sort_method=None, + start_index: int = 0, + files_load_cap: int = 0, + prefix: str = "", + digits: int = 4, + ): + if not os.path.isdir(directory): + raise FileNotFoundError(f"Directory '{directory}' cannot be found.") + + files = _safe_list_files(directory) + if not files: + return (0,) + + files = sort_by(files, directory, sort_method) + files = files[start_index:] + + if files_load_cap > 0: + files = files[:files_load_cap] + + if not files: + return (0,) + + inplace = (output_directory is None) or (str(output_directory).strip() == "") + + if not inplace: + os.makedirs(output_directory, exist_ok=True) + + count = 0 + + # ---------- COPY MODE ---------- + if not inplace: + for fname in files: + src = os.path.join(directory, fname) + _, ext = os.path.splitext(fname) # ext = ".png" / ".jpg" / ... 
+ + next_idx = _find_next_free_index(output_directory, digits, prefix, start_from=1) + new_name = _format_name(next_idx, digits, prefix, ext) + + dst = os.path.join(output_directory, new_name) + shutil.copy2(src, dst) + count += 1 + + return (count,) + + # ---------- INPLACE RENAME ---------- + temp_map = [] + used_temp = set() + + def _make_temp_name(old_name: str): + while True: + t = f"__tmp__{uuid.uuid4().hex}__{old_name}" + if t not in used_temp and not os.path.exists(os.path.join(directory, t)): + used_temp.add(t) + return t + + # phase1 -> temp + for fname in files: + old_path = os.path.join(directory, fname) + tmp = _make_temp_name(fname) + tmp_path = os.path.join(directory, tmp) + + os.rename(old_path, tmp_path) + temp_map.append((tmp, fname)) + + # phase2 -> final + for tmp, original_name in temp_map: + tmp_path = os.path.join(directory, tmp) + _, ext = os.path.splitext(original_name) + + next_idx = _find_next_free_index(directory, digits, prefix, start_from=1) + new_name = _format_name(next_idx, digits, prefix, ext) + + new_path = os.path.join(directory, new_name) + os.rename(tmp_path, new_path) + count += 1 + + return (count,) diff --git a/zavodik/nodes/comfyui-animator-nodes/requirements.txt b/zavodik/nodes/comfyui-animator-nodes/requirements.txt new file mode 100644 index 0000000000000000000000000000000000000000..cb3738c2299fbaa9ad35f3cf60f50a284a883e99 --- /dev/null +++ b/zavodik/nodes/comfyui-animator-nodes/requirements.txt @@ -0,0 +1,2 @@ +numpy +opencv-python diff --git a/zavodik/nodes/comfyui-animator-nodes/save_load_pose.py b/zavodik/nodes/comfyui-animator-nodes/save_load_pose.py new file mode 100644 index 0000000000000000000000000000000000000000..be9bae74e8bf53a44b79584e33f616351d8805e3 --- /dev/null +++ b/zavodik/nodes/comfyui-animator-nodes/save_load_pose.py @@ -0,0 +1,128 @@ +import os +import time +import pickle +import glob +import folder_paths + + +def _ensure_output_dir(): + out_dir = folder_paths.get_output_directory() + os.makedirs(out_dir, exist_ok=True) + return out_dir + + +# ------------------------- +# UI: list PKL/PT under input/** (recursive) +# ------------------------- +def _list_all_pkl_under_input(): + inp = folder_paths.get_input_directory() + exts = (".pkl", ".pickle", ".pt") + + files = [] + for ext in exts: + pattern = os.path.join(inp, "**", f"*{ext}") + files.extend(glob.glob(pattern, recursive=True)) + + rel = [] + for f in files: + if os.path.isfile(f): + r = os.path.relpath(f, inp).replace("\\", "/") + rel.append(r) + + rel = sorted(set(rel)) + return rel if rel else [""] + + +def _abs_from_input(rel_path: str) -> str: + inp = folder_paths.get_input_directory() + return os.path.join(inp, rel_path).replace("\\", "/") + + +def _make_unique_path(base_path: str) -> str: + """ + If file exists, append incremental suffix: + pose_data.pkl + pose_data_0001.pkl + pose_data_0002.pkl + """ + if not os.path.exists(base_path): + return base_path + + directory = os.path.dirname(base_path) + name = os.path.basename(base_path) + base, ext = os.path.splitext(name) + + idx = 1 + while True: + new_name = f"{base}_{idx:04d}{ext}" + new_path = os.path.join(directory, new_name) + if not os.path.exists(new_path): + return new_path + idx += 1 + + +def _default_filename(prefix: str, ext: str): + ts = time.strftime("%Y%m%d_%H%M%S") + return f"{prefix}_{ts}{ext}" + + +class TSSavePoseDataAsPickle: + OUTPUT_NODE = True + + @classmethod + def INPUT_TYPES(s): + return { + "required": { + "pose_data": ("POSEDATA",), + "filename": ("STRING", {"default": ""}), + } + } + + 
RETURN_TYPES = ("STRING",) + RETURN_NAMES = ("saved_path",) + FUNCTION = "save" + CATEGORY = "save" + + def save(self, pose_data, filename=""): + out_dir = _ensure_output_dir() + filename = (filename or "").strip() + if not filename: + filename = _default_filename("pose_data", ".pkl") + if not filename.lower().endswith((".pkl", ".pickle")): + filename += ".pkl" + + abs_path = _make_unique_path(os.path.join(out_dir, filename)) + + with open(abs_path, "wb") as f: + pickle.dump(pose_data, f, protocol=pickle.HIGHEST_PROTOCOL) + + return (abs_path,) + + +class TSLoadPoseDataPickle: + @classmethod + def INPUT_TYPES(s): + return { + "required": { + # dropdown + Upload, recursive input/** + "file": (_list_all_pkl_under_input(),), + } + } + + RETURN_TYPES = ("POSEDATA",) + RETURN_NAMES = ("pose_data",) + FUNCTION = "load" + CATEGORY = "load" + + def load(self, file): + if not isinstance(file, str) or not file.strip(): + raise ValueError("TS PoseData Pickle: Please select a .pkl/.pt file.") + + abs_path = _abs_from_input(file) + if not os.path.isfile(abs_path): + raise ValueError(f"TS PoseData Pickle: File not found: {abs_path}") + + with open(abs_path, "rb") as f: + pose_data = pickle.load(f) + + return (pose_data,) diff --git a/zavodik/nodes/comfyui-teskors-utils-main/README.md b/zavodik/nodes/comfyui-teskors-utils-main/README.md new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/zavodik/nodes/comfyui-teskors-utils-main/__init__.py b/zavodik/nodes/comfyui-teskors-utils-main/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..8d3ce640495fe0bab5477dc73484c3f241da1a90 --- /dev/null +++ b/zavodik/nodes/comfyui-teskors-utils-main/__init__.py @@ -0,0 +1,20 @@ +from .save_load_pose import TSSavePoseDataAsPickle, TSLoadPoseDataPickle +from .openpose_smoother import KPSSmoothPoseDataAndRender +from .load_video_batch import LoadVideoBatchListFromDir +from .rename_files import RenameFilesInDir + +NODE_CLASS_MAPPINGS = { + "TSSavePoseDataAsPickle": TSSavePoseDataAsPickle, + "TSLoadPoseDataPickle": TSLoadPoseDataPickle, + "TSPoseDataSmoother": KPSSmoothPoseDataAndRender, + "TSLoadVideoBatchListFromDir": LoadVideoBatchListFromDir, + "TSRenameFilesInDir": RenameFilesInDir, +} + +NODE_DISPLAY_NAME_MAPPINGS = { + "TSSavePoseDataAsPickle": "TS Save Pose Data (PKL)", + "TSLoadPoseDataPickle": "TS Load Pose Data (PKL)", + "TSPoseDataSmoother": "TS Pose Data Smoother", + "TSLoadVideoBatchListFromDir": "TS Load Video Batch List From Dir", + "TSRenameFilesInDir": "TS Rename Files In Dir", +} diff --git a/zavodik/nodes/comfyui-teskors-utils-main/__pycache__/__init__.cpython-313.pyc b/zavodik/nodes/comfyui-teskors-utils-main/__pycache__/__init__.cpython-313.pyc new file mode 100644 index 0000000000000000000000000000000000000000..3bff209226a9b8492f573d461a8d080817b4da63 Binary files /dev/null and b/zavodik/nodes/comfyui-teskors-utils-main/__pycache__/__init__.cpython-313.pyc differ diff --git a/zavodik/nodes/comfyui-teskors-utils-main/__pycache__/load_video_batch.cpython-313.pyc b/zavodik/nodes/comfyui-teskors-utils-main/__pycache__/load_video_batch.cpython-313.pyc new file mode 100644 index 0000000000000000000000000000000000000000..0423947beb70686554f26b70b451f67061167617 Binary files /dev/null and b/zavodik/nodes/comfyui-teskors-utils-main/__pycache__/load_video_batch.cpython-313.pyc differ diff --git a/zavodik/nodes/comfyui-teskors-utils-main/__pycache__/openpose_smoother.cpython-313.pyc 
b/zavodik/nodes/comfyui-teskors-utils-main/__pycache__/openpose_smoother.cpython-313.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..6040361f0026e1fb1d766ec368026d898ca679db
Binary files /dev/null and b/zavodik/nodes/comfyui-teskors-utils-main/__pycache__/openpose_smoother.cpython-313.pyc differ
diff --git a/zavodik/nodes/comfyui-teskors-utils-main/__pycache__/rename_files.cpython-313.pyc b/zavodik/nodes/comfyui-teskors-utils-main/__pycache__/rename_files.cpython-313.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..15d434035ce1c3ada97bef2519d0f6c0647ed26e
Binary files /dev/null and b/zavodik/nodes/comfyui-teskors-utils-main/__pycache__/rename_files.cpython-313.pyc differ
diff --git a/zavodik/nodes/comfyui-teskors-utils-main/__pycache__/save_load_pose.cpython-313.pyc b/zavodik/nodes/comfyui-teskors-utils-main/__pycache__/save_load_pose.cpython-313.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..c98b127e4707fa2d22bf3a21c8a17bd8ed67ad94
Binary files /dev/null and b/zavodik/nodes/comfyui-teskors-utils-main/__pycache__/save_load_pose.cpython-313.pyc differ
diff --git a/zavodik/nodes/comfyui-teskors-utils-main/__pycache__/utils.cpython-313.pyc b/zavodik/nodes/comfyui-teskors-utils-main/__pycache__/utils.cpython-313.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..84e0546e3643220b3d5a2cdfd2fce01224dab8b9
Binary files /dev/null and b/zavodik/nodes/comfyui-teskors-utils-main/__pycache__/utils.cpython-313.pyc differ
diff --git a/zavodik/nodes/comfyui-teskors-utils-main/load_video_batch.py b/zavodik/nodes/comfyui-teskors-utils-main/load_video_batch.py
new file mode 100644
index 0000000000000000000000000000000000000000..c6ae5835bf342610aa1d53107bfb4c8b84ebf8fb
--- /dev/null
+++ b/zavodik/nodes/comfyui-teskors-utils-main/load_video_batch.py
@@ -0,0 +1,351 @@
+import os
+import re
+import shutil
+import subprocess
+import time
+from collections.abc import Mapping
+
+import torch
+import numpy as np
+
+# OpenCV for video decoding
+try:
+    import cv2
+
+    _has_cv2 = True
+except Exception:
+    _has_cv2 = False
+
+
+# =========================
+# AUDIO (inlined from utils)
+# =========================
+ENCODE_ARGS = ("utf-8", "backslashreplace")
+
+
+def _pick_ffmpeg_path():
+    # 1) env override (same behaviour as VHS)
+    if "VHS_FORCE_FFMPEG_PATH" in os.environ:
+        p = os.environ.get("VHS_FORCE_FFMPEG_PATH")
+        if p:
+            return p
+
+    # 2) system ffmpeg
+    system_ffmpeg = shutil.which("ffmpeg")
+    if system_ffmpeg is not None:
+        return system_ffmpeg
+
+    # 3) local binary next to the working directory
+    if os.path.isfile("ffmpeg"):
+        return os.path.abspath("ffmpeg")
+    if os.path.isfile("ffmpeg.exe"):
+        return os.path.abspath("ffmpeg.exe")
+
+    return None
+
+
+ffmpeg_path = _pick_ffmpeg_path()
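+
+# Illustrative note (added, not in the original file): the resolution order
+# above means an explicit override always wins over a PATH-installed binary:
+#   VHS_FORCE_FFMPEG_PATH=/opt/ffmpeg/bin/ffmpeg python main.py
+# get_audio() below returns {"waveform": tensor[1, channels, samples],
+# "sample_rate": int}, which is the dict layout ComfyUI AUDIO sockets expect.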
+
+
+def get_audio(file, start_time=0, duration=0):
+    if ffmpeg_path is None:
+        raise Exception("ffmpeg not found. Put ffmpeg in PATH, or set VHS_FORCE_FFMPEG_PATH env var.")
+
+    args = [ffmpeg_path, "-i", file]
+    if start_time > 0:
+        args += ["-ss", str(start_time)]
+    if duration > 0:
+        args += ["-t", str(duration)]
+
+    try:
+        # as in utils: dump raw f32le PCM to stdout
+        res = subprocess.run(args + ["-f", "f32le", "-"], capture_output=True, check=True)
+        audio = torch.frombuffer(bytearray(res.stdout), dtype=torch.float32)
+        match = re.search(r", (\d+) Hz, (\w+), ", res.stderr.decode(*ENCODE_ARGS))
+    except subprocess.CalledProcessError as e:
+        raise Exception(f"Failed to extract audio from {file}:\n" + e.stderr.decode(*ENCODE_ARGS))
+
+    if match:
+        ar = int(match.group(1))
+        ac = {"mono": 1, "stereo": 2}.get(match.group(2), 2)
+    else:
+        ar = 44100
+        ac = 2
+
+    # reshape as in utils: (-1, channels) -> (channels, samples) -> (1, channels, samples)
+    if audio.numel() == 0:
+        # video without an audio stream: return an empty but correctly shaped buffer
+        empty = torch.zeros((1, 1, 0), dtype=torch.float32)
+        return {"waveform": empty, "sample_rate": ar}
+
+    audio = audio.reshape((-1, ac)).transpose(0, 1).unsqueeze(0)
+    return {"waveform": audio, "sample_rate": ar}
+
+
+class LazyAudioMap(Mapping):
+    def __init__(self, file, start_time, duration):
+        self.file = file
+        self.start_time = start_time
+        self.duration = duration
+        self._dict = None
+
+    def _ensure(self):
+        if self._dict is None:
+            self._dict = get_audio(self.file, self.start_time, self.duration)
+
+    def __getitem__(self, key):
+        self._ensure()
+        return self._dict[key]
+
+    def __iter__(self):
+        self._ensure()
+        return iter(self._dict)
+
+    def __len__(self):
+        self._ensure()
+        return len(self._dict)
+
+
+def lazy_get_audio(file, start_time=0, duration=0, **kwargs):
+    return LazyAudioMap(file, start_time, duration)
+
+
+# =========================
+# rest of the node code
+# =========================
+
+
+def extract_first_number(s):
+    match = re.search(r"\d+", s)
+    return int(match.group()) if match else float("inf")
+
+
+sort_methods = [
+    "None",
+    "Alphabetical (ASC)",
+    "Alphabetical (DESC)",
+    "Numerical (ASC)",
+    "Numerical (DESC)",
+    "Datetime (ASC)",
+    "Datetime (DESC)",
+]
+
+
+def sort_by(items, base_path=".", method=None):
+    def fullpath(x):
+        return os.path.join(base_path, x)
+
+    def get_timestamp(path):
+        try:
+            return os.path.getmtime(path)
+        except FileNotFoundError:
+            return float("-inf")
+
+    if method == "Alphabetical (ASC)":
+        return sorted(items)
+    elif method == "Alphabetical (DESC)":
+        return sorted(items, reverse=True)
+    elif method == "Numerical (ASC)":
+        return sorted(items, key=lambda x: extract_first_number(os.path.splitext(x)[0]))
+    elif method == "Numerical (DESC)":
+        return sorted(items, key=lambda x: extract_first_number(os.path.splitext(x)[0]), reverse=True)
+    elif method == "Datetime (ASC)":
+        return sorted(items, key=lambda x: get_timestamp(fullpath(x)))
+    elif method == "Datetime (DESC)":
+        return sorted(items, key=lambda x: get_timestamp(fullpath(x)), reverse=True)
+    else:
+        return items
+
+
+def target_size(width, height, custom_width, custom_height, downscale_ratio=8):
+    if downscale_ratio is None:
+        downscale_ratio = 8
+
+    if custom_width == 0 and custom_height == 0:
+        new_w, new_h = width, height
+    elif custom_height == 0:
+        new_h = int(height * (custom_width / width))
+        new_w = int(custom_width)
+    elif custom_width == 0:
+        new_w = int(width * (custom_height / height))
+        new_h = int(custom_height)
+    else:
+        new_w, new_h = int(custom_width), int(custom_height)
+
+    new_w = int(new_w / downscale_ratio + 0.5) *
downscale_ratio + new_h = int(new_h / downscale_ratio + 0.5) * downscale_ratio + return new_w, new_h + + +def _read_frames_vhs_like( + video_path: str, + force_rate: float = 0, + custom_width: int = 0, + custom_height: int = 0, + downscale_ratio: int = 8, + frame_load_cap: int = 0, +): + if not _has_cv2: + raise RuntimeError("OpenCV (cv2) not available. Install opencv-python.") + + cap = cv2.VideoCapture(video_path) + if not cap.isOpened() or not cap.grab(): + raise FileNotFoundError(f"Cannot open video: {video_path}") + + fps = cap.get(cv2.CAP_PROP_FPS) + if fps is None or fps <= 0: + fps = 30.0 + + width = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH)) + height = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT)) + + ok0, frame0 = cap.retrieve() + if not ok0 or frame0 is None: + cap.release() + raise RuntimeError(f"Cannot retrieve first frame from: {video_path}") + + if width <= 0 or height <= 0: + height, width = frame0.shape[:2] + + base_dt = 1.0 / float(fps) + target_dt = base_dt if force_rate == 0 else (1.0 / float(force_rate)) + loaded_fps = 1.0 / target_dt if target_dt > 0 else float(fps) + + new_w, new_h = target_size(width, height, custom_width, custom_height, downscale_ratio) + do_resize = (new_w != width) or (new_h != height) + + frames = [] + time_offset = target_dt + + def _process_frame(bgr): + rgb = cv2.cvtColor(bgr, cv2.COLOR_BGR2RGB) + if do_resize: + rgb = cv2.resize(rgb, (new_w, new_h), interpolation=cv2.INTER_LANCZOS4) + return rgb + + frames.append(_process_frame(frame0)) + if frame_load_cap > 0 and len(frames) >= frame_load_cap: + cap.release() + arr = np.stack(frames, axis=0).astype(np.float32) / 255.0 + t = torch.from_numpy(arr) + return t, float(fps), float(loaded_fps), float(len(t) * target_dt), 0.0 + + time_offset -= target_dt + + while cap.isOpened(): + if time_offset < target_dt: + ok = cap.grab() + if not ok: + break + time_offset += base_dt + continue + + ok, frame_bgr = cap.retrieve() + if not ok or frame_bgr is None: + break + + frames.append(_process_frame(frame_bgr)) + + if frame_load_cap > 0 and len(frames) >= frame_load_cap: + break + + time_offset -= target_dt + + cap.release() + + if len(frames) == 0: + raise RuntimeError(f"No frames could be read from: {video_path}") + + arr = np.stack(frames, axis=0).astype(np.float32) / 255.0 + t = torch.from_numpy(arr) + loaded_duration = float(len(t) * target_dt) + return t, float(fps), float(loaded_fps), loaded_duration, 0.0 + + +class LoadVideoBatchListFromDir: + @classmethod + def INPUT_TYPES(s): + return { + "required": { + "directory": ("STRING", {"default": ""}), + "force_rate": ("FLOAT", {"default": 0, "min": 0, "max": 120, "step": 1}), + "width": ("INT", {"default": 720, "min": 0, "max": 8192, "step": 1}), + "height": ("INT", {"default": 1280, "min": 0, "max": 8192, "step": 1}), + }, + "optional": { + "video_load_cap": ("INT", {"default": 0, "min": 0, "step": 1}), + "frame_load_cap": ("INT", {"default": 0, "min": 0, "step": 1}), + "start_index": ("INT", {"default": 0, "min": 0, "max": 0xFFFFFFFFFFFFFFFF, "step": 1}), + "load_always": ("BOOLEAN", {"default": False, "label_on": "enabled", "label_off": "disabled"}), + "sort_method": (sort_methods,), + }, + } + + RETURN_TYPES = ("IMAGE", "AUDIO", "INT") + RETURN_NAMES = ("IMAGE", "audio", "COUNT") + OUTPUT_IS_LIST = (True, True, False) + + FUNCTION = "load_videos" + CATEGORY = "video" + + @classmethod + def IS_CHANGED(cls, **kwargs): + if kwargs.get("load_always"): + return float("NaN") + return hash(frozenset(kwargs.items())) + + def load_videos( + self, + directory: str, + 
force_rate: float = 0,
+        width: int = 0,
+        height: int = 0,
+        video_load_cap: int = 0,
+        frame_load_cap: int = 0,
+        start_index: int = 0,
+        load_always: bool = False,
+        sort_method=None,
+    ):
+        if not os.path.isdir(directory):
+            raise FileNotFoundError(f"Directory '{directory}' cannot be found.")
+
+        files = os.listdir(directory)
+        if len(files) == 0:
+            raise FileNotFoundError(f"No files in directory '{directory}'.")
+
+        valid_ext = {".mp4", ".mov", ".mkv", ".webm", ".avi", ".m4v"}
+        files = [
+            f
+            for f in files
+            if os.path.isfile(os.path.join(directory, f)) and os.path.splitext(f)[1].lower() in valid_ext
+        ]
+        if len(files) == 0:
+            raise FileNotFoundError(f"No video files in directory '{directory}' (expected: {sorted(valid_ext)}).")
+
+        files = sort_by(files, directory, sort_method)
+        files = files[start_index:]
+        if video_load_cap > 0:
+            files = files[:video_load_cap]
+
+        images_list = []
+        audios_list = []
+
+        for fname in files:
+            path = os.path.join(directory, fname)
+
+            vid, source_fps, loaded_fps, loaded_duration, start_time = _read_frames_vhs_like(
+                path,
+                force_rate=force_rate,
+                custom_width=width,
+                custom_height=height,
+                downscale_ratio=8,
+                frame_load_cap=frame_load_cap,
+            )
+
+            images_list.append(vid)
+
+            # audio duration is based on the loaded frames/time
+            audio = lazy_get_audio(path, start_time, loaded_duration)
+            audios_list.append(audio)
+
+        return (images_list, audios_list, len(images_list))
diff --git a/zavodik/nodes/comfyui-teskors-utils-main/openpose_smoother.py b/zavodik/nodes/comfyui-teskors-utils-main/openpose_smoother.py
new file mode 100644
index 0000000000000000000000000000000000000000..80a63a5ac823fad26a37f8b6b3a086d69bfc3c9a
--- /dev/null
+++ b/zavodik/nodes/comfyui-teskors-utils-main/openpose_smoother.py
@@ -0,0 +1,2481 @@
+from __future__ import annotations
+
+import copy
+import math
+import pickle
+import threading
+from dataclasses import dataclass
+from typing import Any, Dict, List, Optional, Tuple, Union
+
+import numpy as np
+import cv2
+import torch
+
+
+# ============================================================
+# ComfyUI Node (pose_data + PKL)
+# ============================================================
+
+_GLOBAL_LOCK = threading.Lock()
+
+
+class KPSSmoothPoseDataAndRender:
+    """
+    Smoothing + pose rendering.
+    Input: POSEDATA (object or dict; usually produced by TSLoadPoseDataPickle).
+    Output: IMAGE (torch [T,H,W,3] float 0..1) and POSEDATA in the same format, smoothed.
+    """
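+
+    # Sketch of the POSEDATA layout this node assumes (added note, inferred
+    # from the conversion helpers below; not a formal schema):
+    #   pose_data.pose_metas[t].kps_body    -> np.ndarray (J, 2) pixel xy
+    #   pose_data.pose_metas[t].kps_body_p  -> np.ndarray (J,) confidences
+    #   plus kps_face / kps_lhand / kps_rhand (+ _p) and width/height per frame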
+
+    @classmethod
+    def INPUT_TYPES(cls):
+        return {
+            "required": {
+                "pose_data": ("POSEDATA",),  # <-- IMPORTANT: must be POSEDATA
+                "filter_extra_people": ("BOOLEAN", {"default": True}),
+                # shared smoothing parameters (instead of separate body and face+hands sets)
+                "smooth_alpha": ("FLOAT", {"default": 0.7, "min": 0.01, "max": 0.99, "step": 0.01}),
+                "gap_frames": ("INT", {"default": 12, "min": 0, "max": 100, "step": 1}),
+                "min_run_frames": ("INT", {"default": 2, "min": 1, "max": 60, "step": 1}),
+                # render thresholds (body/hands are exposed as inputs; face is NOT)
+                "conf_thresh_body": ("FLOAT", {"default": 0.20, "min": 0.0, "max": 1.0, "step": 0.01}),
+                "conf_thresh_hands": ("FLOAT", {"default": 0.50, "min": 0.0, "max": 1.0, "step": 0.01}),
+            }
+        }
+
+    RETURN_TYPES = ("IMAGE", "POSEDATA")  # <-- IMPORTANT: must be POSEDATA
+    RETURN_NAMES = ("IMAGE", "pose_data")
+    FUNCTION = "run"
+    CATEGORY = "posedata"
+
+    def run(self, pose_data, **kwargs):
+        filter_extra_people = bool(kwargs.get("filter_extra_people", True))
+
+        # shared parameter set
+        smooth_alpha = float(kwargs.get("smooth_alpha", 0.7))
+        gap_frames = int(kwargs.get("gap_frames", 12))
+        min_run_frames = int(kwargs.get("min_run_frames", 2))
+
+        # render thresholds
+        conf_thresh_body = float(kwargs.get("conf_thresh_body", 0.20))
+        conf_thresh_hands = float(kwargs.get("conf_thresh_hands", 0.50))
+        conf_thresh_face = 0.20  # <- intentionally not exposed as an input; fixed at 0.20
+
+        force_body_18 = bool(kwargs.get("force_body_18", False))
+
+        pose_data = _coerce_pose_data_to_obj(pose_data)
+
+        # pose_data -> frames_json_like
+        frames_json_like, meta_ref = _pose_data_to_kps_frames(pose_data, force_body_18=force_body_18)
+
+        with _GLOBAL_LOCK:
+            old = _snapshot_tunable_globals()
+            try:
+                # BODY
+                globals()["ALPHA_BODY"] = smooth_alpha
+                globals()["SUPER_SMOOTH_ALPHA"] = smooth_alpha
+                globals()["MAX_GAP_FRAMES"] = gap_frames
+                globals()["MIN_RUN_FRAMES"] = min_run_frames
+
+                # FACE+HANDS (dense) are driven by the same shared set
+                globals()["DENSE_SUPER_SMOOTH_ALPHA"] = smooth_alpha
+                globals()["DENSE_MAX_GAP_FRAMES"] = gap_frames
+                globals()["DENSE_MIN_RUN_FRAMES"] = min_run_frames
+
+                globals()["FILTER_EXTRA_PEOPLE"] = filter_extra_people
+
+                smoothed_frames = smooth_KPS_json_obj(
+                    frames_json_like,
+                    keep_face_untouched=False,
+                    keep_hands_untouched=False,
+                    filter_extra_people=filter_extra_people,
+                )
+            finally:
+                _restore_tunable_globals(old)
+
+        # frames_json_like -> pose_data (written back into pose_metas)
+        out_pose_data = _kps_frames_to_pose_data(pose_data, smoothed_frames, meta_ref, force_body_18=force_body_18)
+
+        # render
+        w, h = _extract_canvas_wh(smoothed_frames, default_w=720, default_h=1280)
+        frames_np = []
+        for fr in smoothed_frames:
+            if isinstance(fr, dict) and fr.get("people"):
+                img = _draw_pose_frame_full(
+                    w,
+                    h,
+                    fr["people"][0],
+                    conf_thresh_body=conf_thresh_body,
+                    conf_thresh_hands=conf_thresh_hands,
+                    conf_thresh_face=conf_thresh_face,
+                )
+            else:
+                img = np.zeros((h, w, 3), dtype=np.uint8)
+            frames_np.append(img)
+
+        frames_t = torch.from_numpy(np.stack(frames_np, axis=0)).float() / 255.0
+        return (frames_t, out_pose_data)
+
+
+# ============================================================
+# PKL / pose_data IO
+# ============================================================
+
+
+class _PoseDummyObj:
+    def __init__(self, *a, **k):
+        pass
+
+    def __setstate__(self, state):
+        # supports both a plain dict state and a (dict, slotstate) pair
+        if isinstance(state, dict):
+            self.__dict__.update(state)
+        elif isinstance(state, (list, tuple)) and len(state) == 2 and isinstance(state[0], dict):
+            self.__dict__.update(state[0])
+            if isinstance(state[1], dict):
+                self.__dict__.update(state[1])
+            else:
+                self.__dict__["_slotstate"] = state[1]
+        else:
+            self.__dict__["_state"] = state
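+
+
+# Illustrative example (added note, not in the original file): this mirrors
+# pickle's __setstate__ protocol, where classes using __slots__ serialize
+# their state as a (dict, slot_state) pair. A hypothetical round-trip:
+#   o = _PoseDummyObj()
+#   o.__setstate__(({"width": 720}, {"height": 1280}))
+#   (o.width, o.height)  -> (720, 1280)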
+
+
+class _SafeUnpickler(pickle.Unpickler):
+    """
+    Load PKLs safely inside the ComfyUI environment:
+    - remap numpy._core -> numpy.core
+    - turn unknown classes (WanAnimatePreprocess.*) into plain objects with a __dict__
+    """
+
+    def find_class(self, module, name):
+        # remap numpy-internal module paths (a common cross-version problem)
+        if module.startswith("numpy._core"):
+            module = module.replace("numpy._core", "numpy.core", 1)
+        if module.startswith("numpy._globals"):
+            module = module.replace("numpy._globals", "numpy", 1)
+
+        # specific metadata classes (if they show up)
+        if name in {"AAPoseMeta"}:
+            return _PoseDummyObj
+
+        try:
+            return super().find_class(module, name)
+        except Exception:
+            return _PoseDummyObj
+
+
+def _load_pose_data_pkl(path: str) -> Any:
+    with open(path, "rb") as f:
+        return _SafeUnpickler(f).load()
+
+
+def _coerce_pose_data_to_obj(pd: Any) -> Any:
+    """
+    Accepts:
+      - dict pose_data
+      - object with attributes like .pose_metas (AAPoseMeta-like)
+      - str path to .pkl
+      - dict wrapper with 'pose_data'
+    """
+    if isinstance(pd, str):
+        obj = _load_pose_data_pkl(pd)
+        return obj
+
+    if isinstance(pd, dict) and "pose_data" in pd:
+        return pd["pose_data"]
+
+    return pd
+
+
+# ============================================================
+# pose_data <-> JSON-like KPS frames
+# ============================================================
+
+
+def _as_attr(x: Any, key: str, default=None):
+    if isinstance(x, dict):
+        return x.get(key, default)
+    return getattr(x, key, default)
+
+
+def _set_attr(x: Any, key: str, value: Any):
+    if isinstance(x, dict):
+        x[key] = value
+    else:
+        setattr(x, key, value)
+
+
+def _xy_p_to_flat(xy: Optional[np.ndarray], p: Optional[np.ndarray]) -> Optional[List[float]]:
+    if xy is None:
+        return None
+    arr = np.asarray(xy)
+    if arr.ndim != 2 or arr.shape[1] < 2:
+        return None
+    N = arr.shape[0]
+    if p is None:
+        pp = np.ones((N,), dtype=np.float32)
+    else:
+        pp = np.asarray(p).reshape(-1)
+        if pp.shape[0] != N:
+            # fall back to ones if the lengths somehow disagree
+            pp = np.ones((N,), dtype=np.float32)
+
+    out: List[float] = []
+    for i in range(N):
+        out.extend([float(arr[i, 0]), float(arr[i, 1]), float(pp[i])])
+    return out
+
+
+def _flat_to_xy_p(flat: Optional[List[float]]) -> Tuple[Optional[np.ndarray], Optional[np.ndarray]]:
+    if not isinstance(flat, list) or len(flat) % 3 != 0:
+        return None, None
+    N = len(flat) // 3
+    xy = np.zeros((N, 2), dtype=np.float32)
+    p = np.zeros((N,), dtype=np.float32)
+    for i in range(N):
+        xy[i, 0] = float(flat[3 * i + 0])
+        xy[i, 1] = float(flat[3 * i + 1])
+        p[i] = float(flat[3 * i + 2])
+    return xy, p
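+
+
+# Worked example of the flat OpenPose triplet layout these two helpers convert
+# (added note, illustrative only):
+#   flat = [x0, y0, c0, x1, y1, c1, ...]
+#   _flat_to_xy_p([10.0, 20.0, 0.9])  -> (array([[10., 20.]]), array([0.9]))
+#   _xy_p_to_flat(np.array([[10.0, 20.0]]), np.array([0.9])) -> [10.0, 20.0, 0.9]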
+
+
+def _pose_data_to_kps_frames(pose_data: Any, *, force_body_18: bool) -> Tuple[List[Dict[str, Any]], Dict[str, Any]]:
+    """
+    Builds a JSON-like list of frames:
+    frame = {"people":[{pose_keypoints_2d, face_keypoints_2d, hand_left_keypoints_2d, hand_right_keypoints_2d}],
+             "canvas_width": W, "canvas_height": H}
+    meta_ref: references to pose_metas (plus type/access info) so results can be written back correctly.
+    """
+    pose_metas = _as_attr(pose_data, "pose_metas", None)
+    if pose_metas is None:
+        # some producers use a different key
+        pose_metas = _as_attr(pose_data, "frames", None)
+
+    if pose_metas is None or not isinstance(pose_metas, list):
+        raise ValueError("pose_data does not contain 'pose_metas' list.")
+
+    frames: List[Dict[str, Any]] = []
+    for meta in pose_metas:
+        h = _as_attr(meta, "height", 1280)
+        w = _as_attr(meta, "width", 720)
+
+        kps_body = _as_attr(meta, "kps_body", None)
+        kps_body_p = _as_attr(meta, "kps_body_p", None)
+
+        kps_face = _as_attr(meta, "kps_face", None)
+        kps_face_p = _as_attr(meta, "kps_face_p", None)
+
+        kps_lhand = _as_attr(meta, "kps_lhand", None)
+        kps_lhand_p = _as_attr(meta, "kps_lhand_p", None)
+
+        kps_rhand = _as_attr(meta, "kps_rhand", None)
+        kps_rhand_p = _as_attr(meta, "kps_rhand_p", None)
+
+        # to flat
+        pose_flat = _xy_p_to_flat(kps_body, kps_body_p)
+        face_flat = _xy_p_to_flat(kps_face, kps_face_p)
+        lh_flat = _xy_p_to_flat(kps_lhand, kps_lhand_p)
+        rh_flat = _xy_p_to_flat(kps_rhand, kps_rhand_p)
+
+        if force_body_18 and isinstance(pose_flat, list) and len(pose_flat) >= 18 * 3:
+            pose_flat = pose_flat[: 18 * 3]
+
+        person = {
+            "pose_keypoints_2d": pose_flat if pose_flat is not None else [],
+            "face_keypoints_2d": face_flat if face_flat is not None else [],
+            "hand_left_keypoints_2d": lh_flat,
+            "hand_right_keypoints_2d": rh_flat,
+        }
+
+        frame = {"people": [person], "canvas_height": int(h), "canvas_width": int(w)}
+        frames.append(frame)
+
+    meta_ref = {
+        "pose_metas": pose_metas,
+        "len": len(pose_metas),
+    }
+    return frames, meta_ref
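+
+
+# Round-trip sketch (added note, illustrative only): smoothing runs on the
+# JSON-like frames and the result is written back into the same pose_data
+# structure, mirroring what KPSSmoothPoseDataAndRender.run() does:
+#   frames, ref = _pose_data_to_kps_frames(pd, force_body_18=False)
+#   frames = smooth_KPS_json_obj(frames)
+#   pd2 = _kps_frames_to_pose_data(pd, frames, ref, force_body_18=False)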
+ """ + out_pd = copy.deepcopy(pose_data_in) + pose_metas_out = _as_attr(out_pd, "pose_metas", None) + if pose_metas_out is None: + # fallback: вдруг другой ключ + pose_metas_out = meta_ref.get("pose_metas") + + if pose_metas_out is None or not isinstance(pose_metas_out, list): + raise ValueError("Failed to locate pose_metas in output pose_data.") + + T = min(len(pose_metas_out), len(frames_kps)) + for t in range(T): + meta = pose_metas_out[t] + fr = frames_kps[t] + people = fr.get("people", []) if isinstance(fr, dict) else [] + p0 = people[0] if people else None + if not isinstance(p0, dict): + continue + + pose_flat = p0.get("pose_keypoints_2d") + face_flat = p0.get("face_keypoints_2d") + lh_flat = p0.get("hand_left_keypoints_2d") + rh_flat = p0.get("hand_right_keypoints_2d") + + if force_body_18 and isinstance(pose_flat, list) and len(pose_flat) >= 18 * 3: + pose_flat = pose_flat[: 18 * 3] + + body_xy, body_p = _flat_to_xy_p(pose_flat if isinstance(pose_flat, list) else None) + face_xy, face_p = _flat_to_xy_p(face_flat if isinstance(face_flat, list) else None) + lh_xy, lh_p = _flat_to_xy_p(lh_flat if isinstance(lh_flat, list) else None) + rh_xy, rh_p = _flat_to_xy_p(rh_flat if isinstance(rh_flat, list) else None) + + if body_xy is not None and body_p is not None: + _set_attr(meta, "kps_body", body_xy.astype(np.float32, copy=False)) + _set_attr(meta, "kps_body_p", body_p.astype(np.float32, copy=False)) + + if face_xy is not None and face_p is not None: + _set_attr(meta, "kps_face", face_xy.astype(np.float32, copy=False)) + _set_attr(meta, "kps_face_p", face_p.astype(np.float32, copy=False)) + + if lh_xy is not None and lh_p is not None: + _set_attr(meta, "kps_lhand", lh_xy.astype(np.float32, copy=False)) + _set_attr(meta, "kps_lhand_p", lh_p.astype(np.float32, copy=False)) + + if rh_xy is not None and rh_p is not None: + _set_attr(meta, "kps_rhand", rh_xy.astype(np.float32, copy=False)) + _set_attr(meta, "kps_rhand_p", rh_p.astype(np.float32, copy=False)) + + # обновим width/height если нужно + if isinstance(fr, dict): + if "canvas_width" in fr: + _set_attr(meta, "width", int(fr["canvas_width"])) + if "canvas_height" in fr: + _set_attr(meta, "height", int(fr["canvas_height"])) + + # обязательно положим pose_metas обратно + _set_attr(out_pd, "pose_metas", pose_metas_out) + return out_pd + + +def _extract_canvas_wh(data: Any, default_w: int, default_h: int) -> Tuple[int, int]: + w, h = int(default_w), int(default_h) + if isinstance(data, list): + for fr in data: + if isinstance(fr, dict) and "canvas_width" in fr and "canvas_height" in fr: + try: + w = int(fr["canvas_width"]) + h = int(fr["canvas_height"]) + break + except Exception: + pass + return w, h + + +# ============================================================ +# === START: smooth_KPS_json.py logic (ported as-is) +# ============================================================ + +# --- Root+Scale carry (when torso disappears on close-up) --- +ROOTSCALE_CARRY_ENABLED = True +CARRY_MAX_FRAMES = 48 +CARRY_MIN_ANCHORS = 2 +CARRY_ANCHOR_JOINTS = [0, 1, 2, 5, 3, 6, 4, 7] +CARRY_CONF_GATE = 0.20 + +# --- Main person selection / multi-person filtering --- +FILTER_EXTRA_PEOPLE = True +MAIN_PERSON_MODE = "longest_track" +TRACK_MATCH_MIN_PX = 80.0 +TRACK_MATCH_FACTOR = 3.0 +TRACK_MAX_FRAME_GAP = 32 + +# --- Spatial outlier suppression --- +SPATIAL_OUTLIER_FIX = True +BONE_MAX_FACTOR = 2.3 +TORSO_RADIUS_FACTOR = 4.0 + +# EMA smoothing for BODY only (online) +ALPHA_BODY = 0.70 +MAX_STEP_BODY = 60.0 +VEL_ALPHA = 0.45 +EPS = 0.3 +CONF_GATE_BODY 
= 0.20 +CONF_FLOOR_BODY = 0.00 + +TRACK_DIST_PENALTY = 1.5 +FACE_WEIGHT_IN_SCORE = 0.15 +HAND_WEIGHT_IN_SCORE = 0.35 + +ALLOW_DISAPPEAR_JOINTS = {3, 4, 6, 7} + +GAP_FILL_ENABLED = True +MAX_GAP_FRAMES = 12 +MIN_RUN_FRAMES = 2 + +TORSO_SYNC_ENABLED = True +TORSO_JOINTS = {1, 2, 5, 8, 11} +TORSO_LOOKAHEAD_FRAMES = 32 + +SUPER_SMOOTH_ENABLED = True +SUPER_SMOOTH_ALPHA = 0.7 +SUPER_SMOOTH_MIN_CONF = 0.20 + +MEDIAN3_ENABLED = True + +FACE_SMOOTH_ENABLED = True +HANDS_SMOOTH_ENABLED = False + +CONF_GATE_FACE = 0.20 +CONF_GATE_HAND = 0.50 + +HAND_MIN_POINTS_PRESENT = 7 +MIN_HAND_RUN_FRAMES = 6 + +DENSE_GAP_FILL_ENABLED = False +DENSE_MAX_GAP_FRAMES = 8 +DENSE_MIN_RUN_FRAMES = 2 + +DENSE_MEDIAN3_ENABLED = False +DENSE_SUPER_SMOOTH_ENABLED = False +DENSE_SUPER_SMOOTH_ALPHA = 0.7 + + +def _snapshot_tunable_globals() -> Dict[str, Any]: + keys = [ + "FILTER_EXTRA_PEOPLE", + "SUPER_SMOOTH_ALPHA", + "MAX_GAP_FRAMES", + "MIN_RUN_FRAMES", + "DENSE_SUPER_SMOOTH_ALPHA", + "DENSE_MAX_GAP_FRAMES", + "DENSE_MIN_RUN_FRAMES", + ] + return {k: globals().get(k) for k in keys} + + +def _restore_tunable_globals(old: Dict[str, Any]) -> None: + for k, v in old.items(): + globals()[k] = v + + +def _is_valid_xyc(x: float, y: float, c: float) -> bool: + if c is None: + return False + if c <= 0: + return False + if x == 0 and y == 0: + return False + if math.isnan(x) or math.isnan(y) or math.isnan(c): + return False + return True + + +def _reshape_keypoints_2d(arr: List[float]) -> List[Tuple[float, float, float]]: + if arr is None: + return [] + if len(arr) % 3 != 0: + raise ValueError(f"keypoints length not multiple of 3: {len(arr)}") + out = [] + for i in range(0, len(arr), 3): + out.append((float(arr[i]), float(arr[i + 1]), float(arr[i + 2]))) + return out + + +def _flatten_keypoints_2d(kps: List[Tuple[float, float, float]]) -> List[float]: + out: List[float] = [] + for x, y, c in kps: + out.extend([float(x), float(y), float(c)]) + return out + + +def _sum_conf(arr: Optional[List[float]], sample_step: int = 1) -> float: + if not arr: + return 0.0 + s = 0.0 + for i in range(2, len(arr), 3 * sample_step): + try: + c = float(arr[i]) + except Exception: + c = 0.0 + if c > 0: + s += c + return s + + +def _body_center_from_pose(pose_arr: Optional[List[float]]) -> Optional[Tuple[float, float]]: + if not pose_arr: + return None + kps = _reshape_keypoints_2d(pose_arr) + idxs = [2, 5, 8, 11, 1] + pts = [] + for idx in idxs: + if idx < len(kps): + x, y, c = kps[idx] + if _is_valid_xyc(x, y, c): + pts.append((x, y)) + if not pts: + for x, y, c in kps: + if _is_valid_xyc(x, y, c): + pts.append((x, y)) + if not pts: + return None + cx = sum(p[0] for p in pts) / len(pts) + cy = sum(p[1] for p in pts) / len(pts) + return (cx, cy) + + +def _dist(a: Tuple[float, float], b: Tuple[float, float]) -> float: + return math.hypot(a[0] - b[0], a[1] - b[1]) + + +def _choose_single_person( + people: List[Dict[str, Any]], prev_center: Optional[Tuple[float, float]] +) -> Optional[Dict[str, Any]]: + if not people: + return None + best = None + best_score = -1e18 + + for p in people: + pose = p.get("pose_keypoints_2d") + face = p.get("face_keypoints_2d") + lh = p.get("hand_left_keypoints_2d") + rh = p.get("hand_right_keypoints_2d") + + score = _sum_conf(pose) + score += FACE_WEIGHT_IN_SCORE * _sum_conf(face, sample_step=4) + score += HAND_WEIGHT_IN_SCORE * (_sum_conf(lh, sample_step=2) + _sum_conf(rh, sample_step=2)) + + center = _body_center_from_pose(pose) + if prev_center is not None and center is not None: + score -= TRACK_DIST_PENALTY * 
_dist(prev_center, center) + + if score > best_score: + best_score = score + best = p + + return best + + +@dataclass +class _Track: + frames: Dict[int, Dict[str, Any]] + centers: Dict[int, Tuple[float, float]] + last_t: int + last_center: Tuple[float, float] + + +def _estimate_torso_scale(pose: List[Tuple[float, float, float]]) -> Optional[float]: + def dist(i, k) -> Optional[float]: + if i >= len(pose) or k >= len(pose): + return None + xi, yi, ci = pose[i] + xk, yk, ck = pose[k] + if not _is_valid_xyc(xi, yi, ci) or not _is_valid_xyc(xk, yk, ck): + return None + return math.hypot(xi - xk, yi - yk) + + cand = [dist(2, 5), dist(8, 11), dist(1, 8), dist(1, 11)] + cand = [c for c in cand if c is not None and c > 1e-3] + if not cand: + return None + return float(sum(cand) / len(cand)) + + +def _track_match_threshold_from_pose(pose_arr: Optional[List[float]]) -> float: + if isinstance(pose_arr, list): + pose = _reshape_keypoints_2d(pose_arr) + s = _estimate_torso_scale(pose) + if s is not None: + return max(float(TRACK_MATCH_MIN_PX), float(TRACK_MATCH_FACTOR) * float(s)) + return float(max(TRACK_MATCH_MIN_PX, 120.0)) + + +def _build_tracks_over_video(frames_data: List[Any]) -> List[_Track]: + tracks: List[_Track] = [] + + for t, frame in enumerate(frames_data): + if not isinstance(frame, dict): + continue + people = frame.get("people", []) + if not isinstance(people, list) or not people: + continue + + cand: List[Tuple[int, Dict[str, Any], Tuple[float, float]]] = [] + for i, p in enumerate(people): + if not isinstance(p, dict): + continue + pose = p.get("pose_keypoints_2d") + c = _body_center_from_pose(pose) + if c is None: + continue + cand.append((i, p, c)) + + if not cand: + continue + + used = set() + track_order = sorted(range(len(tracks)), key=lambda k: tracks[k].last_t, reverse=True) + + for k in track_order: + tr = tracks[k] + age = t - tr.last_t + if age > int(TRACK_MAX_FRAME_GAP): + continue + + best_idx = None + best_d = 1e18 + + for i, p, cc in cand: + if i in used: + continue + + thr = _track_match_threshold_from_pose(p.get("pose_keypoints_2d")) + d = _dist(tr.last_center, cc) + if d <= thr and d < best_d: + best_d = d + best_idx = i + + if best_idx is not None: + i, p, cc = next(x for x in cand if x[0] == best_idx) + used.add(i) + tr.frames[t] = p + tr.centers[t] = cc + tr.last_t = t + tr.last_center = cc + + for i, p, cc in cand: + if i in used: + continue + tracks.append(_Track(frames={t: p}, centers={t: cc}, last_t=t, last_center=cc)) + + return tracks + + +def _track_presence_score(tr: _Track) -> Tuple[int, float, float]: + frames_count = len(tr.frames) + face_sum = 0.0 + body_sum = 0.0 + for p in tr.frames.values(): + face_sum += _sum_conf(p.get("face_keypoints_2d"), sample_step=4) + body_sum += _sum_conf(p.get("pose_keypoints_2d"), sample_step=1) + return (frames_count, face_sum, body_sum) + + +def _pick_main_track(tracks: List[_Track]) -> Optional[_Track]: + if not tracks: + return None + best = None + best_key = (-1, -1e18, -1e18) + for tr in tracks: + key = _track_presence_score(tr) + if key > best_key: + best_key = key + best = tr + return best + + +@dataclass +class BodyState: + last_xy: List[Optional[Tuple[float, float]]] + last_v: List[Tuple[float, float]] + + def __init__(self, joints: int): + self.last_xy = [None] * joints + self.last_v = [(0.0, 0.0)] * joints + + +def _smooth_body_pose(pose_arr: Optional[List[float]], state: BodyState) -> Optional[List[float]]: + if pose_arr is None: + return None + + kps = _reshape_keypoints_2d(pose_arr) + J = len(kps) + if 
len(state.last_xy) != J: + state.last_xy = [None] * J + state.last_v = [(0.0, 0.0)] * J + + out: List[Tuple[float, float, float]] = [] + + for j in range(J): + x, y, c = kps[j] + last = state.last_xy[j] + vx_last, vy_last = state.last_v[j] + + valid_in = _is_valid_xyc(x, y, c) and (c >= CONF_GATE_BODY) + + if valid_in: + if last is None: + nx, ny = x, y + state.last_xy[j] = (nx, ny) + state.last_v[j] = (0.0, 0.0) + out.append((nx, ny, float(c))) + continue + + dx_raw = x - last[0] + dy_raw = y - last[1] + if abs(dx_raw) < EPS: + dx_raw = 0.0 + if abs(dy_raw) < EPS: + dy_raw = 0.0 + + vx = VEL_ALPHA * dx_raw + (1.0 - VEL_ALPHA) * vx_last + vy = VEL_ALPHA * dy_raw + (1.0 - VEL_ALPHA) * vy_last + + px = last[0] + vx + py = last[1] + vy + + nx = ALPHA_BODY * x + (1.0 - ALPHA_BODY) * px + ny = ALPHA_BODY * y + (1.0 - ALPHA_BODY) * py + + ddx = nx - last[0] + ddy = ny - last[1] + d = math.hypot(ddx, ddy) + if d > MAX_STEP_BODY and d > 1e-6: + scale = MAX_STEP_BODY / d + nx = last[0] + ddx * scale + ny = last[1] + ddy * scale + vx = nx - last[0] + vy = ny - last[1] + + state.last_xy[j] = (nx, ny) + state.last_v[j] = (vx, vy) + + out.append((nx, ny, float(c))) + else: + out.append((float(x), float(y), float(c))) + + return _flatten_keypoints_2d(out) + + +COCO18_EDGES = [ + (1, 2), + (2, 3), + (3, 4), + (1, 5), + (5, 6), + (6, 7), + (1, 8), + (8, 9), + (9, 10), + (1, 11), + (11, 12), + (12, 13), + (8, 11), + (1, 0), + (0, 14), + (14, 16), + (0, 15), + (15, 17), +] + +HAND21_EDGES = [ + (0, 1), + (1, 2), + (2, 3), + (3, 4), + (0, 5), + (5, 6), + (6, 7), + (7, 8), + (0, 9), + (9, 10), + (10, 11), + (11, 12), + (0, 13), + (13, 14), + (14, 15), + (15, 16), + (0, 17), + (17, 18), + (18, 19), + (19, 20), +] + +_NEIGHBORS = None + + +def _build_neighbors(): + global _NEIGHBORS + if _NEIGHBORS is not None: + return + neigh = {} + for a, b in COCO18_EDGES: + neigh.setdefault(a, set()).add(b) + neigh.setdefault(b, set()).add(a) + _NEIGHBORS = neigh + + +def _suppress_spatial_outliers_in_pose_arr( + pose_arr: Optional[List[float]], *, conf_gate: float +) -> Optional[List[float]]: + if not isinstance(pose_arr, list) or len(pose_arr) % 3 != 0: + return pose_arr + + pose = _reshape_keypoints_2d(pose_arr) + J = len(pose) + + center = _body_center_from_pose(pose_arr) + scale = _estimate_torso_scale(pose) + if center is None or scale is None: + return pose_arr + + cx, cy = center + max_r = TORSO_RADIUS_FACTOR * scale + max_bone = BONE_MAX_FACTOR * scale + + out = [list(p) for p in pose] + + def visible(j: int) -> bool: + if j >= J: + return False + x, y, c = out[j] + return (c >= conf_gate) and not (x == 0 and y == 0) + + for j in range(J): + x, y, c = out[j] + if c >= conf_gate and not (x == 0 and y == 0): + if math.hypot(x - cx, y - cy) > max_r: + out[j] = [0.0, 0.0, 0.0] + + for a, b in COCO18_EDGES: + if a >= J or b >= J: + continue + if not visible(a) or not visible(b): + continue + ax, ay, ac = out[a] + bx, by, bc = out[b] + d = math.hypot(ax - bx, ay - by) + if d > max_bone: + if ac <= bc: + out[a] = [0.0, 0.0, 0.0] + else: + out[b] = [0.0, 0.0, 0.0] + + flat: List[float] = [] + for x, y, c in out: + flat.extend([float(x), float(y), float(c)]) + return flat + + +def _suppress_isolated_joints_in_pose_arr( + pose_arr: Optional[List[float]], *, conf_gate: float, keep: set[int] = None +) -> Optional[List[float]]: + if not isinstance(pose_arr, list) or len(pose_arr) % 3 != 0: + return pose_arr + + _build_neighbors() + pose = _reshape_keypoints_2d(pose_arr) + J = len(pose) + out = [list(p) for p in pose] + + if 
keep is None: + keep = set() + + def vis(j: int) -> bool: + if j >= J: + return False + x, y, c = out[j] + return (c >= conf_gate) and not (x == 0 and y == 0) + + for j in range(J): + if j in keep: + continue + if not vis(j): + continue + neighs = _NEIGHBORS.get(j, set()) + if not any((n < J and vis(n)) for n in neighs): + out[j] = [0.0, 0.0, 0.0] + + flat = [] + for x, y, c in out: + flat.extend([float(x), float(y), float(c)]) + return flat + + +def _denoise_and_fill_gaps_pose_seq( + pose_arr_seq: List[Optional[List[float]]], + *, + conf_gate: float, + min_run: int, + max_gap: int, +) -> List[Optional[List[float]]]: + if not pose_arr_seq: + return pose_arr_seq + + J = None + for arr in pose_arr_seq: + if isinstance(arr, list) and len(arr) % 3 == 0 and len(arr) > 0: + J = len(arr) // 3 + break + if J is None: + return pose_arr_seq + + T = len(pose_arr_seq) + out_seq: List[Optional[List[float]]] = [] + for arr in pose_arr_seq: + if isinstance(arr, list) and len(arr) == J * 3: + out_seq.append(list(arr)) + else: + out_seq.append(arr) + + def is_vis(arr: List[float], j: int) -> bool: + x = float(arr[3 * j + 0]) + y = float(arr[3 * j + 1]) + c = float(arr[3 * j + 2]) + return (c >= conf_gate) and not (x == 0 and y == 0) + + # 1) remove short flashes + for j in range(J): + start = None + for t in range(T + 1): + cur = False + if t < T and isinstance(out_seq[t], list): + cur = is_vis(out_seq[t], j) + if cur and start is None: + start = t + if (not cur) and start is not None: + run_len = t - start + if run_len < min_run: + for k in range(start, t): + if not isinstance(out_seq[k], list): + continue + out_seq[k][3 * j + 0] = 0.0 + out_seq[k][3 * j + 1] = 0.0 + out_seq[k][3 * j + 2] = 0.0 + start = None + + # 2) gap fill only if returns + for j in range(J): + last_vis_t = None + t = 0 + while t < T: + arr = out_seq[t] + if not isinstance(arr, list): + t += 1 + continue + + cur_vis = is_vis(arr, j) + if cur_vis: + last_vis_t = t + t += 1 + continue + + if last_vis_t is None: + t += 1 + continue + + gap_start = t + t2 = t + while t2 < T: + arr2 = out_seq[t2] + if isinstance(arr2, list) and is_vis(arr2, j): + break + t2 += 1 + + if t2 >= T: + break + + gap_len = t2 - gap_start + if gap_len <= 0: + t = t2 + continue + + if gap_len <= max_gap: + a = out_seq[last_vis_t] + b = out_seq[t2] + if isinstance(a, list) and isinstance(b, list): + ax, ay, ac = float(a[3 * j + 0]), float(a[3 * j + 1]), float(a[3 * j + 2]) + bx, by, bc = float(b[3 * j + 0]), float(b[3 * j + 1]), float(b[3 * j + 2]) + if not (ax == 0 and ay == 0) and not (bx == 0 and by == 0): + conf_fill = min(ac, bc) + for k in range(gap_len): + tt = gap_start + k + if not isinstance(out_seq[tt], list): + continue + r = (k + 1) / (gap_len + 1) + x = ax + (bx - ax) * r + y = ay + (by - ay) * r + out_seq[tt][3 * j + 0] = float(x) + out_seq[tt][3 * j + 1] = float(y) + out_seq[tt][3 * j + 2] = float(conf_fill) + + t = t2 + + return out_seq + + +def _zero_lag_ema_pose_seq( + pose_seq: List[Optional[List[float]]], *, alpha: float, conf_gate: float +) -> List[Optional[List[float]]]: + if not pose_seq: + return pose_seq + + J = None + for arr in pose_seq: + if isinstance(arr, list) and len(arr) % 3 == 0 and len(arr) > 0: + J = len(arr) // 3 + break + if J is None: + return pose_seq + + T = len(pose_seq) + + def is_vis(arr: List[float], j: int) -> bool: + x = float(arr[3 * j + 0]) + y = float(arr[3 * j + 1]) + c = float(arr[3 * j + 2]) + return (c >= conf_gate) and not (x == 0 and y == 0) + + fwd = [None] * T + last = [None] * J + for t in range(T): + arr = 
pose_seq[t] + if not isinstance(arr, list) or len(arr) != J * 3: + fwd[t] = arr + continue + out = list(arr) + for j in range(J): + if is_vis(arr, j): + x = float(arr[3 * j + 0]) + y = float(arr[3 * j + 1]) + if last[j] is None: + sx, sy = x, y + else: + sx = alpha * x + (1 - alpha) * last[j][0] + sy = alpha * y + (1 - alpha) * last[j][1] + last[j] = (sx, sy) + out[3 * j + 0] = float(sx) + out[3 * j + 1] = float(sy) + fwd[t] = out + + bwd = [None] * T + last = [None] * J + for t in range(T - 1, -1, -1): + arr = fwd[t] + if not isinstance(arr, list) or len(arr) != J * 3: + bwd[t] = arr + continue + out = list(arr) + for j in range(J): + if is_vis(arr, j): + x = float(arr[3 * j + 0]) + y = float(arr[3 * j + 1]) + if last[j] is None: + sx, sy = x, y + else: + sx = alpha * x + (1 - alpha) * last[j][0] + sy = alpha * y + (1 - alpha) * last[j][1] + last[j] = (sx, sy) + out[3 * j + 0] = float(sx) + out[3 * j + 1] = float(sy) + bwd[t] = out + + return bwd + + +def _apply_root_scale( + pose_arr: Optional[List[float]], + *, + src_root: Tuple[float, float], + src_scale: float, + dst_root: Tuple[float, float], + dst_scale: float, +) -> Optional[List[float]]: + if not isinstance(pose_arr, list) or len(pose_arr) % 3 != 0: + return pose_arr + if src_scale <= 1e-6 or dst_scale <= 1e-6: + return pose_arr + + kps = _reshape_keypoints_2d(pose_arr) + out = [] + s = dst_scale / src_scale + + for x, y, c in kps: + if c <= 0 or (x == 0 and y == 0): + out.append((x, y, c)) + continue + nx = dst_root[0] + (x - src_root[0]) * s + ny = dst_root[1] + (y - src_root[1]) * s + out.append((nx, ny, c)) + + return _flatten_keypoints_2d(out) + + +def _carry_pose_when_torso_missing( + pose_seq: List[Optional[List[float]]], + *, + conf_gate: float, + max_carry: int, + anchor_joints: List[int], + min_anchors: int, +) -> List[Optional[List[float]]]: + if not pose_seq: + return pose_seq + + J = None + for arr in pose_seq: + if isinstance(arr, list) and len(arr) % 3 == 0 and len(arr) > 0: + J = len(arr) // 3 + break + if J is None: + return pose_seq + + out = [a if a is None else list(a) for a in pose_seq] + + FILL_JOINTS = {1, 8, 9, 10, 11, 12, 13} + FILL_JOINTS -= set(ALLOW_DISAPPEAR_JOINTS) + + def is_vis_flat(arr: List[float], j: int) -> bool: + x = float(arr[3 * j + 0]) + y = float(arr[3 * j + 1]) + c = float(arr[3 * j + 2]) + return (c >= conf_gate) and not (x == 0 and y == 0) + + def count_visible(arr: List[float], joints: List[int]) -> int: + c = 0 + for j in joints: + if j < J and is_vis_flat(arr, j): + c += 1 + return c + + def root_scale_from_anchors(arr: List[float]) -> Optional[Tuple[Tuple[float, float], float]]: + pts = [] + for j in anchor_joints: + if j >= J: + continue + if is_vis_flat(arr, j): + x = float(arr[3 * j + 0]) + y = float(arr[3 * j + 1]) + pts.append((x, y)) + if len(pts) < min_anchors: + return None + + rx = sum(p[0] for p in pts) / len(pts) + ry = sum(p[1] for p in pts) / len(pts) + + xs = [p[0] for p in pts] + ys = [p[1] for p in pts] + scale = max(max(xs) - min(xs), max(ys) - min(ys)) + if scale <= 1e-3: + return None + + return (rx, ry), float(scale) + + last_good: Optional[List[float]] = None + last_good_rs: Optional[Tuple[Tuple[float, float], float]] = None + carry_left = 0 + + for t in range(len(out)): + arr = out[t] + if not isinstance(arr, list) or len(arr) != J * 3: + continue + + anchors_ok = count_visible(arr, anchor_joints) >= min_anchors + fill_vis = sum(1 for j in FILL_JOINTS if j < J and is_vis_flat(arr, j)) + rs = root_scale_from_anchors(arr) + + if anchors_ok and rs is not None and 
fill_vis >= 2: + last_good = list(arr) + last_good_rs = rs + carry_left = max_carry + continue + + if anchors_ok and rs is not None and last_good is not None and last_good_rs is not None and carry_left > 0: + dst_root, dst_scale = rs + src_root, src_scale = last_good_rs + + carried_full = _apply_root_scale( + last_good, + src_root=src_root, + src_scale=src_scale, + dst_root=dst_root, + dst_scale=dst_scale, + ) + if isinstance(carried_full, list) and len(carried_full) == J * 3: + for j in FILL_JOINTS: + if j >= J: + continue + if is_vis_flat(arr, j): + continue + + cx = float(carried_full[3 * j + 0]) + cy = float(carried_full[3 * j + 1]) + cc = float(carried_full[3 * j + 2]) + + if (cx == 0 and cy == 0) or cc <= 0: + continue + + arr[3 * j + 0] = cx + arr[3 * j + 1] = cy + arr[3 * j + 2] = max(min(cc, 0.60), conf_gate) + + out[t] = arr + carry_left -= 1 + continue + + carry_left = max(carry_left - 1, 0) + + return out + + +def _force_full_torso_pair( + pose_seq: List[Optional[List[float]]], + *, + conf_gate: float, + anchor_joints: List[int], + min_anchors: int, + max_lookback: int = 240, + fill_legs_with_hip: bool = True, + always_fill_if_one_hip: bool = True, +) -> List[Optional[List[float]]]: + if not pose_seq: + return pose_seq + + J = None + for arr in pose_seq: + if isinstance(arr, list) and len(arr) % 3 == 0 and len(arr) > 0: + J = len(arr) // 3 + break + if J is None: + return pose_seq + + out = [a if a is None else list(a) for a in pose_seq] + + R_HIP, R_KNEE, R_ANK = 8, 9, 10 + L_HIP, L_KNEE, L_ANK = 11, 12, 13 + + def is_vis(arr: List[float], j: int) -> bool: + if j >= J: + return False + x = float(arr[3 * j + 0]) + y = float(arr[3 * j + 1]) + c = float(arr[3 * j + 2]) + return (c >= conf_gate) and not (x == 0 and y == 0) + + def count_visible(arr: List[float], joints: List[int]) -> int: + c = 0 + for j in joints: + if is_vis(arr, j): + c += 1 + return c + + def root_scale_from_anchors(arr: List[float]) -> Optional[Tuple[Tuple[float, float], float]]: + pts = [] + for j in anchor_joints: + if j >= J: + continue + if is_vis(arr, j): + pts.append((float(arr[3 * j + 0]), float(arr[3 * j + 1]))) + if len(pts) < min_anchors: + return None + + rx = sum(p[0] for p in pts) / len(pts) + ry = sum(p[1] for p in pts) / len(pts) + + xs = [p[0] for p in pts] + ys = [p[1] for p in pts] + scale = max(max(xs) - min(xs), max(ys) - min(ys)) + if scale <= 1e-3: + return None + return (rx, ry), float(scale) + + last_full_idx = None + last_full = None + last_full_rs = None + + for t in range(len(out)): + arr = out[t] + if not isinstance(arr, list) or len(arr) != J * 3: + continue + + rs = root_scale_from_anchors(arr) + + r_ok = is_vis(arr, R_HIP) + l_ok = is_vis(arr, L_HIP) + + anchors_ok = count_visible(arr, anchor_joints) >= min_anchors + + if anchors_ok and rs is not None and r_ok and l_ok: + last_full_idx = t + last_full = list(arr) + last_full_rs = rs + continue + + if last_full is None or last_full_rs is None or last_full_idx is None: + continue + if (t - last_full_idx) > max_lookback: + continue + if not (r_ok or l_ok): + continue + if r_ok and l_ok: + continue + if not always_fill_if_one_hip: + continue + if rs is None: + continue + + dst_root, dst_scale = rs + src_root, src_scale = last_full_rs + + carried = _apply_root_scale( + last_full, + src_root=src_root, + src_scale=src_scale, + dst_root=dst_root, + dst_scale=dst_scale, + ) + if not (isinstance(carried, list) and len(carried) == J * 3): + continue + + def copy_joint(j: int): + if j >= J: + return + if is_vis(arr, j): + return + cx = 
float(carried[3 * j + 0]) + cy = float(carried[3 * j + 1]) + cc = float(carried[3 * j + 2]) + if (cx == 0 and cy == 0) or cc <= 0: + return + arr[3 * j + 0] = cx + arr[3 * j + 1] = cy + arr[3 * j + 2] = max(min(cc, 0.60), conf_gate) + + if not r_ok: + copy_joint(R_HIP) + if fill_legs_with_hip: + copy_joint(R_KNEE) + copy_joint(R_ANK) + + if not l_ok: + copy_joint(L_HIP) + if fill_legs_with_hip: + copy_joint(L_KNEE) + copy_joint(L_ANK) + + out[t] = arr + + return out + + +def _median3_pose_seq(pose_seq: List[Optional[List[float]]], *, conf_gate: float) -> List[Optional[List[float]]]: + if not pose_seq: + return pose_seq + + J = None + for arr in pose_seq: + if isinstance(arr, list) and len(arr) % 3 == 0 and len(arr) > 0: + J = len(arr) // 3 + break + if J is None: + return pose_seq + + T = len(pose_seq) + + def is_vis(arr: List[float], j: int) -> bool: + x = float(arr[3 * j + 0]) + y = float(arr[3 * j + 1]) + c = float(arr[3 * j + 2]) + return (c >= conf_gate) and not (x == 0 and y == 0) + + out_seq: List[Optional[List[float]]] = [] + for t in range(T): + arr = pose_seq[t] + if not isinstance(arr, list) or len(arr) != J * 3: + out_seq.append(arr) + continue + + out = list(arr) + t0 = max(0, t - 1) + t1 = t + t2 = min(T - 1, t + 1) + + a0 = pose_seq[t0] + a1 = pose_seq[t1] + a2 = pose_seq[t2] + + for j in range(J): + if not is_vis(arr, j): + continue + + xs, ys = [], [] + for aa in (a0, a1, a2): + if isinstance(aa, list) and len(aa) == J * 3 and is_vis(aa, j): + xs.append(float(aa[3 * j + 0])) + ys.append(float(aa[3 * j + 1])) + + if len(xs) >= 2: + xs.sort() + ys.sort() + out[3 * j + 0] = float(xs[len(xs) // 2]) + out[3 * j + 1] = float(ys[len(ys) // 2]) + + out_seq.append(out) + + return out_seq + + +def _sync_group_appearances( + pose_arr_seq: List[Optional[List[float]]], + *, + group: set[int], + conf_gate: float, + lookahead: int, +) -> List[Optional[List[float]]]: + if not pose_arr_seq: + return pose_arr_seq + + J = None + for arr in pose_arr_seq: + if isinstance(arr, list) and len(arr) % 3 == 0 and len(arr) > 0: + J = len(arr) // 3 + break + if J is None: + return pose_arr_seq + + T = len(pose_arr_seq) + out_seq: List[Optional[List[float]]] = [] + for arr in pose_arr_seq: + if isinstance(arr, list) and len(arr) == J * 3: + out_seq.append(list(arr)) + else: + out_seq.append(arr) + + def is_vis(arr: List[float], j: int) -> bool: + x = float(arr[3 * j + 0]) + y = float(arr[3 * j + 1]) + c = float(arr[3 * j + 2]) + return (c >= conf_gate) and not (x == 0 and y == 0) + + for t in range(T): + arr = out_seq[t] + if not isinstance(arr, list): + continue + + vis = {j for j in group if j < J and is_vis(arr, j)} + if not vis: + continue + + missing = {j for j in group if j < J and j not in vis} + if not missing: + continue + + appear_t: dict[int, int] = {} + for j in list(missing): + t2 = t + 1 + while t2 < T and t2 <= t + lookahead: + arr2 = out_seq[t2] + if isinstance(arr2, list) and is_vis(arr2, j): + appear_t[j] = t2 + break + t2 += 1 + + if not appear_t: + continue + + for j, t2 in appear_t.items(): + last_t = None + for tb in range(t - 1, -1, -1): + arrb = out_seq[tb] + if isinstance(arrb, list) and is_vis(arrb, j): + last_t = tb + break + + if last_t is None: + b = out_seq[t2] + if not isinstance(b, list): + continue + bx, by, bc = float(b[3 * j + 0]), float(b[3 * j + 1]), float(b[3 * j + 2]) + for k in range(t, t2): + a = out_seq[k] + if not isinstance(a, list): + continue + a[3 * j + 0] = bx + a[3 * j + 1] = by + a[3 * j + 2] = bc + continue + + a0 = out_seq[last_t] + b0 = out_seq[t2] + 
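+ # Linearly interpolate joint j between its last visible frame (last_t) and its next appearance (t2); the filled confidence is the lower of the two endpoint confidences.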
if not (isinstance(a0, list) and isinstance(b0, list)): + continue + + ax, ay, ac = float(a0[3 * j + 0]), float(a0[3 * j + 1]), float(a0[3 * j + 2]) + bx, by, bc = float(b0[3 * j + 0]), float(b0[3 * j + 1]), float(b0[3 * j + 2]) + + if (ax == 0 and ay == 0) or (bx == 0 and by == 0): + continue + + conf_fill = min(ac, bc) + total = t2 - last_t + if total <= 0: + continue + + for tt in range(t, t2): + a = out_seq[tt] + if not isinstance(a, list): + continue + r = (tt - last_t) / total + x = ax + (bx - ax) * r + y = ay + (by - ay) * r + a[3 * j + 0] = float(x) + a[3 * j + 1] = float(y) + a[3 * j + 2] = float(conf_fill) + + return out_seq + + +def _count_valid_points(arr: Optional[List[float]], *, conf_gate: float) -> int: + if not isinstance(arr, list) or len(arr) % 3 != 0: + return 0 + cnt = 0 + for i in range(0, len(arr), 3): + x, y, c = float(arr[i]), float(arr[i + 1]), float(arr[i + 2]) + if c >= conf_gate and not (x == 0 and y == 0): + cnt += 1 + return cnt + + +def _zero_out_kps(arr: Optional[List[float]]) -> Optional[List[float]]: + if not isinstance(arr, list) or len(arr) % 3 != 0: + return arr + out = list(arr) + for i in range(0, len(out), 3): + out[i + 0] = 0.0 + out[i + 1] = 0.0 + out[i + 2] = 0.0 + return out + + +def _pin_body_wrist_to_hand( + p_out: Dict[str, Any], + *, + side: str, + conf_gate_body: float = 0.2, + conf_gate_hand: float = 0.2, + blend: float = 1.0, +) -> None: + if side == "right": + bw = 4 + hk = "hand_right_keypoints_2d" + else: + bw = 7 + hk = "hand_left_keypoints_2d" + + pose = p_out.get("pose_keypoints_2d") + hand = p_out.get(hk) + + if not (isinstance(pose, list) and isinstance(hand, list)): + return + if len(pose) < (bw * 3 + 3): + return + if len(hand) < 3: + return + + hx, hy, hc = float(hand[0]), float(hand[1]), float(hand[2]) + if hc < conf_gate_hand or (hx == 0.0 and hy == 0.0): + return + + bx, by, bc = float(pose[bw * 3 + 0]), float(pose[bw * 3 + 1]), float(pose[bw * 3 + 2]) + + if bc < conf_gate_body or (bx == 0.0 and by == 0.0): + pose[bw * 3 + 0] = hx + pose[bw * 3 + 1] = hy + pose[bw * 3 + 2] = float(max(bc, min(hc, 0.9))) + else: + nx = bx * (1.0 - blend) + hx * blend + ny = by * (1.0 - blend) + hy * blend + pose[bw * 3 + 0] = nx + pose[bw * 3 + 1] = ny + pose[bw * 3 + 2] = float(min(bc, hc)) + + p_out["pose_keypoints_2d"] = pose + + +def _fix_elbow_using_wrist(p_out: Dict[str, Any], *, side: str, conf_gate: float = 0.2) -> None: + pose = p_out.get("pose_keypoints_2d") + if not isinstance(pose, list) or len(pose) % 3 != 0: + return + + if side == "right": + sh, el, wr = 2, 3, 4 + else: + sh, el, wr = 5, 6, 7 + + def get(j): + return float(pose[3 * j + 0]), float(pose[3 * j + 1]), float(pose[3 * j + 2]) + + def vis(x, y, c): + return c >= conf_gate and not (x == 0.0 and y == 0.0) + + sx, sy, sc = get(sh) + ex, ey, ec = get(el) + wx, wy, wc = get(wr) + + if not (vis(sx, sy, sc) and vis(wx, wy, wc)): + return + + if vis(ex, ey, ec): + Lse = math.hypot(ex - sx, ey - sy) + Lew = math.hypot(wx - ex, wy - ey) + else: + dsw = math.hypot(wx - sx, wy - sy) + if dsw < 1e-3: + return + Lse = 0.55 * dsw + Lew = 0.45 * dsw + + dx = wx - sx + dy = wy - sy + d = math.hypot(dx, dy) + if d < 1e-6: + return + + d2 = max(min(d, (Lse + Lew) - 1e-3), abs(Lse - Lew) + 1e-3) + + a = (Lse * Lse - Lew * Lew + d2 * d2) / (2.0 * d2) + h2 = max(Lse * Lse - a * a, 0.0) + h = math.sqrt(h2) + + ux = dx / d + uy = dy / d + px = sx + a * ux + py = sy + a * uy + + rx = -uy + ry = ux + + e1x, e1y = px + h * rx, py + h * ry + e2x, e2y = px - h * rx, py - h * ry + + if vis(ex, 
ey, ec): + if math.hypot(e1x - ex, e1y - ey) <= math.hypot(e2x - ex, e2y - ey): + nx, ny = e1x, e1y + else: + nx, ny = e2x, e2y + else: + nx, ny = e1x, e1y + + pose[3 * el + 0] = float(nx) + pose[3 * el + 1] = float(ny) + pose[3 * el + 2] = float(max(min(ec, 0.8), conf_gate)) + + p_out["pose_keypoints_2d"] = pose + + +def _remove_short_presence_runs_kps_seq( + seq: List[Optional[List[float]]], + *, + conf_gate: float, + min_points_present: int, + min_run: int, +) -> List[Optional[List[float]]]: + if not seq: + return seq + + present = [(_count_valid_points(a, conf_gate=conf_gate) >= min_points_present) for a in seq] + out = [None if a is None else list(a) for a in seq] + + start = None + for t in range(len(seq) + 1): + cur = present[t] if t < len(seq) else False + if cur and start is None: + start = t + if (not cur) and start is not None: + run_len = t - start + if run_len < min_run: + for k in range(start, t): + out[k] = _zero_out_kps(out[k]) + start = None + + return out + + +def _zero_sparse_frames_kps_seq( + seq: List[Optional[List[float]]], *, conf_gate: float, min_points_present: int +) -> List[Optional[List[float]]]: + if not seq: + return seq + + out: List[Optional[List[float]]] = [] + for a in seq: + if not isinstance(a, list): + out.append(a) + continue + if _count_valid_points(a, conf_gate=conf_gate) < min_points_present: + out.append(_zero_out_kps(a)) + else: + out.append(a) + return out + + +def _suppress_spatial_outliers_in_hand_arr( + hand_arr: Optional[List[float]], *, conf_gate: float, max_bone_factor: float = 3.0 +) -> Optional[List[float]]: + if not isinstance(hand_arr, list) or len(hand_arr) % 3 != 0: + return hand_arr + pts = _reshape_keypoints_2d(hand_arr) + J = len(pts) + if J < 21: + return hand_arr + + out = [list(p) for p in pts] + + def vis(j: int) -> bool: + x, y, c = out[j] + return c >= conf_gate and not (x == 0 and y == 0) + + vv = [(x, y) for (x, y, c) in out if c >= conf_gate and not (x == 0 and y == 0)] + if len(vv) < 6: + return hand_arr + xs = [p[0] for p in vv] + ys = [p[1] for p in vv] + scale = max(max(xs) - min(xs), max(ys) - min(ys)) + if scale <= 1e-3: + return hand_arr + max_bone = max_bone_factor * scale + + for a, b in HAND21_EDGES: + if a >= J or b >= J: + continue + if not vis(a) or not vis(b): + continue + ax, ay, ac = out[a] + bx, by, bc = out[b] + d = math.hypot(ax - bx, ay - by) + if d > max_bone: + if ac <= bc: + out[a] = [0.0, 0.0, 0.0] + else: + out[b] = [0.0, 0.0, 0.0] + + return _flatten_keypoints_2d([(x, y, c) for x, y, c in out]) + + +def _body_head_root_scale_from_pose( + pose_arr: Optional[List[float]], *, conf_gate: float +) -> Optional[Tuple[Tuple[float, float], float]]: + if not isinstance(pose_arr, list) or len(pose_arr) % 3 != 0: + return None + kps = _reshape_keypoints_2d(pose_arr) + + def vis(j: int) -> Optional[Tuple[float, float]]: + if j >= len(kps): + return None + x, y, c = kps[j] + if c >= conf_gate and not (x == 0 and y == 0): + return (float(x), float(y)) + return None + + pts = [] + for j in [0, 1, 14, 15, 16, 17]: + p = vis(j) + if p is not None: + pts.append(p) + + if not pts: + return None + + rx = sum(p[0] for p in pts) / len(pts) + ry = sum(p[1] for p in pts) / len(pts) + root = (rx, ry) + + def dist(a: int, b: int) -> Optional[float]: + pa, pb = vis(a), vis(b) + if pa is None or pb is None: + return None + d = math.hypot(pa[0] - pb[0], pa[1] - pb[1]) + return d if d > 1e-3 else None + + cands = [dist(14, 15), dist(16, 17), dist(2, 5)] + cands = [c for c in cands if c is not None] + if not cands: + return None 
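+ # Head scale: average the available size proxies (eye-to-eye, ear-to-ear, shoulder width).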
+ + scale = float(sum(cands) / len(cands)) + return root, scale + + +def _body_wrist_root_scale_from_pose( + pose_arr: Optional[List[float]], *, side: str, conf_gate: float +) -> Optional[Tuple[Tuple[float, float], float]]: + if not isinstance(pose_arr, list) or len(pose_arr) % 3 != 0: + return None + kps = _reshape_keypoints_2d(pose_arr) + + if side == "right": + w, e = 4, 3 + else: + w, e = 7, 6 + + def vis(j: int) -> Optional[Tuple[float, float]]: + if j >= len(kps): + return None + x, y, c = kps[j] + if c >= conf_gate and not (x == 0 and y == 0): + return (float(x), float(y)) + return None + + pw = vis(w) + if pw is None: + return None + root = pw + + pe = vis(e) + scale = None + if pe is not None: + d = math.hypot(pw[0] - pe[0], pw[1] - pe[1]) + if d > 1e-3: + scale = d + + if scale is None: + p2 = vis(2) + p5 = vis(5) + if p2 is not None and p5 is not None: + d = math.hypot(p2[0] - p5[0], p2[1] - p5[1]) + if d > 1e-3: + scale = d + + if scale is None: + return None + + return root, float(scale) + + +def _smooth_dense_seq_anchored_to_body( + dense_seq: List[Optional[List[float]]], + body_pose_seq: List[Optional[List[float]]], + *, + kind: str, + conf_gate_dense: float, + conf_gate_body: float, + median3: bool, + zero_lag_alpha: float, +) -> List[Optional[List[float]]]: + if not dense_seq: + return dense_seq + + Jd = None + for a in dense_seq: + if isinstance(a, list) and len(a) % 3 == 0 and len(a) > 0: + Jd = len(a) // 3 + break + if Jd is None: + return dense_seq + + T = len(dense_seq) + out = [None if a is None else list(a) for a in dense_seq] + + norm_seq: List[Optional[List[float]]] = [None] * T + + for t in range(T): + arr = out[t] + body = body_pose_seq[t] if t < len(body_pose_seq) else None + if not isinstance(arr, list) or len(arr) != Jd * 3 or not isinstance(body, list): + norm_seq[t] = arr + continue + + if kind == "face": + rs = _body_head_root_scale_from_pose(body, conf_gate=conf_gate_body) + elif kind == "hand_left": + rs = _body_wrist_root_scale_from_pose(body, side="left", conf_gate=conf_gate_body) + else: + rs = _body_wrist_root_scale_from_pose(body, side="right", conf_gate=conf_gate_body) + + if rs is None: + norm_seq[t] = arr + continue + + (rx, ry), s = rs + if s <= 1e-6: + norm_seq[t] = arr + continue + + nn = list(arr) + for j in range(Jd): + x = float(arr[3 * j + 0]) + y = float(arr[3 * j + 1]) + c = float(arr[3 * j + 2]) + if c >= conf_gate_dense and not (x == 0 and y == 0): + nn[3 * j + 0] = (x - rx) / s + nn[3 * j + 1] = (y - ry) / s + norm_seq[t] = nn + + if median3: + norm_seq = _median3_pose_seq(norm_seq, conf_gate=conf_gate_dense) + + norm_seq = _zero_lag_ema_pose_seq(norm_seq, alpha=zero_lag_alpha, conf_gate=conf_gate_dense) + + for t in range(T): + arrn = norm_seq[t] + body = body_pose_seq[t] if t < len(body_pose_seq) else None + if not isinstance(arrn, list) or len(arrn) != Jd * 3 or not isinstance(body, list): + continue + + if kind == "face": + rs = _body_head_root_scale_from_pose(body, conf_gate=conf_gate_body) + elif kind == "hand_left": + rs = _body_wrist_root_scale_from_pose(body, side="left", conf_gate=conf_gate_body) + else: + rs = _body_wrist_root_scale_from_pose(body, side="right", conf_gate=conf_gate_body) + + if rs is None: + continue + + (rx, ry), s = rs + if s <= 1e-6: + continue + + orig = out[t] + for j in range(Jd): + x = float(arrn[3 * j + 0]) + y = float(arrn[3 * j + 1]) + c = float(arrn[3 * j + 2]) + + ox = float(orig[3 * j + 0]) + oy = float(orig[3 * j + 1]) + oc = float(orig[3 * j + 2]) + + if oc >= conf_gate_dense and not (ox == 0 
and oy == 0) and c >= conf_gate_dense: + orig[3 * j + 0] = rx + x * s + orig[3 * j + 1] = ry + y * s + + out[t] = orig + + return out + + +def smooth_KPS_json_obj( + data: Any, + *, + keep_face_untouched: bool = True, + keep_hands_untouched: bool = True, + filter_extra_people: Optional[bool] = None, +) -> Any: + if not isinstance(data, list): + raise ValueError("Expected top-level JSON to be a list of frames.") + + if filter_extra_people is None: + filter_extra_people = bool(FILTER_EXTRA_PEOPLE) + + chosen_people: List[Optional[Dict[str, Any]]] = [None] * len(data) + + if MAIN_PERSON_MODE == "longest_track": + tracks = _build_tracks_over_video(data) + main_tr = _pick_main_track(tracks) + + if main_tr is not None: + for t in range(len(data)): + if t in main_tr.frames: + chosen_people[t] = main_tr.frames[t] + else: + prev_center: Optional[Tuple[float, float]] = None + for i, frame in enumerate(data): + if not isinstance(frame, dict): + continue + people = frame.get("people", []) + if not isinstance(people, list) or len(people) == 0: + continue + chosen = _choose_single_person(people, prev_center) + chosen_people[i] = chosen + if chosen is not None: + c = _body_center_from_pose(chosen.get("pose_keypoints_2d")) + if c is not None: + prev_center = c + else: + prev_center: Optional[Tuple[float, float]] = None + for i, frame in enumerate(data): + if not isinstance(frame, dict): + continue + people = frame.get("people", []) + if not isinstance(people, list) or len(people) == 0: + continue + chosen = _choose_single_person(people, prev_center) + chosen_people[i] = chosen + if chosen is not None: + c = _body_center_from_pose(chosen.get("pose_keypoints_2d")) + if c is not None: + prev_center = c + + pose_seq: List[Optional[List[float]]] = [] + for p in chosen_people: + pose_seq.append(p.get("pose_keypoints_2d") if isinstance(p, dict) else None) + + if SPATIAL_OUTLIER_FIX: + pose_seq = [ + _suppress_spatial_outliers_in_pose_arr(arr, conf_gate=CONF_GATE_BODY) if arr is not None else None + for arr in pose_seq + ] + + if GAP_FILL_ENABLED: + pose_seq = _denoise_and_fill_gaps_pose_seq( + pose_seq, + conf_gate=CONF_GATE_BODY, + min_run=MIN_RUN_FRAMES, + max_gap=MAX_GAP_FRAMES, + ) + + if TORSO_SYNC_ENABLED: + pose_seq = _sync_group_appearances( + pose_seq, + group=TORSO_JOINTS, + conf_gate=CONF_GATE_BODY, + lookahead=TORSO_LOOKAHEAD_FRAMES, + ) + + pose_seq = [ + ( + _suppress_isolated_joints_in_pose_arr(arr, conf_gate=CONF_GATE_BODY, keep=TORSO_JOINTS) + if arr is not None + else None + ) + for arr in pose_seq + ] + + if MEDIAN3_ENABLED: + pose_seq = _median3_pose_seq(pose_seq, conf_gate=CONF_GATE_BODY) + + if SUPER_SMOOTH_ENABLED: + pose_seq = _zero_lag_ema_pose_seq(pose_seq, alpha=SUPER_SMOOTH_ALPHA, conf_gate=SUPER_SMOOTH_MIN_CONF) + + if ROOTSCALE_CARRY_ENABLED: + pose_seq = _carry_pose_when_torso_missing( + pose_seq, + conf_gate=CARRY_CONF_GATE, + max_carry=CARRY_MAX_FRAMES, + anchor_joints=CARRY_ANCHOR_JOINTS, + min_anchors=CARRY_MIN_ANCHORS, + ) + + pose_seq = _force_full_torso_pair( + pose_seq, + conf_gate=CARRY_CONF_GATE, + anchor_joints=CARRY_ANCHOR_JOINTS, + min_anchors=CARRY_MIN_ANCHORS, + max_lookback=240, + fill_legs_with_hip=True, + always_fill_if_one_hip=True, + ) + + face_seq: List[Optional[List[float]]] = [] + lh_seq: List[Optional[List[float]]] = [] + rh_seq: List[Optional[List[float]]] = [] + + for p in chosen_people: + if isinstance(p, dict): + face_seq.append(p.get("face_keypoints_2d")) + lh_seq.append(p.get("hand_left_keypoints_2d")) + rh_seq.append(p.get("hand_right_keypoints_2d")) 
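+ # Frames without a chosen person are padded with None so sequence indices stay aligned with frame indices.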
+ else: + face_seq.append(None) + lh_seq.append(None) + rh_seq.append(None) + + if HANDS_SMOOTH_ENABLED and (not keep_hands_untouched): + lh_seq = [ + _suppress_spatial_outliers_in_hand_arr(a, conf_gate=CONF_GATE_HAND) if a is not None else None + for a in lh_seq + ] + rh_seq = [ + _suppress_spatial_outliers_in_hand_arr(a, conf_gate=CONF_GATE_HAND) if a is not None else None + for a in rh_seq + ] + + lh_seq = _remove_short_presence_runs_kps_seq( + lh_seq, conf_gate=CONF_GATE_HAND, min_points_present=HAND_MIN_POINTS_PRESENT, min_run=MIN_HAND_RUN_FRAMES + ) + rh_seq = _remove_short_presence_runs_kps_seq( + rh_seq, conf_gate=CONF_GATE_HAND, min_points_present=HAND_MIN_POINTS_PRESENT, min_run=MIN_HAND_RUN_FRAMES + ) + + lh_seq = _zero_sparse_frames_kps_seq( + lh_seq, conf_gate=CONF_GATE_HAND, min_points_present=HAND_MIN_POINTS_PRESENT + ) + rh_seq = _zero_sparse_frames_kps_seq( + rh_seq, conf_gate=CONF_GATE_HAND, min_points_present=HAND_MIN_POINTS_PRESENT + ) + + if DENSE_GAP_FILL_ENABLED: + lh_seq = _denoise_and_fill_gaps_pose_seq( + lh_seq, conf_gate=CONF_GATE_HAND, min_run=DENSE_MIN_RUN_FRAMES, max_gap=DENSE_MAX_GAP_FRAMES + ) + rh_seq = _denoise_and_fill_gaps_pose_seq( + rh_seq, conf_gate=CONF_GATE_HAND, min_run=DENSE_MIN_RUN_FRAMES, max_gap=DENSE_MAX_GAP_FRAMES + ) + + if FACE_SMOOTH_ENABLED and (not keep_face_untouched): + if DENSE_GAP_FILL_ENABLED: + face_seq = _denoise_and_fill_gaps_pose_seq( + face_seq, conf_gate=CONF_GATE_FACE, min_run=DENSE_MIN_RUN_FRAMES, max_gap=DENSE_MAX_GAP_FRAMES + ) + + if FACE_SMOOTH_ENABLED and (not keep_face_untouched): + face_seq = _smooth_dense_seq_anchored_to_body( + face_seq, + pose_seq, + kind="face", + conf_gate_dense=CONF_GATE_FACE, + conf_gate_body=CONF_GATE_BODY, + median3=DENSE_MEDIAN3_ENABLED, + zero_lag_alpha=DENSE_SUPER_SMOOTH_ALPHA, + ) + + if HANDS_SMOOTH_ENABLED and (not keep_hands_untouched): + lh_seq = _smooth_dense_seq_anchored_to_body( + lh_seq, + pose_seq, + kind="hand_left", + conf_gate_dense=CONF_GATE_HAND, + conf_gate_body=CONF_GATE_BODY, + median3=DENSE_MEDIAN3_ENABLED, + zero_lag_alpha=DENSE_SUPER_SMOOTH_ALPHA, + ) + rh_seq = _smooth_dense_seq_anchored_to_body( + rh_seq, + pose_seq, + kind="hand_right", + conf_gate_dense=CONF_GATE_HAND, + conf_gate_body=CONF_GATE_BODY, + median3=DENSE_MEDIAN3_ENABLED, + zero_lag_alpha=DENSE_SUPER_SMOOTH_ALPHA, + ) + + out_frames = [] + body_state: Optional[BodyState] = None + + for i, frame in enumerate(data): + if not isinstance(frame, dict): + out_frames.append(frame) + continue + + frame_out = copy.deepcopy(frame) + chosen = chosen_people[i] + + if chosen is None: + if filter_extra_people: + frame_out["people"] = [] + out_frames.append(frame_out) + continue + + p_out = copy.deepcopy(chosen) + p_out["pose_keypoints_2d"] = pose_seq[i] + + pose_arr = p_out.get("pose_keypoints_2d") + joints = (len(pose_arr) // 3) if isinstance(pose_arr, list) else 0 + if body_state is None: + body_state = BodyState(joints if joints > 0 else 18) + + p_out["pose_keypoints_2d"] = _smooth_body_pose(p_out.get("pose_keypoints_2d"), body_state) + + if FACE_SMOOTH_ENABLED and (not keep_face_untouched): + p_out["face_keypoints_2d"] = face_seq[i] + else: + p_out["face_keypoints_2d"] = chosen.get("face_keypoints_2d", p_out.get("face_keypoints_2d")) + + if HANDS_SMOOTH_ENABLED and (not keep_hands_untouched): + p_out["hand_left_keypoints_2d"] = lh_seq[i] + p_out["hand_right_keypoints_2d"] = rh_seq[i] + else: + p_out["hand_left_keypoints_2d"] = chosen.get("hand_left_keypoints_2d", p_out.get("hand_left_keypoints_2d")) + 
p_out["hand_right_keypoints_2d"] = chosen.get( + "hand_right_keypoints_2d", p_out.get("hand_right_keypoints_2d") + ) + + _pin_body_wrist_to_hand( + p_out, side="left", conf_gate_body=CONF_GATE_BODY, conf_gate_hand=CONF_GATE_HAND, blend=1.0 + ) + _pin_body_wrist_to_hand( + p_out, side="right", conf_gate_body=CONF_GATE_BODY, conf_gate_hand=CONF_GATE_HAND, blend=1.0 + ) + + _fix_elbow_using_wrist(p_out, side="left", conf_gate=CONF_GATE_BODY) + _fix_elbow_using_wrist(p_out, side="right", conf_gate=CONF_GATE_BODY) + + if filter_extra_people: + frame_out["people"] = [p_out] + else: + orig_people = frame.get("people", []) + if not isinstance(orig_people, list): + frame_out["people"] = [p_out] + else: + replaced = False + new_people = [] + for op in orig_people: + if (not replaced) and (op is chosen): + new_people.append(p_out) + replaced = True + else: + new_people.append(copy.deepcopy(op)) + if not replaced: + new_people = [p_out] + [copy.deepcopy(op) for op in orig_people] + frame_out["people"] = new_people + + out_frames.append(frame_out) + + return out_frames + + +# ============================================================ +# === END: smooth_KPS_json.py logic +# ============================================================ + + +# ============================================================ +# === START: render_pose_video.py logic (ported to frame render) +# ============================================================ + +OP_COLORS: List[Tuple[int, int, int]] = [ + (255, 0, 0), + (255, 85, 0), + (255, 170, 0), + (255, 255, 0), + (170, 255, 0), + (85, 255, 0), + (0, 255, 0), + (0, 255, 85), + (0, 255, 170), + (0, 255, 255), + (0, 170, 255), + (0, 85, 255), + (0, 0, 255), + (85, 0, 255), + (170, 0, 255), + (255, 0, 255), + (255, 0, 170), + (255, 0, 85), +] + +BODY_EDGES: List[Tuple[int, int]] = [ + (1, 2), + (1, 5), + (2, 3), + (3, 4), + (5, 6), + (6, 7), + (1, 8), + (8, 9), + (9, 10), + (1, 11), + (11, 12), + (12, 13), + (1, 0), + (0, 14), + (14, 16), + (0, 15), + (15, 17), +] + +BODY_EDGE_COLORS = OP_COLORS[: len(BODY_EDGES)] +BODY_JOINT_COLORS = OP_COLORS + +HAND_EDGES: List[Tuple[int, int]] = [ + (0, 1), + (1, 2), + (2, 3), + (3, 4), + (0, 5), + (5, 6), + (6, 7), + (7, 8), + (0, 9), + (9, 10), + (10, 11), + (11, 12), + (0, 13), + (13, 14), + (14, 15), + (15, 16), + (0, 17), + (17, 18), + (18, 19), + (19, 20), +] + + +def _valid_pt(x: float, y: float, c: float, conf_thresh: float) -> bool: + return (c is not None) and (c >= conf_thresh) and not (x == 0 and y == 0) + + +def _hsv_to_bgr(h: float, s: float, v: float) -> Tuple[int, int, int]: + H = int(np.clip(h, 0.0, 1.0) * 179.0) + S = int(np.clip(s, 0.0, 1.0) * 255.0) + V = int(np.clip(v, 0.0, 1.0) * 255.0) + hsv = np.uint8([[[H, S, V]]]) + bgr = cv2.cvtColor(hsv, cv2.COLOR_HSV2BGR)[0, 0] + return int(bgr[0]), int(bgr[1]), int(bgr[2]) + + +def _looks_normalized(points: List[Tuple[float, float, float]], conf_thresh: float) -> bool: + valid = [(x, y, c) for (x, y, c) in points if _valid_pt(x, y, c, conf_thresh)] + if not valid: + return False + in01 = sum(1 for (x, y, _) in valid if 0.0 <= x <= 1.0 and 0.0 <= y <= 1.0) + return (in01 / float(len(valid))) >= 0.7 + + +def _draw_body( + canvas: np.ndarray, pose: List[Tuple[float, float, float]], conf_thresh: float, xinsr_stick_scaling: bool = False +) -> None: + CH, CW = canvas.shape[:2] + stickwidth = 2 + + valid = [(x, y, c) for (x, y, c) in pose if _valid_pt(x, y, c, conf_thresh)] + norm = False + if valid: + in01 = sum(1 for (x, y, _) in valid if 0.0 <= x <= 1.0 and 0.0 <= y <= 1.0) + norm = 
(in01 / float(len(valid))) >= 0.7 + + def to_px(x: float, y: float) -> Tuple[float, float]: + if norm: + return x * CW, y * CH + return x, y + + max_side = max(CW, CH) + if xinsr_stick_scaling: + stick_scale = 1 if max_side < 500 else min(2 + (max_side // 1000), 7) + else: + stick_scale = 1 + + for idx, (a, b) in enumerate(BODY_EDGES): + if a >= len(pose) or b >= len(pose): + continue + + ax, ay, ac = pose[a] + bx, by, bc = pose[b] + if not (_valid_pt(ax, ay, ac, conf_thresh) and _valid_pt(bx, by, bc, conf_thresh)): + continue + + ax, ay = to_px(ax, ay) + bx, by = to_px(bx, by) + + base = BODY_EDGE_COLORS[idx] if idx < len(BODY_EDGE_COLORS) else (255, 255, 255) + + X = np.array([ay, by], dtype=np.float32) + Y = np.array([ax, bx], dtype=np.float32) + + mX = float(np.mean(X)) + mY = float(np.mean(Y)) + length = float(np.hypot(X[0] - X[1], Y[0] - Y[1])) + if length < 1.0: + continue + + angle = math.degrees(math.atan2(X[0] - X[1], Y[0] - Y[1])) + + polygon = cv2.ellipse2Poly( + (int(mY), int(mX)), + (int(length / 2), int(stickwidth * stick_scale)), + int(angle), + 0, + 360, + 1, + ) + + cv2.fillConvexPoly( + canvas, + polygon, + (int(base[0] * 0.6), int(base[1] * 0.6), int(base[2] * 0.6)), + ) + + for j, (x, y, c) in enumerate(pose): + if not _valid_pt(x, y, c, conf_thresh): + continue + x, y = to_px(x, y) + col = BODY_JOINT_COLORS[j] if j < len(BODY_JOINT_COLORS) else (255, 255, 255) + cv2.circle(canvas, (int(x), int(y)), 2, col, thickness=-1) + + +def _draw_hand(canvas: np.ndarray, hand: List[Tuple[float, float, float]], conf_thresh: float) -> None: + if not hand or len(hand) < 21: + return + + CH, CW = canvas.shape[:2] + norm = _looks_normalized(hand, conf_thresh) + + def to_px(x: float, y: float) -> Tuple[float, float]: + return (x * CW, y * CH) if norm else (x, y) + + n_edges = len(HAND_EDGES) + for i, (a, b) in enumerate(HAND_EDGES): + x1, y1, c1 = hand[a] + x2, y2, c2 = hand[b] + if _valid_pt(x1, y1, c1, conf_thresh) and _valid_pt(x2, y2, c2, conf_thresh): + x1, y1 = to_px(x1, y1) + x2, y2 = to_px(x2, y2) + bgr = _hsv_to_bgr(i / float(n_edges), 1.0, 1.0) + cv2.line(canvas, (int(x1), int(y1)), (int(x2), int(y2)), bgr, 1, cv2.LINE_AA) + + for x, y, c in hand: + if _valid_pt(x, y, c, conf_thresh): + x, y = to_px(x, y) + cv2.circle(canvas, (int(x), int(y)), 1, (0, 0, 255), -1, cv2.LINE_AA) + + +def _draw_face(canvas: np.ndarray, face: List[Tuple[float, float, float]], conf_thresh: float) -> None: + if not face: + return + + CH, CW = canvas.shape[:2] + norm = _looks_normalized(face, conf_thresh) + + def to_px(x: float, y: float) -> Tuple[float, float]: + return (x * CW, y * CH) if norm else (x, y) + + for x, y, c in face: + if _valid_pt(x, y, c, conf_thresh): + x, y = to_px(x, y) + cv2.circle(canvas, (int(x), int(y)), 0, (255, 255, 255), -1, cv2.LINE_AA) + + +def _draw_pose_frame_full( + w: int, + h: int, + person: Dict[str, Any], + conf_thresh_body: float = 0.10, + conf_thresh_hands: float = 0.10, + conf_thresh_face: float = 0.10, +) -> np.ndarray: + img = np.zeros((h, w, 3), dtype=np.uint8) + + pose = _reshape_keypoints_2d(person.get("pose_keypoints_2d") or []) + face = _reshape_keypoints_2d(person.get("face_keypoints_2d") or []) + hand_l = _reshape_keypoints_2d(person.get("hand_left_keypoints_2d") or []) + hand_r = _reshape_keypoints_2d(person.get("hand_right_keypoints_2d") or []) + + if pose: + _draw_body(img, pose, conf_thresh_body) + if hand_l: + _draw_hand(img, hand_l, conf_thresh_hands) + if hand_r: + _draw_hand(img, hand_r, conf_thresh_hands) + if face: + _draw_face(img, face, 
conf_thresh_face) + + return img + +
+# ============================================================ +# === END: render_pose_video.py logic +# ============================================================ + +
+# ============================================================ +# ComfyUI mappings +# ============================================================ +
+NODE_CLASS_MAPPINGS = { + "TSPoseDataSmoother": KPSSmoothPoseDataAndRender, +} +
+NODE_DISPLAY_NAME_MAPPINGS = { + "TSPoseDataSmoother": "KPS: Smooth + Render (pose_data/PKL)", +}
diff --git a/zavodik/nodes/comfyui-teskors-utils-main/rename_files.py b/zavodik/nodes/comfyui-teskors-utils-main/rename_files.py new file mode 100644 index 0000000000000000000000000000000000000000..8bf2b1cb59fd84f23606cec91304fe07ce87fcbc --- /dev/null +++ b/zavodik/nodes/comfyui-teskors-utils-main/rename_files.py @@ -0,0 +1,200 @@ +import os +import re +import uuid +import shutil + +
+def extract_first_number(s: str): + match = re.search(r"\d+", s) + return int(match.group()) if match else float("inf") + +
+sort_methods = [ + "None", + "Alphabetical (ASC)", + "Alphabetical (DESC)", + "Numerical (ASC)", + "Numerical (DESC)", + "Datetime (ASC)", + "Datetime (DESC)", +] + +
+def sort_by(items, base_path=".", method=None): + def fullpath(x): + return os.path.join(base_path, x) + + def get_timestamp(path): + try: + return os.path.getmtime(path) + except FileNotFoundError: + return float("-inf") + + if method == "Alphabetical (ASC)": + return sorted(items) + elif method == "Alphabetical (DESC)": + return sorted(items, reverse=True) + elif method == "Numerical (ASC)": + return sorted(items, key=lambda x: extract_first_number(os.path.splitext(x)[0])) + elif method == "Numerical (DESC)": + return sorted(items, key=lambda x: extract_first_number(os.path.splitext(x)[0]), reverse=True) + elif method == "Datetime (ASC)": + return sorted(items, key=lambda x: get_timestamp(fullpath(x))) + elif method == "Datetime (DESC)": + return sorted(items, key=lambda x: get_timestamp(fullpath(x)), reverse=True) + else: + return items + +
+def _safe_list_files(directory: str): + return [f for f in os.listdir(directory) if os.path.isfile(os.path.join(directory, f))] + +
+def _format_name(index: int, digits: int, prefix: str, ext: str): + """ + ext is expected as ".png"/".jpg"/".jpeg" (with the leading dot). + IMPORTANT: the underscore after the number is ALWAYS present, then the extension as-is. + Example: prefix_0001_.png + """ + num = str(index).zfill(digits) + left = f"{prefix}_" if prefix else "" + return f"{left}{num}_{ext}" + +
+def _index_taken(directory: str, digits: int, prefix: str, index: int) -> bool: + """ + Checks whether the number `index` is already taken by ANY extension in the directory. + I.e. if prefix_0001_.png exists, prefix_0001_.jpg is not allowed either.
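+ Example: with prefix="img" and digits=4, an existing "img_0001_.png" blocks index 1 for every extension, so _find_next_free_index would return 2.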
+ """ + num = str(index).zfill(digits) + left = f"{prefix}_" if prefix else "" + start = f"{left}{num}_" + + try: + entries = os.listdir(directory) + except FileNotFoundError: + return False + + for f in entries: + p = os.path.join(directory, f) + if os.path.isfile(p) and f.startswith(start): + return True + return False + + +def _find_next_free_index(directory: str, digits: int, prefix: str, start_from: int = 1) -> int: + idx = max(1, int(start_from)) + while _index_taken(directory, digits, prefix, idx): + idx += 1 + return idx + + +class RenameFilesInDir: + OUTPUT_NODE = True + + @classmethod + def INPUT_TYPES(cls): + return { + "required": { + "directory": ("STRING", {"default": ""}), + }, + "optional": { + "output_directory": ("STRING", {"default": ""}), + "sort_method": (sort_methods,), + "start_index": ("INT", {"default": 0, "min": 0, "max": 0xFFFFFFFFFFFFFFFF, "step": 1}), + "files_load_cap": ("INT", {"default": 0, "min": 0, "step": 1}), + "prefix": ("STRING", {"default": ""}), + "digits": ("INT", {"default": 4, "min": 1, "max": 16, "step": 1}), + }, + } + + RETURN_TYPES = ("INT",) + RETURN_NAMES = ("COUNT",) + FUNCTION = "run" + CATEGORY = "InspirePack/files" + + @classmethod + def IS_CHANGED(cls, **kwargs): + return float("NaN") + + def run( + self, + directory: str, + output_directory: str = "", + sort_method=None, + start_index: int = 0, + files_load_cap: int = 0, + prefix: str = "", + digits: int = 4, + ): + if not os.path.isdir(directory): + raise FileNotFoundError(f"Directory '{directory}' cannot be found.") + + files = _safe_list_files(directory) + if not files: + return (0,) + + files = sort_by(files, directory, sort_method) + files = files[start_index:] + + if files_load_cap > 0: + files = files[:files_load_cap] + + if not files: + return (0,) + + inplace = (output_directory is None) or (str(output_directory).strip() == "") + + if not inplace: + os.makedirs(output_directory, exist_ok=True) + + count = 0 + + # ---------- COPY MODE ---------- + if not inplace: + for fname in files: + src = os.path.join(directory, fname) + _, ext = os.path.splitext(fname) # ext = ".png" / ".jpg" / ... 
+ + next_idx = _find_next_free_index(output_directory, digits, prefix, start_from=1) + new_name = _format_name(next_idx, digits, prefix, ext) + + dst = os.path.join(output_directory, new_name) + shutil.copy2(src, dst) + count += 1 + + return (count,) + + # ---------- INPLACE RENAME ---------- + temp_map = [] + used_temp = set() + + def _make_temp_name(old_name: str): + while True: + t = f"__tmp__{uuid.uuid4().hex}__{old_name}" + if t not in used_temp and not os.path.exists(os.path.join(directory, t)): + used_temp.add(t) + return t + + # phase1 -> temp + for fname in files: + old_path = os.path.join(directory, fname) + tmp = _make_temp_name(fname) + tmp_path = os.path.join(directory, tmp) + + os.rename(old_path, tmp_path) + temp_map.append((tmp, fname)) + + # phase2 -> final + for tmp, original_name in temp_map: + tmp_path = os.path.join(directory, tmp) + _, ext = os.path.splitext(original_name) + + next_idx = _find_next_free_index(directory, digits, prefix, start_from=1) + new_name = _format_name(next_idx, digits, prefix, ext) + + new_path = os.path.join(directory, new_name) + os.rename(tmp_path, new_path) + count += 1 + + return (count,) diff --git a/zavodik/nodes/comfyui-teskors-utils-main/requirements.txt b/zavodik/nodes/comfyui-teskors-utils-main/requirements.txt new file mode 100644 index 0000000000000000000000000000000000000000..cb3738c2299fbaa9ad35f3cf60f50a284a883e99 --- /dev/null +++ b/zavodik/nodes/comfyui-teskors-utils-main/requirements.txt @@ -0,0 +1,2 @@ +numpy +opencv-python diff --git a/zavodik/nodes/comfyui-teskors-utils-main/save_load_pose.py b/zavodik/nodes/comfyui-teskors-utils-main/save_load_pose.py new file mode 100644 index 0000000000000000000000000000000000000000..be9bae74e8bf53a44b79584e33f616351d8805e3 --- /dev/null +++ b/zavodik/nodes/comfyui-teskors-utils-main/save_load_pose.py @@ -0,0 +1,128 @@ +import os +import time +import pickle +import glob +import folder_paths + + +def _ensure_output_dir(): + out_dir = folder_paths.get_output_directory() + os.makedirs(out_dir, exist_ok=True) + return out_dir + + +# ------------------------- +# UI: list PKL/PT under input/** (recursive) +# ------------------------- +def _list_all_pkl_under_input(): + inp = folder_paths.get_input_directory() + exts = (".pkl", ".pickle", ".pt") + + files = [] + for ext in exts: + pattern = os.path.join(inp, "**", f"*{ext}") + files.extend(glob.glob(pattern, recursive=True)) + + rel = [] + for f in files: + if os.path.isfile(f): + r = os.path.relpath(f, inp).replace("\\", "/") + rel.append(r) + + rel = sorted(set(rel)) + return rel if rel else [""] + + +def _abs_from_input(rel_path: str) -> str: + inp = folder_paths.get_input_directory() + return os.path.join(inp, rel_path).replace("\\", "/") + + +def _make_unique_path(base_path: str) -> str: + """ + If file exists, append incremental suffix: + pose_data.pkl + pose_data_0001.pkl + pose_data_0002.pkl + """ + if not os.path.exists(base_path): + return base_path + + directory = os.path.dirname(base_path) + name = os.path.basename(base_path) + base, ext = os.path.splitext(name) + + idx = 1 + while True: + new_name = f"{base}_{idx:04d}{ext}" + new_path = os.path.join(directory, new_name) + if not os.path.exists(new_path): + return new_path + idx += 1 + + +def _default_filename(prefix: str, ext: str): + ts = time.strftime("%Y%m%d_%H%M%S") + return f"{prefix}_{ts}{ext}" + + +class TSSavePoseDataAsPickle: + OUTPUT_NODE = True + + @classmethod + def INPUT_TYPES(s): + return { + "required": { + "pose_data": ("POSEDATA",), + "filename": ("STRING", 
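+                    # An empty default makes save() fall back to a timestamped
+                    # name such as pose_data_YYYYMMDD_HHMMSS.pkl.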
{"default": ""}), + } + } + + RETURN_TYPES = ("STRING",) + RETURN_NAMES = ("saved_path",) + FUNCTION = "save" + CATEGORY = "save" + + def save(self, pose_data, filename=""): + out_dir = _ensure_output_dir() + filename = (filename or "").strip() + if not filename: + filename = _default_filename("pose_data", ".pkl") + if not filename.lower().endswith((".pkl", ".pickle")): + filename += ".pkl" + + abs_path = _make_unique_path(os.path.join(out_dir, filename)) + + with open(abs_path, "wb") as f: + pickle.dump(pose_data, f, protocol=pickle.HIGHEST_PROTOCOL) + + return (abs_path,) + + +class TSLoadPoseDataPickle: + @classmethod + def INPUT_TYPES(s): + return { + "required": { + # dropdown + Upload, recursive input/** + "file": (_list_all_pkl_under_input(),), + } + } + + RETURN_TYPES = ("POSEDATA",) + RETURN_NAMES = ("pose_data",) + FUNCTION = "load" + CATEGORY = "load" + + def load(self, file): + if not isinstance(file, str) or not file.strip(): + raise ValueError("TS PoseData Pickle: Please select a .pkl/.pt file.") + + abs_path = _abs_from_input(file) + if not os.path.isfile(abs_path): + raise ValueError(f"TS PoseData Pickle: File not found: {abs_path}") + + with open(abs_path, "rb") as f: + pose_data = pickle.load(f) + + return (pose_data,) diff --git a/zavodik/nodes/comfyui-teskors-utils/.gitignore b/zavodik/nodes/comfyui-teskors-utils/.gitignore new file mode 100644 index 0000000000000000000000000000000000000000..6e253c8a6cfa915529ddbc17dd7e6cbfcadce10b --- /dev/null +++ b/zavodik/nodes/comfyui-teskors-utils/.gitignore @@ -0,0 +1,12 @@ +__pycache__/ +*.rar +" /" +__pycache__/ +*.rar +" /" +__pycache__/ +*.rar +" /" +__pycache__/ +*.rar +" /" diff --git a/zavodik/nodes/comfyui-teskors-utils/README.md b/zavodik/nodes/comfyui-teskors-utils/README.md new file mode 100644 index 0000000000000000000000000000000000000000..4840dc3caa04f3ac9a99b2541c4fab9474546146 --- /dev/null +++ b/zavodik/nodes/comfyui-teskors-utils/README.md @@ -0,0 +1,133 @@ +# ComfyUI Teskor's Utils + +Utility nodes for **ComfyUI** focused on **video workflows**, **OpenPose stability**, and **chunk-based video generation**. + +Main use cases: + +- WanVideo +- AnimateDiff +- ControlNet OpenPose +- Long videos +- Chunked generation + +Goal: + +Improve **stability** and **visual consistency** in generated videos. + + +--- + +# Main Nodes + +## OpenPose Smoother + +### Purpose + +Stabilizes OpenPose pose data across frames. + +Raw OpenPose detection is noisy and causes: + +- Pose jitter +- Hand flicker +- Missing joints +- Detection noise +- Random extra people + +### What It Does + +- Smooths keypoints over time +- Fills small gaps +- Filters unstable detections +- Optional extra-person removal +- Outputs smoothed POSEDATA + +### Typical Pipeline + +Load Video +→ OpenPose Detection +→ OpenPose Smoother +→ ControlNet OpenPose +→ Video Generation + + +--- + +## Color Match Sequential Bias + +### Purpose + +Removes **color drift between video chunks**. + +Designed for chunk-based generation: + +- WanVideo +- AnimateDiff +- Long videos + +### Problem + +Chunked generation causes small differences in: + +- Brightness +- Color balance +- Contrast + +This makes chunk boundaries visible. + +### What It Does + +For each chunk: + +1. Measures previous chunk colors +2. Measures current chunk colors +3. Calculates difference +4. Applies correction + +Result: + +- Consistent colors +- Invisible chunk borders +- Continuous video + +### Important Setting + +chunk_size must match generation chunk size. 
+ +Example: + +If chunks are 81 frames: + +chunk_size = 81 + + +### Typical Pipeline + +WanVideo Animate Embeds +→ Combine Frames +→ Color Match Sequential Bias +→ Save Video + + +--- + +# Other Nodes + +Additional utilities: + +- Batch video loading +- Pose data save/load +- Preview image without metadata +- Preview video without metadata +- Batch file renaming + + +--- + +# Recommended For + +- WanVideo workflows +- AnimateDiff workflows +- OpenPose animation +- ControlNet OpenPose +- Long video generation +- Chunked workflows diff --git a/zavodik/nodes/comfyui-teskors-utils/__init__.py b/zavodik/nodes/comfyui-teskors-utils/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..dfbdb6e409f5a7448dd261607fdeb3b6b01273f1 --- /dev/null +++ b/zavodik/nodes/comfyui-teskors-utils/__init__.py @@ -0,0 +1,32 @@ +from .nodes.save_load_pose import TSSavePoseDataAsPickle, TSLoadPoseDataPickle +from .nodes.openpose_smoother import KPSSmoothPoseDataAndRender +from .nodes.load_video_batch import LoadVideoBatchListFromDir +from .nodes.rename_files import RenameFilesInDir +from .nodes.color_match import TSColorMatchSequentialBias +from .nodes.preview_image_metadata import PreviewImageNoMetadata +from .nodes.video_combine_metadata import TSVideoCombineNoMetadata + + +NODE_CLASS_MAPPINGS = { + "TSSavePoseDataAsPickle": TSSavePoseDataAsPickle, + "TSLoadPoseDataPickle": TSLoadPoseDataPickle, + "TSPoseDataSmoother": KPSSmoothPoseDataAndRender, + "TSLoadVideoBatchListFromDir": LoadVideoBatchListFromDir, + "TSRenameFilesInDir": RenameFilesInDir, + "TSColorMatch": TSColorMatchSequentialBias, + "TSPreviewImageNoMetadata": PreviewImageNoMetadata, + "TSVideoCombineNoMetadata": TSVideoCombineNoMetadata, +} + +NODE_DISPLAY_NAME_MAPPINGS = { + "TSSavePoseDataAsPickle": "TS Save Pose Data (PKL)", + "TSLoadPoseDataPickle": "TS Load Pose Data (PKL)", + "TSPoseDataSmoother": "TS Pose Data Smoother", + "TSLoadVideoBatchListFromDir": "TS Load Video Batch List From Dir", + "TSRenameFilesInDir": "TS Rename Files In Dir", + "TSColorMatch": "TS Color Match", + "TSPreviewImageNoMetadata": "TS Preview Image No Metadata", + "TSVideoCombineNoMetadata": "TS Video Combine No Metadata", +} + +WEB_DIRECTORY = "web" diff --git a/zavodik/nodes/comfyui-teskors-utils/__pycache__/__init__.cpython-313.pyc b/zavodik/nodes/comfyui-teskors-utils/__pycache__/__init__.cpython-313.pyc new file mode 100644 index 0000000000000000000000000000000000000000..fc4f554cd021d51b40309a43c6e7d698a101148f Binary files /dev/null and b/zavodik/nodes/comfyui-teskors-utils/__pycache__/__init__.cpython-313.pyc differ diff --git a/zavodik/nodes/comfyui-teskors-utils/example workflows/openpose smoother example.json b/zavodik/nodes/comfyui-teskors-utils/example workflows/openpose smoother example.json new file mode 100644 index 0000000000000000000000000000000000000000..11abec40a855de01ddb34d91e007cfe84e59f522 --- /dev/null +++ b/zavodik/nodes/comfyui-teskors-utils/example workflows/openpose smoother example.json @@ -0,0 +1,649 @@ +{ + "id": "a3413555-e17c-48a0-81ac-2abc72bcc383", + "revision": 0, + "last_node_id": 10, + "last_link_id": 9, + "nodes": [ + { + "id": 1, + "type": "OnnxDetectionModelLoader", + "pos": [ + 1303.8404112732023, + 1000.3993178043474 + ], + "size": [ + 370.3601847372395, + 106 + ], + "flags": {}, + "order": 0, + "mode": 0, + "inputs": [], + "outputs": [ + { + "name": "model", + "type": "POSEMODEL", + "links": [ + 3 + ] + } + ], + "properties": { + "cnr_id": "ComfyUI-WanAnimatePreprocess", + "ver": 
"2fcbcae7eec637fdc712fdec18e6266feb8ba3a7", + "Node name for S&R": "OnnxDetectionModelLoader", + "aux_id": "kijai/ComfyUI-WanAnimatePreprocess", + "ue_properties": { + "widget_ue_connectable": {}, + "version": "7.5.2", + "input_ue_unconnectable": {} + } + }, + "widgets_values": [ + "vitpose_h_wholebody_model.onnx", + "yolov10m.onnx", + "CUDAExecutionProvider" + ], + "color": "#222", + "bgcolor": "#000" + }, + { + "id": 2, + "type": "DrawViTPose", + "pos": [ + 1689.0075439521988, + 1001.080853311867 + ], + "size": [ + 270, + 178 + ], + "flags": {}, + "order": 3, + "mode": 0, + "inputs": [ + { + "name": "pose_data", + "type": "POSEDATA", + "link": 1 + } + ], + "outputs": [ + { + "name": "pose_images", + "type": "IMAGE", + "links": [ + 6 + ] + } + ], + "properties": { + "cnr_id": "ComfyUI-WanAnimatePreprocess", + "ver": "1a35b81a418bbba093356ad19b19bf2a76a24f4e", + "Node name for S&R": "DrawViTPose" + }, + "widgets_values": [ + 720, + 1280, + 16, + -1, + -1, + "True" + ] + }, + { + "id": 4, + "type": "PoseAndFaceDetection", + "pos": [ + 1303.9241395257632, + 1139.2387094991327 + ], + "size": [ + 313.125, + 186 + ], + "flags": {}, + "order": 2, + "mode": 0, + "inputs": [ + { + "name": "model", + "type": "POSEMODEL", + "link": 3 + }, + { + "name": "images", + "type": "IMAGE", + "link": 4 + }, + { + "name": "retarget_image", + "shape": 7, + "type": "IMAGE", + "link": null + } + ], + "outputs": [ + { + "name": "pose_data", + "type": "POSEDATA", + "links": [ + 1, + 5, + 7 + ] + }, + { + "name": "face_images", + "type": "IMAGE", + "links": [] + }, + { + "name": "key_frame_body_points", + "type": "STRING", + "links": null + }, + { + "name": "bboxes", + "type": "BBOX", + "links": null + }, + { + "name": "face_bboxes", + "type": "BBOX,", + "links": null + } + ], + "properties": { + "cnr_id": "ComfyUI-WanAnimatePreprocess", + "ver": "2fcbcae7eec637fdc712fdec18e6266feb8ba3a7", + "Node name for S&R": "PoseAndFaceDetection", + "aux_id": "kijai/ComfyUI-WanAnimatePreprocess", + "ue_properties": { + "widget_ue_connectable": { + "width": true, + "height": true + }, + "version": "7.5.2", + "input_ue_unconnectable": {} + } + }, + "widgets_values": [ + 720, + 1280 + ], + "color": "#222", + "bgcolor": "#000" + }, + { + "id": 7, + "type": "VHS_VideoCombine", + "pos": [ + 1975.5540633549488, + 999.8853100998517 + ], + "size": [ + 343.6813986939137, + 923.4335976780688 + ], + "flags": {}, + "order": 6, + "mode": 0, + "inputs": [ + { + "name": "images", + "type": "IMAGE", + "link": 6 + }, + { + "name": "audio", + "shape": 7, + "type": "AUDIO", + "link": null + }, + { + "name": "meta_batch", + "shape": 7, + "type": "VHS_BatchManager", + "link": null + }, + { + "name": "vae", + "shape": 7, + "type": "VAE", + "link": null + } + ], + "outputs": [ + { + "name": "Filenames", + "type": "VHS_FILENAMES", + "links": null + } + ], + "properties": { + "cnr_id": "comfyui-videohelpersuite", + "ver": "0a75c7958fe320efcb052f1d9f8451fd20c730a8", + "Node name for S&R": "VHS_VideoCombine", + "aux_id": "Kosinkadink/ComfyUI-VideoHelperSuite", + "ue_properties": { + "widget_ue_connectable": {}, + "version": "7.5.2", + "input_ue_unconnectable": {} + } + }, + "widgets_values": { + "frame_rate": 16, + "loop_count": 0, + "filename_prefix": "vitpose", + "format": "video/h264-mp4", + "pix_fmt": "yuv420p", + "crf": 19, + "save_metadata": false, + "trim_to_audio": false, + "pingpong": false, + "save_output": true, + "videopreview": { + "hidden": false, + "paused": false, + "params": { + "filename": "vitpose_00015.mp4", + "subfolder": "", + 
"type": "output", + "format": "video/h264-mp4", + "frame_rate": 16, + "workflow": "vitpose_00015.png", + "fullpath": "D:\\ComfyUI_windows_portable\\ComfyUI\\output\\vitpose_00015.mp4" + } + } + }, + "color": "#222", + "bgcolor": "#000" + }, + { + "id": 9, + "type": "VHS_VideoCombine", + "pos": [ + 2327.610258850725, + 998.9714988892509 + ], + "size": [ + 343.6813986939137, + 923.4335976780688 + ], + "flags": {}, + "order": 7, + "mode": 0, + "inputs": [ + { + "name": "images", + "type": "IMAGE", + "link": 8 + }, + { + "name": "audio", + "shape": 7, + "type": "AUDIO", + "link": null + }, + { + "name": "meta_batch", + "shape": 7, + "type": "VHS_BatchManager", + "link": null + }, + { + "name": "vae", + "shape": 7, + "type": "VAE", + "link": null + } + ], + "outputs": [ + { + "name": "Filenames", + "type": "VHS_FILENAMES", + "links": null + } + ], + "properties": { + "cnr_id": "comfyui-videohelpersuite", + "ver": "0a75c7958fe320efcb052f1d9f8451fd20c730a8", + "Node name for S&R": "VHS_VideoCombine", + "aux_id": "Kosinkadink/ComfyUI-VideoHelperSuite", + "ue_properties": { + "widget_ue_connectable": {}, + "version": "7.5.2", + "input_ue_unconnectable": {} + } + }, + "widgets_values": { + "frame_rate": 16, + "loop_count": 0, + "filename_prefix": "vitpose", + "format": "video/h264-mp4", + "pix_fmt": "yuv420p", + "crf": 19, + "save_metadata": false, + "trim_to_audio": false, + "pingpong": false, + "save_output": true, + "videopreview": { + "hidden": false, + "paused": false, + "params": { + "filename": "vitpose_00015.mp4", + "subfolder": "", + "type": "output", + "format": "video/h264-mp4", + "frame_rate": 16, + "workflow": "vitpose_00015.png", + "fullpath": "D:\\ComfyUI_windows_portable\\ComfyUI\\output\\vitpose_00015.mp4" + } + } + }, + "color": "#222", + "bgcolor": "#000" + }, + { + "id": 6, + "type": "VHS_LoadVideo", + "pos": [ + 899.8789036417043, + 1004.6331112541623 + ], + "size": [ + 368.982535157264, + 943.9689736318157 + ], + "flags": {}, + "order": 1, + "mode": 0, + "inputs": [ + { + "name": "meta_batch", + "shape": 7, + "type": "VHS_BatchManager", + "link": null + }, + { + "name": "vae", + "shape": 7, + "type": "VAE", + "link": null + } + ], + "outputs": [ + { + "name": "IMAGE", + "type": "IMAGE", + "links": [ + 4 + ] + }, + { + "name": "frame_count", + "type": "INT", + "links": [] + }, + { + "name": "audio", + "type": "AUDIO", + "links": [] + }, + { + "name": "video_info", + "type": "VHS_VIDEOINFO", + "links": null + } + ], + "properties": { + "cnr_id": "comfyui-videohelpersuite", + "ver": "8550981384301e9bc5bfea83e5c2c75258102593", + "Node name for S&R": "VHS_LoadVideo", + "aux_id": "Kosinkadink/ComfyUI-VideoHelperSuite", + "ue_properties": { + "widget_ue_connectable": {}, + "version": "7.5.2", + "input_ue_unconnectable": {} + } + }, + "widgets_values": { + "video": "Без названия (21).mp4", + "force_rate": 16, + "custom_width": 720, + "custom_height": 1280, + "frame_load_cap": 0, + "skip_first_frames": 0, + "select_every_nth": 1, + "format": "Wan", + "videopreview": { + "hidden": false, + "paused": false, + "params": { + "filename": "Без названия (21).mp4", + "type": "input", + "format": "video/mp4", + "force_rate": 16, + "custom_width": 720, + "custom_height": 1280, + "frame_load_cap": 0, + "skip_first_frames": 0, + "select_every_nth": 1 + } + } + }, + "color": "#222", + "bgcolor": "#000" + }, + { + "id": 8, + "type": "TSPoseDataSmoother", + "pos": [ + 1689.7506516978146, + 1226.4699592282134 + ], + "size": [ + 271.1328125, + 198 + ], + "flags": {}, + "order": 5, + "mode": 0, + 
"inputs": [ + { + "name": "pose_data", + "type": "POSEDATA", + "link": 7 + } + ], + "outputs": [ + { + "name": "IMAGE", + "type": "IMAGE", + "links": [ + 8 + ] + }, + { + "name": "pose_data", + "type": "POSEDATA", + "links": [ + 9 + ] + } + ], + "properties": { + "aux_id": "teskor-hub/comfyui-teskors-utils", + "ver": "fc2e6467ab346ae8c92b5d311246009f18489b60", + "Node name for S&R": "TSPoseDataSmoother" + }, + "widgets_values": [ + true, + 0.7, + 12, + 4, + 0.35, + 0.6 + ] + }, + { + "id": 5, + "type": "TSSavePoseDataAsPickle", + "pos": [ + 1304.2517450362282, + 1361.764823613752 + ], + "size": [ + 270, + 58 + ], + "flags": {}, + "order": 4, + "mode": 0, + "inputs": [ + { + "name": "pose_data", + "type": "POSEDATA", + "link": 5 + } + ], + "outputs": [ + { + "name": "saved_path", + "type": "STRING", + "links": null + } + ], + "properties": { + "aux_id": "teskor-hub/comfyui-teskors-utils", + "ver": "cbbfd9e1bceb299dbf5004fd0f073f64424469b3", + "Node name for S&R": "TSSavePoseDataAsPickle" + }, + "widgets_values": [ + "not_smoothed_pose" + ] + }, + { + "id": 10, + "type": "TSSavePoseDataAsPickle", + "pos": [ + 1694.382032229848, + 1468.112004774594 + ], + "size": [ + 270, + 58 + ], + "flags": {}, + "order": 8, + "mode": 0, + "inputs": [ + { + "name": "pose_data", + "type": "POSEDATA", + "link": 9 + } + ], + "outputs": [ + { + "name": "saved_path", + "type": "STRING", + "links": null + } + ], + "properties": { + "aux_id": "teskor-hub/comfyui-teskors-utils", + "ver": "cbbfd9e1bceb299dbf5004fd0f073f64424469b3", + "Node name for S&R": "TSSavePoseDataAsPickle" + }, + "widgets_values": [ + "smoothed_pose" + ] + } + ], + "links": [ + [ + 1, + 4, + 0, + 2, + 0, + "POSEDATA" + ], + [ + 3, + 1, + 0, + 4, + 0, + "POSEMODEL" + ], + [ + 4, + 6, + 0, + 4, + 1, + "IMAGE" + ], + [ + 5, + 4, + 0, + 5, + 0, + "POSEDATA" + ], + [ + 6, + 2, + 0, + 7, + 0, + "IMAGE" + ], + [ + 7, + 4, + 0, + 8, + 0, + "POSEDATA" + ], + [ + 8, + 8, + 0, + 9, + 0, + "IMAGE" + ], + [ + 9, + 8, + 1, + 10, + 0, + "POSEDATA" + ] + ], + "groups": [], + "config": {}, + "extra": { + "workflowRendererVersion": "LG", + "ds": { + "scale": 0.6945101947704954, + "offset": [ + -182.65607224442527, + -746.6094078883132 + ] + }, + "frontendVersion": "1.34.9", + "VHS_latentpreview": false, + "VHS_latentpreviewrate": 0, + "VHS_MetadataImage": true, + "VHS_KeepIntermediate": true + }, + "version": 0.4 +} diff --git a/zavodik/nodes/comfyui-teskors-utils/example workflows/wanvideo work flow teskor utils + kijai example.json b/zavodik/nodes/comfyui-teskors-utils/example workflows/wanvideo work flow teskor utils + kijai example.json new file mode 100644 index 0000000000000000000000000000000000000000..cc85762457aaa6126219000e091d5f8db991308f --- /dev/null +++ b/zavodik/nodes/comfyui-teskors-utils/example workflows/wanvideo work flow teskor utils + kijai example.json @@ -0,0 +1,4347 @@ +{ + "id": "8b7a9a57-2303-4ef5-9fc2-bf41713bd1fc", + "revision": 0, + "last_node_id": 188, + "last_link_id": 325, + "nodes": [ + { + "id": 66, + "type": "ImageConcatMulti", + "pos": [ + 2660.1005859375, + -950.2750244140625 + ], + "size": [ + 270, + 150 + ], + "flags": {}, + "order": 63, + "mode": 0, + "inputs": [ + { + "name": "image_1", + "type": "IMAGE", + "link": 87 + }, + { + "name": "image_2", + "shape": 7, + "type": "IMAGE", + "link": 107 + } + ], + "outputs": [ + { + "name": "images", + "type": "IMAGE", + "links": [ + 89 + ] + } + ], + "properties": { + "cnr_id": "comfyui-kjnodes", + "ver": "468fcc86f0b29e79a8510e8239eb15714d6747a6", + "ue_properties": { + 
"widget_ue_connectable": {}, + "version": "7.7", + "input_ue_unconnectable": {} + } + }, + "widgets_values": [ + 2, + "left", + true, + null + ] + }, + { + "id": 137, + "type": "GetNode", + "pos": [ + 2441.813232421875, + -1173.92919921875 + ], + "size": [ + 210, + 60 + ], + "flags": { + "collapsed": true + }, + "order": 0, + "mode": 0, + "inputs": [], + "outputs": [ + { + "name": "IMAGE", + "type": "IMAGE", + "links": [ + 239 + ] + } + ], + "title": "Get_face_images", + "properties": { + "ue_properties": { + "widget_ue_connectable": {}, + "version": "7.7", + "input_ue_unconnectable": {} + } + }, + "widgets_values": [ + "face_images" + ], + "color": "#2a363b", + "bgcolor": "#3f5159" + }, + { + "id": 140, + "type": "GetNode", + "pos": [ + 2441.813232421875, + -1124.576904296875 + ], + "size": [ + 210, + 60 + ], + "flags": { + "collapsed": true + }, + "order": 1, + "mode": 0, + "inputs": [], + "outputs": [ + { + "name": "IMAGE", + "type": "IMAGE", + "links": [ + 243 + ] + } + ], + "title": "Get_pose_images", + "properties": { + "ue_properties": { + "widget_ue_connectable": {}, + "version": "7.7", + "input_ue_unconnectable": {} + } + }, + "widgets_values": [ + "pose_images" + ], + "color": "#2a363b", + "bgcolor": "#3f5159" + }, + { + "id": 134, + "type": "GetNode", + "pos": [ + 2443.561279296875, + -1227.2171630859375 + ], + "size": [ + 210, + 34 + ], + "flags": { + "collapsed": true + }, + "order": 2, + "mode": 0, + "inputs": [], + "outputs": [ + { + "name": "IMAGE", + "type": "IMAGE", + "links": [ + 236 + ] + } + ], + "title": "Get_reference_image", + "properties": { + "ue_properties": { + "widget_ue_connectable": {}, + "version": "7.7", + "input_ue_unconnectable": {} + } + }, + "widgets_values": [ + "reference_image" + ], + "color": "#2a363b", + "bgcolor": "#3f5159" + }, + { + "id": 148, + "type": "SetNode", + "pos": [ + -548.7736206054688, + -2964.476318359375 + ], + "size": [ + 210, + 60 + ], + "flags": { + "collapsed": true + }, + "order": 45, + "mode": 0, + "inputs": [ + { + "name": "AUDIO", + "type": "AUDIO", + "link": 255 + } + ], + "outputs": [ + { + "name": "*", + "type": "*", + "links": null + } + ], + "title": "Set_input_audio", + "properties": { + "previousName": "input_audio", + "ue_properties": { + "widget_ue_connectable": {}, + "version": "7.7", + "input_ue_unconnectable": {} + } + }, + "widgets_values": [ + "input_audio" + ] + }, + { + "id": 149, + "type": "GetNode", + "pos": [ + 3043.859130859375, + -1156.882080078125 + ], + "size": [ + 210, + 60 + ], + "flags": { + "collapsed": true + }, + "order": 3, + "mode": 0, + "inputs": [], + "outputs": [ + { + "name": "AUDIO", + "type": "AUDIO", + "links": [ + 256 + ] + } + ], + "title": "Get_input_audio", + "properties": { + "ue_properties": { + "widget_ue_connectable": {}, + "version": "7.7", + "input_ue_unconnectable": {} + } + }, + "widgets_values": [ + "input_audio" + ] + }, + { + "id": 153, + "type": "SetNode", + "pos": [ + -1605.1839599609375, + -2832.218994140625 + ], + "size": [ + 210, + 60 + ], + "flags": { + "collapsed": true + }, + "order": 39, + "mode": 0, + "inputs": [ + { + "name": "INT", + "type": "INT", + "link": 263 + } + ], + "outputs": [ + { + "name": "*", + "type": "*", + "links": null + } + ], + "title": "Set_width", + "properties": { + "previousName": "width", + "ue_properties": { + "widget_ue_connectable": {}, + "version": "7.7", + "input_ue_unconnectable": {} + } + }, + "widgets_values": [ + "width" + ], + "color": "#1b4669", + "bgcolor": "#29699c" + }, + { + "id": 155, + "type": "GetNode", + "pos": [ + 
1319.8330078125, + -167.58360290527344 + ], + "size": [ + 210, + 60 + ], + "flags": { + "collapsed": true + }, + "order": 4, + "mode": 0, + "inputs": [], + "outputs": [ + { + "name": "INT", + "type": "INT", + "links": [ + 265 + ] + } + ], + "title": "Get_width", + "properties": { + "ue_properties": { + "widget_ue_connectable": {}, + "version": "7.7", + "input_ue_unconnectable": {} + } + }, + "widgets_values": [ + "width" + ], + "color": "#1b4669", + "bgcolor": "#29699c" + }, + { + "id": 156, + "type": "GetNode", + "pos": [ + 1319.8330078125, + -117.76973724365234 + ], + "size": [ + 210, + 50 + ], + "flags": { + "collapsed": true + }, + "order": 5, + "mode": 0, + "inputs": [], + "outputs": [ + { + "name": "INT", + "type": "INT", + "links": [ + 266 + ] + } + ], + "title": "Get_height", + "properties": { + "ue_properties": { + "widget_ue_connectable": {}, + "version": "7.7", + "input_ue_unconnectable": {} + } + }, + "widgets_values": [ + "height" + ], + "color": "#1b4669", + "bgcolor": "#29699c" + }, + { + "id": 154, + "type": "SetNode", + "pos": [ + -1617.8741455078125, + -2533.993408203125 + ], + "size": [ + 210, + 60 + ], + "flags": { + "collapsed": true + }, + "order": 37, + "mode": 0, + "inputs": [ + { + "name": "INT", + "type": "INT", + "link": 264 + } + ], + "outputs": [ + { + "name": "*", + "type": "*", + "links": null + } + ], + "title": "Set_height", + "properties": { + "previousName": "height", + "ue_properties": { + "widget_ue_connectable": {}, + "version": "7.7", + "input_ue_unconnectable": {} + } + }, + "widgets_values": [ + "height" + ], + "color": "#1b4669", + "bgcolor": "#29699c" + }, + { + "id": 63, + "type": "VHS_LoadVideo", + "pos": [ + -876.9246826171875, + -3084.905517578125 + ], + "size": [ + 315.8014221191406, + 310 + ], + "flags": {}, + "order": 38, + "mode": 0, + "inputs": [ + { + "name": "meta_batch", + "shape": 7, + "type": "VHS_BatchManager", + "link": null + }, + { + "name": "vae", + "shape": 7, + "type": "VAE", + "link": null + }, + { + "name": "custom_width", + "type": "INT", + "widget": { + "name": "custom_width" + }, + "link": 257 + }, + { + "name": "custom_height", + "type": "INT", + "widget": { + "name": "custom_height" + }, + "link": 258 + } + ], + "outputs": [ + { + "name": "IMAGE", + "type": "IMAGE", + "links": [ + 248 + ] + }, + { + "name": "frame_count", + "type": "INT", + "links": [ + 267 + ] + }, + { + "name": "audio", + "type": "AUDIO", + "links": [ + 255 + ] + }, + { + "name": "video_info", + "type": "VHS_VIDEOINFO", + "links": null + } + ], + "properties": { + "cnr_id": "comfyui-videohelpersuite", + "ver": "8e4d79471bf1952154768e8435a9300077b534fa", + "Node name for S&R": "VHS_LoadVideo", + "ue_properties": { + "widget_ue_connectable": { + "custom_width": true, + "custom_height": true + }, + "version": "7.7", + "input_ue_unconnectable": {} + } + }, + "widgets_values": { + "video": "raw.mp4", + "force_rate": 16, + "custom_width": 960, + "custom_height": 544, + "frame_load_cap": 0, + "skip_first_frames": 0, + "select_every_nth": 1, + "format": "AnimateDiff", + "videopreview": { + "hidden": false, + "paused": false, + "params": { + "filename": "raw.mp4", + "type": "input", + "format": "video/mp4", + "force_rate": 16, + "custom_width": 960, + "custom_height": 544, + "frame_load_cap": 0, + "skip_first_frames": 0, + "select_every_nth": 1 + } + } + } + }, + { + "id": 158, + "type": "GetNode", + "pos": [ + 1319.8330078125, + -70.84321594238281 + ], + "size": [ + 210, + 60 + ], + "flags": { + "collapsed": true + }, + "order": 6, + "mode": 0, + "inputs": 
[], + "outputs": [ + { + "name": "INT", + "type": "INT", + "links": [ + 268 + ] + } + ], + "title": "Get_frame_count", + "properties": { + "ue_properties": { + "widget_ue_connectable": {}, + "version": "7.7", + "input_ue_unconnectable": {} + } + }, + "widgets_values": [ + "frame_count" + ], + "color": "#1b4669", + "bgcolor": "#29699c" + }, + { + "id": 131, + "type": "GetNode", + "pos": [ + 1319.8330078125, + -266.1221923828125 + ], + "size": [ + 210, + 60 + ], + "flags": { + "collapsed": true + }, + "order": 7, + "mode": 0, + "inputs": [], + "outputs": [ + { + "name": "IMAGE", + "type": "IMAGE", + "links": [ + 234 + ] + } + ], + "title": "Get_background_image", + "properties": { + "ue_properties": { + "widget_ue_connectable": {}, + "version": "7.7", + "input_ue_unconnectable": {} + } + }, + "widgets_values": [ + "background_image" + ], + "color": "#2a363b", + "bgcolor": "#3f5159" + }, + { + "id": 143, + "type": "GetNode", + "pos": [ + 1319.8330078125, + -213.2382354736328 + ], + "size": [ + 210, + 60 + ], + "flags": { + "collapsed": true + }, + "order": 8, + "mode": 0, + "inputs": [], + "outputs": [ + { + "name": "MASK", + "type": "MASK", + "links": [ + 247 + ] + } + ], + "title": "Get_mask", + "properties": { + "ue_properties": { + "widget_ue_connectable": {}, + "version": "7.7", + "input_ue_unconnectable": {} + } + }, + "widgets_values": [ + "mask" + ], + "color": "#1c5715", + "bgcolor": "#1f401b" + }, + { + "id": 157, + "type": "SetNode", + "pos": [ + -528.8223266601562, + -3030.21337890625 + ], + "size": [ + 210, + 60 + ], + "flags": { + "collapsed": true + }, + "order": 44, + "mode": 0, + "inputs": [ + { + "name": "INT", + "type": "INT", + "link": 267 + } + ], + "outputs": [ + { + "name": "*", + "type": "*", + "links": null + } + ], + "title": "Set_frame_count", + "properties": { + "previousName": "frame_count", + "ue_properties": { + "widget_ue_connectable": {}, + "version": "7.7", + "input_ue_unconnectable": {} + } + }, + "widgets_values": [ + "frame_count" + ], + "color": "#1b4669", + "bgcolor": "#29699c" + }, + { + "id": 144, + "type": "SetNode", + "pos": [ + -522.720947265625, + -3107.148681640625 + ], + "size": [ + 210, + 60 + ], + "flags": { + "collapsed": true + }, + "order": 43, + "mode": 0, + "inputs": [ + { + "name": "IMAGE", + "type": "IMAGE", + "link": 248 + } + ], + "outputs": [ + { + "name": "IMAGE", + "type": "IMAGE", + "links": [ + 306 + ] + } + ], + "title": "Set_input_video", + "properties": { + "previousName": "input_video", + "ue_properties": { + "widget_ue_connectable": {}, + "version": "7.7", + "input_ue_unconnectable": {} + } + }, + "widgets_values": [ + "input_video" + ], + "color": "#2a363b", + "bgcolor": "#3f5159" + }, + { + "id": 128, + "type": "SetNode", + "pos": [ + -459.54620361328125, + -1650.783935546875 + ], + "size": [ + 210, + 60 + ], + "flags": { + "collapsed": true + }, + "order": 46, + "mode": 0, + "inputs": [ + { + "name": "IMAGE", + "type": "IMAGE", + "link": 231 + } + ], + "outputs": [ + { + "name": "*", + "type": "*", + "links": null + } + ], + "title": "Set_reference_image", + "properties": { + "previousName": "reference_image", + "ue_properties": { + "widget_ue_connectable": {}, + "version": "7.7", + "input_ue_unconnectable": {} + } + }, + "widgets_values": [ + "reference_image" + ], + "color": "#2a363b", + "bgcolor": "#3f5159" + }, + { + "id": 64, + "type": "ImageResizeKJv2", + "pos": [ + -772.3116455078125, + -1675.555419921875 + ], + "size": [ + 270, + 336 + ], + "flags": {}, + "order": 40, + "mode": 0, + "inputs": [ + { + "name": 
"image", + "type": "IMAGE", + "link": 82 + }, + { + "name": "mask", + "shape": 7, + "type": "MASK", + "link": null + }, + { + "name": "width", + "type": "INT", + "widget": { + "name": "width" + }, + "link": 286 + }, + { + "name": "height", + "type": "INT", + "widget": { + "name": "height" + }, + "link": 287 + } + ], + "outputs": [ + { + "name": "IMAGE", + "type": "IMAGE", + "links": [ + 231 + ] + }, + { + "name": "width", + "type": "INT", + "links": [] + }, + { + "name": "height", + "type": "INT", + "links": [] + }, + { + "name": "mask", + "type": "MASK", + "links": null + } + ], + "properties": { + "cnr_id": "comfyui-kjnodes", + "ver": "468fcc86f0b29e79a8510e8239eb15714d6747a6", + "Node name for S&R": "ImageResizeKJv2", + "ue_properties": { + "widget_ue_connectable": { + "width": true, + "height": true + }, + "version": "7.7", + "input_ue_unconnectable": {} + } + }, + "widgets_values": [ + 832, + 480, + "lanczos", + "pad_edge_pixel", + "0, 0, 0", + "top", + 16, + "cpu" + ] + }, + { + "id": 57, + "type": "LoadImage", + "pos": [ + -1116.435791015625, + -1679.51318359375 + ], + "size": [ + 274.080078125, + 314 + ], + "flags": {}, + "order": 9, + "mode": 0, + "inputs": [], + "outputs": [ + { + "name": "IMAGE", + "type": "IMAGE", + "links": [ + 82 + ] + }, + { + "name": "MASK", + "type": "MASK", + "links": null + } + ], + "properties": { + "cnr_id": "comfy-core", + "ver": "0.3.57", + "Node name for S&R": "LoadImage", + "ue_properties": { + "widget_ue_connectable": {}, + "version": "7.7", + "input_ue_unconnectable": {} + } + }, + "widgets_values": [ + "refer.jpeg", + "image" + ] + }, + { + "id": 142, + "type": "SetNode", + "pos": [ + 1086.6927490234375, + -2124.5263671875 + ], + "size": [ + 210, + 60 + ], + "flags": { + "collapsed": true + }, + "order": 64, + "mode": 0, + "inputs": [ + { + "name": "MASK", + "type": "MASK", + "link": 245 + } + ], + "outputs": [ + { + "name": "MASK", + "type": "MASK", + "links": [ + 246 + ] + } + ], + "title": "Set_mask", + "properties": { + "previousName": "mask", + "ue_properties": { + "widget_ue_connectable": {}, + "version": "7.7", + "input_ue_unconnectable": {} + } + }, + "widgets_values": [ + "mask" + ], + "color": "#1c5715", + "bgcolor": "#1f401b" + }, + { + "id": 99, + "type": "DrawMaskOnImage", + "pos": [ + 1222.5340576171875, + -2132.91455078125 + ], + "size": [ + 270, + 102 + ], + "flags": {}, + "order": 66, + "mode": 0, + "inputs": [ + { + "name": "image", + "type": "IMAGE", + "link": 250 + }, + { + "name": "mask", + "type": "MASK", + "link": 246 + } + ], + "outputs": [ + { + "name": "images", + "type": "IMAGE", + "links": [ + 233 + ] + } + ], + "properties": { + "cnr_id": "comfyui-kjnodes", + "ver": "623b5913dc7f240fd8b26422e99f8849a21c5473", + "Node name for S&R": "DrawMaskOnImage", + "ue_properties": { + "widget_ue_connectable": {}, + "version": "7.7", + "input_ue_unconnectable": {} + } + }, + "widgets_values": [ + "0, 0, 0", + "cpu" + ] + }, + { + "id": 146, + "type": "GetNode", + "pos": [ + 1086.932861328125, + -2184.602783203125 + ], + "size": [ + 210, + 60 + ], + "flags": { + "collapsed": true + }, + "order": 10, + "mode": 0, + "inputs": [], + "outputs": [ + { + "name": "IMAGE", + "type": "IMAGE", + "links": [ + 250 + ] + } + ], + "title": "Get_input_video", + "properties": { + "ue_properties": { + "widget_ue_connectable": {}, + "version": "7.7", + "input_ue_unconnectable": {} + } + }, + "widgets_values": [ + "input_video" + ], + "color": "#2a363b", + "bgcolor": "#3f5159" + }, + { + "id": 130, + "type": "SetNode", + "pos": [ + 
1294.1051025390625, + -2211.976806640625 + ], + "size": [ + 211.05747985839844, + 60 + ], + "flags": { + "collapsed": true + }, + "order": 67, + "mode": 0, + "inputs": [ + { + "name": "IMAGE", + "type": "IMAGE", + "link": 233 + } + ], + "outputs": [ + { + "name": "IMAGE", + "type": "IMAGE", + "links": [ + 251 + ] + } + ], + "title": "Set_background_image", + "properties": { + "previousName": "background_image", + "ue_properties": { + "widget_ue_connectable": {}, + "version": "7.7", + "input_ue_unconnectable": {} + } + }, + "widgets_values": [ + "background_image" + ], + "color": "#2a363b", + "bgcolor": "#3f5159" + }, + { + "id": 163, + "type": "GetNode", + "pos": [ + 1571.3441162109375, + -468.3181457519531 + ], + "size": [ + 210, + 34 + ], + "flags": { + "collapsed": true + }, + "order": 11, + "mode": 0, + "inputs": [], + "outputs": [ + { + "name": "WANVAE", + "type": "WANVAE", + "links": [ + 280 + ] + } + ], + "title": "Get_VAE", + "properties": { + "ue_properties": { + "widget_ue_connectable": {}, + "version": "7.7", + "input_ue_unconnectable": {} + } + }, + "widgets_values": [ + "VAE" + ], + "color": "#322", + "bgcolor": "#533" + }, + { + "id": 71, + "type": "CLIPVisionLoader", + "pos": [ + -512.5712890625, + -245.34796142578125 + ], + "size": [ + 270, + 58 + ], + "flags": {}, + "order": 12, + "mode": 0, + "inputs": [], + "outputs": [ + { + "name": "CLIP_VISION", + "type": "CLIP_VISION", + "links": [ + 97 + ] + } + ], + "properties": { + "cnr_id": "comfy-core", + "ver": "0.3.59", + "Node name for S&R": "CLIPVisionLoader", + "ue_properties": { + "widget_ue_connectable": {}, + "version": "7.7", + "input_ue_unconnectable": {} + } + }, + "widgets_values": [ + "clip_vision_h.safetensors" + ], + "color": "#2a363b", + "bgcolor": "#3f5159" + }, + { + "id": 70, + "type": "WanVideoClipVisionEncode", + "pos": [ + 989.1386108398438, + -306.2594299316406 + ], + "size": [ + 280.9771423339844, + 262 + ], + "flags": {}, + "order": 33, + "mode": 0, + "inputs": [ + { + "name": "clip_vision", + "type": "CLIP_VISION", + "link": 97 + }, + { + "name": "image_1", + "type": "IMAGE", + "link": 235 + }, + { + "name": "image_2", + "shape": 7, + "type": "IMAGE", + "link": null + }, + { + "name": "negative_image", + "shape": 7, + "type": "IMAGE", + "link": null + } + ], + "outputs": [ + { + "name": "image_embeds", + "type": "WANVIDIMAGE_CLIPEMBEDS", + "links": [ + 96 + ] + } + ], + "properties": { + "cnr_id": "ComfyUI-WanVideoWrapper", + "ver": "2bdd81a10b03c14443c79bdf3b783b1feb3d1fa3", + "Node name for S&R": "WanVideoClipVisionEncode", + "ue_properties": { + "widget_ue_connectable": {}, + "version": "7.7", + "input_ue_unconnectable": {} + } + }, + "widgets_values": [ + 1, + 1, + "center", + "average", + true, + 0, + 0.5 + ], + "color": "#233", + "bgcolor": "#355" + }, + { + "id": 133, + "type": "GetNode", + "pos": [ + 995.1069946289062, + -382.21319580078125 + ], + "size": [ + 210, + 50 + ], + "flags": { + "collapsed": true + }, + "order": 13, + "mode": 0, + "inputs": [], + "outputs": [ + { + "name": "IMAGE", + "type": "IMAGE", + "links": [ + 235, + 282 + ] + } + ], + "title": "Get_reference_image", + "properties": { + "ue_properties": { + "widget_ue_connectable": {}, + "version": "7.7", + "input_ue_unconnectable": {} + } + }, + "widgets_values": [ + "reference_image" + ], + "color": "#2a363b", + "bgcolor": "#3f5159" + }, + { + "id": 141, + "type": "GetNode", + "pos": [ + 1320.70703125, + -392.9727478027344 + ], + "size": [ + 210, + 34 + ], + "flags": { + "collapsed": true + }, + "order": 14, + "mode": 0, + 
"inputs": [], + "outputs": [ + { + "name": "IMAGE", + "type": "IMAGE", + "links": [ + 244 + ] + } + ], + "title": "Get_pose_images", + "properties": { + "ue_properties": { + "widget_ue_connectable": {}, + "version": "7.7", + "input_ue_unconnectable": {} + } + }, + "widgets_values": [ + "pose_images" + ], + "color": "#2a363b", + "bgcolor": "#3f5159" + }, + { + "id": 35, + "type": "WanVideoTorchCompileSettings", + "pos": [ + -1146.0887451171875, + -909.5626831054688 + ], + "size": [ + 390.5999755859375, + 250 + ], + "flags": {}, + "order": 15, + "mode": 0, + "inputs": [], + "outputs": [ + { + "name": "torch_compile_args", + "type": "WANCOMPILEARGS", + "slot_index": 0, + "links": [ + 100 + ] + } + ], + "properties": { + "cnr_id": "ComfyUI-WanVideoWrapper", + "ver": "e5ef9752a7e846b232fc05fd993327a2e870a788", + "Node name for S&R": "WanVideoTorchCompileSettings", + "ue_properties": { + "widget_ue_connectable": {}, + "version": "7.7", + "input_ue_unconnectable": {} + } + }, + "widgets_values": [ + "inductor", + false, + "default", + false, + 64, + true, + 128, + false, + false + ], + "color": "#223", + "bgcolor": "#335" + }, + { + "id": 38, + "type": "WanVideoVAELoader", + "pos": [ + -569.7952880859375, + -499.9053649902344 + ], + "size": [ + 416.25482177734375, + 130 + ], + "flags": {}, + "order": 16, + "mode": 0, + "inputs": [ + { + "name": "compile_args", + "shape": 7, + "type": "WANCOMPILEARGS", + "link": null + } + ], + "outputs": [ + { + "name": "vae", + "type": "WANVAE", + "slot_index": 0, + "links": [ + 278 + ] + } + ], + "properties": { + "cnr_id": "ComfyUI-WanVideoWrapper", + "ver": "e5ef9752a7e846b232fc05fd993327a2e870a788", + "Node name for S&R": "WanVideoVAELoader", + "ue_properties": { + "widget_ue_connectable": {}, + "version": "7.7", + "input_ue_unconnectable": {} + } + }, + "widgets_values": [ + "wanvideo\\Wan2_1_VAE_bf16.safetensors", + "bf16", + false, + false + ], + "color": "#322", + "bgcolor": "#533" + }, + { + "id": 161, + "type": "SetNode", + "pos": [ + -259.3302001953125, + -357.3097839355469 + ], + "size": [ + 210, + 60 + ], + "flags": { + "collapsed": true + }, + "order": 35, + "mode": 0, + "inputs": [ + { + "name": "WANVAE", + "type": "WANVAE", + "link": 278 + } + ], + "outputs": [ + { + "name": "*", + "type": "*", + "links": null + } + ], + "title": "Set_VAE", + "properties": { + "previousName": "VAE", + "ue_properties": { + "widget_ue_connectable": {}, + "version": "7.7", + "input_ue_unconnectable": {} + } + }, + "widgets_values": [ + "VAE" + ], + "color": "#322", + "bgcolor": "#533" + }, + { + "id": 77, + "type": "ImageConcatMulti", + "pos": [ + 2653.37939453125, + -1220.90087890625 + ], + "size": [ + 270, + 190 + ], + "flags": {}, + "order": 36, + "mode": 0, + "inputs": [ + { + "name": "image_1", + "type": "IMAGE", + "link": 236 + }, + { + "name": "image_2", + "shape": 7, + "type": "IMAGE", + "link": 239 + }, + { + "name": "image_3", + "shape": 7, + "type": "IMAGE", + "link": 243 + }, + { + "name": "image_4", + "shape": 7, + "type": "IMAGE", + "link": 249 + } + ], + "outputs": [ + { + "name": "images", + "type": "IMAGE", + "links": [ + 107 + ] + } + ], + "properties": { + "cnr_id": "comfyui-kjnodes", + "ver": "468fcc86f0b29e79a8510e8239eb15714d6747a6", + "ue_properties": { + "widget_ue_connectable": {}, + "version": "7.7", + "input_ue_unconnectable": {} + } + }, + "widgets_values": [ + 4, + "down", + true, + null + ] + }, + { + "id": 145, + "type": "GetNode", + "pos": [ + 2463.91552734375, + -1073.7054443359375 + ], + "size": [ + 210, + 60 + ], + "flags": { + 
"collapsed": true + }, + "order": 17, + "mode": 0, + "inputs": [], + "outputs": [ + { + "name": "IMAGE", + "type": "IMAGE", + "links": [ + 249 + ] + } + ], + "title": "Get_input_video", + "properties": { + "ue_properties": { + "widget_ue_connectable": {}, + "version": "7.7", + "input_ue_unconnectable": {} + } + }, + "widgets_values": [ + "input_video" + ], + "color": "#2a363b", + "bgcolor": "#3f5159" + }, + { + "id": 162, + "type": "GetNode", + "pos": [ + 2483.826171875, + -523.2938842773438 + ], + "size": [ + 210, + 60 + ], + "flags": { + "collapsed": true + }, + "order": 18, + "mode": 0, + "inputs": [], + "outputs": [ + { + "name": "WANVAE", + "type": "WANVAE", + "links": [ + 279 + ] + } + ], + "title": "Get_VAE", + "properties": { + "ue_properties": { + "widget_ue_connectable": {}, + "version": "7.7", + "input_ue_unconnectable": {} + } + }, + "widgets_values": [ + "VAE" + ], + "color": "#322", + "bgcolor": "#533" + }, + { + "id": 42, + "type": "GetImageSizeAndCount", + "pos": [ + 2555.719482421875, + -692.73095703125 + ], + "size": [ + 277.20001220703125, + 86 + ], + "flags": {}, + "order": 60, + "mode": 0, + "inputs": [ + { + "name": "image", + "type": "IMAGE", + "link": 323 + } + ], + "outputs": [ + { + "name": "image", + "type": "IMAGE", + "slot_index": 0, + "links": [ + 87 + ] + }, + { + "label": "width", + "name": "width", + "type": "INT", + "links": null + }, + { + "label": "height", + "name": "height", + "type": "INT", + "links": null + }, + { + "label": "count", + "name": "count", + "type": "INT", + "links": null + } + ], + "properties": { + "cnr_id": "comfyui-kjnodes", + "ver": "36f6fdd7d4c393675ac622bd300ef667ee65d8b8", + "Node name for S&R": "GetImageSizeAndCount", + "ue_properties": { + "widget_ue_connectable": {}, + "version": "7.7", + "input_ue_unconnectable": {} + } + }, + "widgets_values": [] + }, + { + "id": 110, + "type": "WanVideoContextOptions", + "pos": [ + 1829.0579833984375, + -868.31298828125 + ], + "size": [ + 275.783203125, + 202 + ], + "flags": {}, + "order": 19, + "mode": 0, + "inputs": [ + { + "name": "reference_latent", + "shape": 7, + "type": "LATENT", + "link": null + } + ], + "outputs": [ + { + "name": "context_options", + "type": "WANVIDCONTEXT", + "links": [] + } + ], + "properties": { + "cnr_id": "ComfyUI-WanVideoWrapper", + "ver": "2bdd81a10b03c14443c79bdf3b783b1feb3d1fa3", + "Node name for S&R": "WanVideoContextOptions", + "ue_properties": { + "widget_ue_connectable": {}, + "version": "7.7", + "input_ue_unconnectable": {} + } + }, + "widgets_values": [ + "static_standard", + 81, + 4, + 32, + true, + false, + "linear" + ] + }, + { + "id": 65, + "type": "WanVideoTextEncodeCached", + "pos": [ + 1329.64208984375, + 68.953125 + ], + "size": [ + 488.4488220214844, + 362.56817626953125 + ], + "flags": {}, + "order": 20, + "mode": 0, + "inputs": [ + { + "name": "extender_args", + "shape": 7, + "type": "WANVIDEOPROMPTEXTENDER_ARGS", + "link": null + } + ], + "outputs": [ + { + "name": "text_embeds", + "type": "WANVIDEOTEXTEMBEDS", + "links": [ + 281 + ] + }, + { + "name": "negative_text_embeds", + "type": "WANVIDEOTEXTEMBEDS", + "links": null + }, + { + "name": "positive_prompt", + "type": "STRING", + "links": null + } + ], + "properties": { + "cnr_id": "ComfyUI-WanVideoWrapper", + "ver": "761b1d191e50d589465e31dc0d40ff7c59b1b7b0", + "Node name for S&R": "WanVideoTextEncodeCached", + "ue_properties": { + "widget_ue_connectable": {}, + "version": "7.7", + "input_ue_unconnectable": {} + } + }, + "widgets_values": [ + "umt5-xxl-enc-bf16.safetensors", + "bf16", 
+ "man is walking, style is soft 3D render style, night time, moonlight", + "色调艳丽,过曝,静态,细节模糊不清,字幕,风格,作品,画作,画面,静止,整体发灰,最差质量,低质量,JPEG压缩残留,丑陋的,残缺的,多余的手指,画得不好的手部,画得不好的脸部,畸形的,毁容的,形态畸形的肢体,手指融合,静止不动的画面,杂乱的背景,三条腿,背景人很多,倒着走", + "disabled", + false, + "gpu" + ], + "color": "#432", + "bgcolor": "#653" + }, + { + "id": 169, + "type": "Note", + "pos": [ + 1282.7142333984375, + -825.0780029296875 + ], + "size": [ + 484.7705383300781, + 194.427978515625 + ], + "flags": {}, + "order": 21, + "mode": 0, + "inputs": [], + "outputs": [], + "properties": { + "ue_properties": { + "widget_ue_connectable": {}, + "version": "7.7", + "input_ue_unconnectable": {} + } + }, + "widgets_values": [ + "For long generations you can use either context options, or the original long gen method, which is activated when frame_window_size is smaller than total frames(num_frames). When using this method the window size stays constant, extra frames are generated in the case the last window is cut short.\n\nWhen using context options, set num_frames and frame_window_size to be equal\n\nOtherwise set frame_window_size to something the model is capable of doing normally, original default is 77, 81 seems to work too.\n\nOriginal method is probably better for motion etc, and is faster, but the benefit of context windows is that it doesn't degrade over time on long clips." + ], + "color": "#432", + "bgcolor": "#653" + }, + { + "id": 22, + "type": "WanVideoModelLoader", + "pos": [ + -700.9990844726562, + -908.7992553710938 + ], + "size": [ + 601.9249267578125, + 318 + ], + "flags": {}, + "order": 34, + "mode": 0, + "inputs": [ + { + "name": "compile_args", + "shape": 7, + "type": "WANCOMPILEARGS", + "link": 100 + }, + { + "name": "block_swap_args", + "shape": 7, + "type": "BLOCKSWAPARGS", + "link": null + }, + { + "name": "lora", + "shape": 7, + "type": "WANVIDLORA", + "link": null + }, + { + "name": "vram_management_args", + "shape": 7, + "type": "VRAM_MANAGEMENTARGS", + "link": null + }, + { + "name": "extra_model", + "shape": 7, + "type": "VACEPATH", + "link": null + }, + { + "name": "fantasytalking_model", + "shape": 7, + "type": "FANTASYTALKINGMODEL", + "link": null + }, + { + "name": "multitalk_model", + "shape": 7, + "type": "MULTITALKMODEL", + "link": null + }, + { + "name": "fantasyportrait_model", + "shape": 7, + "type": "FANTASYPORTRAITMODEL", + "link": null + } + ], + "outputs": [ + { + "name": "model", + "type": "WANVIDEOMODEL", + "slot_index": 0, + "links": [ + 59 + ] + } + ], + "properties": { + "cnr_id": "ComfyUI-WanVideoWrapper", + "ver": "e5ef9752a7e846b232fc05fd993327a2e870a788", + "Node name for S&R": "WanVideoModelLoader", + "ue_properties": { + "widget_ue_connectable": {}, + "version": "7.7", + "input_ue_unconnectable": {} + } + }, + "widgets_values": [ + "WanVideo\\2_2\\Wan2_2-Animate-14B_fp8_e4m3fn_scaled_KJ.safetensors", + "fp16_fast", + "disabled", + "offload_device", + "sageattn", + "default" + ], + "color": "#223", + "bgcolor": "#335" + }, + { + "id": 151, + "type": "INTConstant", + "pos": [ + -1704.931640625, + -2640.54638671875 + ], + "size": [ + 210, + 58 + ], + "flags": { + "collapsed": false + }, + "order": 22, + "mode": 0, + "inputs": [], + "outputs": [ + { + "name": "value", + "type": "INT", + "links": [ + 258, + 264, + 287 + ] + } + ], + "title": "Height", + "properties": { + "cnr_id": "comfyui-kjnodes", + "ver": "37659859825cea55940a58110525795ce5deb8be", + "Node name for S&R": "INTConstant", + "ue_properties": { + "widget_ue_connectable": {}, + "version": "7.7", + "input_ue_unconnectable": {} + } + 
}, + "widgets_values": [ + 480 + ], + "color": "#1b4669", + "bgcolor": "#29699c" + }, + { + "id": 51, + "type": "WanVideoBlockSwap", + "pos": [ + -20.855491638183594, + -308.51751708984375 + ], + "size": [ + 281.404296875, + 202 + ], + "flags": {}, + "order": 23, + "mode": 0, + "inputs": [], + "outputs": [ + { + "name": "block_swap_args", + "type": "BLOCKSWAPARGS", + "links": [ + 64 + ] + } + ], + "properties": { + "cnr_id": "ComfyUI-WanVideoWrapper", + "ver": "e5ef9752a7e846b232fc05fd993327a2e870a788", + "Node name for S&R": "WanVideoBlockSwap", + "ue_properties": { + "widget_ue_connectable": {}, + "version": "7.7", + "input_ue_unconnectable": {} + } + }, + "widgets_values": [ + 25, + false, + false, + true, + 0, + 1, + false + ], + "color": "#223", + "bgcolor": "#335" + }, + { + "id": 50, + "type": "WanVideoSetBlockSwap", + "pos": [ + 299.8666076660156, + -422.0509338378906 + ], + "size": [ + 209.6841796875, + 46 + ], + "flags": {}, + "order": 47, + "mode": 0, + "inputs": [ + { + "name": "model", + "type": "WANVIDEOMODEL", + "link": 62 + }, + { + "name": "block_swap_args", + "shape": 7, + "type": "BLOCKSWAPARGS", + "link": 64 + } + ], + "outputs": [ + { + "name": "model", + "type": "WANVIDEOMODEL", + "links": [ + 63 + ] + } + ], + "properties": { + "cnr_id": "ComfyUI-WanVideoWrapper", + "ver": "e5ef9752a7e846b232fc05fd993327a2e870a788", + "Node name for S&R": "WanVideoSetBlockSwap", + "ue_properties": { + "widget_ue_connectable": {}, + "version": "7.7", + "input_ue_unconnectable": {} + } + }, + "widgets_values": [], + "color": "#223", + "bgcolor": "#335" + }, + { + "id": 48, + "type": "WanVideoSetLoRAs", + "pos": [ + -5.091688632965088, + -420.6256103515625 + ], + "size": [ + 178.5533203125, + 46 + ], + "flags": {}, + "order": 42, + "mode": 0, + "inputs": [ + { + "name": "model", + "type": "WANVIDEOMODEL", + "link": 59 + }, + { + "name": "lora", + "shape": 7, + "type": "WANVIDLORA", + "link": 289 + } + ], + "outputs": [ + { + "name": "model", + "type": "WANVIDEOMODEL", + "links": [ + 62 + ] + } + ], + "properties": { + "cnr_id": "ComfyUI-WanVideoWrapper", + "ver": "e5ef9752a7e846b232fc05fd993327a2e870a788", + "Node name for S&R": "WanVideoSetLoRAs", + "ue_properties": { + "widget_ue_connectable": {}, + "version": "7.7", + "input_ue_unconnectable": {} + } + }, + "widgets_values": [], + "color": "#223", + "bgcolor": "#335" + }, + { + "id": 171, + "type": "WanVideoLoraSelectMulti", + "pos": [ + 3.4800829887390137, + -932.2160034179688 + ], + "size": [ + 583.3719482421875, + 342 + ], + "flags": {}, + "order": 24, + "mode": 0, + "inputs": [ + { + "name": "prev_lora", + "shape": 7, + "type": "WANVIDLORA", + "link": null + }, + { + "name": "blocks", + "shape": 7, + "type": "SELECTEDBLOCKS", + "link": null + } + ], + "outputs": [ + { + "name": "lora", + "type": "WANVIDLORA", + "links": [ + 289 + ] + } + ], + "properties": { + "cnr_id": "ComfyUI-WanVideoWrapper", + "ver": "b2c8cf969fcf60a38884ea2c29af177ae1f28b29", + "Node name for S&R": "WanVideoLoraSelectMulti", + "ue_properties": { + "widget_ue_connectable": {}, + "version": "7.7", + "input_ue_unconnectable": {} + } + }, + "widgets_values": [ + "WanVideo\\WanAnimate_relight_lora_fp16.safetensors", + 1, + "WanVideo\\Lightx2v\\lightx2v_I2V_14B_480p_cfg_step_distill_rank64_bf16.safetensors", + 1.2, + "none", + 1, + "none", + 1, + "none", + 1, + false, + false + ], + "color": "#223", + "bgcolor": "#335" + }, + { + "id": 182, + "type": "GrowMaskWithBlur", + "pos": [ + 431.7315368652344, + -2222.123779296875 + ], + "size": [ + 292.748046875, + 246 + 
], + "flags": {}, + "order": 59, + "mode": 0, + "inputs": [ + { + "name": "mask", + "type": "MASK", + "link": 314 + } + ], + "outputs": [ + { + "name": "mask", + "type": "MASK", + "links": [ + 315 + ] + }, + { + "name": "mask_inverted", + "type": "MASK", + "links": null + } + ], + "properties": { + "cnr_id": "comfyui-kjnodes", + "ver": "bb205d809b467307b8ec3bb1a22680a4873187f8", + "Node name for S&R": "GrowMaskWithBlur", + "ue_properties": { + "widget_ue_connectable": {}, + "version": "7.7", + "input_ue_unconnectable": {} + } + }, + "widgets_values": [ + 10, + 0, + true, + false, + 0, + 1, + 1, + false + ] + }, + { + "id": 108, + "type": "BlockifyMask", + "pos": [ + 779.2421264648438, + -2222.2099609375 + ], + "size": [ + 270, + 82 + ], + "flags": {}, + "order": 62, + "mode": 0, + "inputs": [ + { + "name": "masks", + "type": "MASK", + "link": 315 + } + ], + "outputs": [ + { + "name": "mask", + "type": "MASK", + "links": [ + 245 + ] + } + ], + "properties": { + "cnr_id": "comfyui-kjnodes", + "ver": "00da1910634fbf314d407608efb281ae6f7f1ba2", + "Node name for S&R": "BlockifyMask", + "ue_properties": { + "widget_ue_connectable": {}, + "version": "7.7", + "input_ue_unconnectable": {} + } + }, + "widgets_values": [ + 32, + "cpu" + ] + }, + { + "id": 127, + "type": "Note", + "pos": [ + 983.2726440429688, + -2067.319580078125 + ], + "size": [ + 210, + 88 + ], + "flags": {}, + "order": 25, + "mode": 0, + "inputs": [], + "outputs": [], + "properties": { + "ue_properties": { + "widget_ue_connectable": {}, + "version": "7.7", + "input_ue_unconnectable": {} + } + }, + "widgets_values": [ + "These are new nodes in KJNodes" + ], + "color": "#432", + "bgcolor": "#653" + }, + { + "id": 104, + "type": "Sam2Segmentation", + "pos": [ + 83.82372283935547, + -2220.965576171875 + ], + "size": [ + 272.087890625, + 182 + ], + "flags": {}, + "order": 55, + "mode": 0, + "inputs": [ + { + "name": "sam2_model", + "type": "SAM2MODEL", + "link": 185 + }, + { + "name": "image", + "type": "IMAGE", + "link": 316 + }, + { + "name": "coordinates_positive", + "shape": 7, + "type": "STRING", + "link": null + }, + { + "name": "coordinates_negative", + "shape": 7, + "type": "STRING", + "link": null + }, + { + "name": "bboxes", + "shape": 7, + "type": "BBOX", + "link": 321 + }, + { + "name": "mask", + "shape": 7, + "type": "MASK", + "link": null + } + ], + "outputs": [ + { + "name": "mask", + "type": "MASK", + "links": [ + 314 + ] + } + ], + "properties": { + "cnr_id": "ComfyUI-segment-anything-2", + "ver": "c59676b008a76237002926f684d0ca3a9b29ac54", + "Node name for S&R": "Sam2Segmentation", + "ue_properties": { + "widget_ue_connectable": {}, + "version": "7.7", + "input_ue_unconnectable": {} + } + }, + "widgets_values": [ + false, + false + ] + }, + { + "id": 180, + "type": "GetImageSizeAndCount", + "pos": [ + -246.24586486816406, + -3011.47705078125 + ], + "size": [ + 196.2994140625, + 86 + ], + "flags": {}, + "order": 48, + "mode": 0, + "inputs": [ + { + "name": "image", + "type": "IMAGE", + "link": 306 + } + ], + "outputs": [ + { + "name": "image", + "type": "IMAGE", + "links": [ + 309, + 316 + ] + }, + { + "label": "832 width", + "name": "width", + "type": "INT", + "links": [ + 307, + 310 + ] + }, + { + "label": "480 height", + "name": "height", + "type": "INT", + "links": [ + 308, + 311 + ] + }, + { + "label": "109 count", + "name": "count", + "type": "INT", + "links": null + } + ], + "properties": { + "cnr_id": "comfyui-kjnodes", + "ver": "bb205d809b467307b8ec3bb1a22680a4873187f8", + "Node name for S&R": 
"GetImageSizeAndCount", + "ue_properties": { + "widget_ue_connectable": {}, + "version": "7.7", + "input_ue_unconnectable": {} + } + }, + "widgets_values": [] + }, + { + "id": 173, + "type": "DrawViTPose", + "pos": [ + 120.56317138671875, + -2811.226318359375 + ], + "size": [ + 270, + 178 + ], + "flags": {}, + "order": 52, + "mode": 0, + "inputs": [ + { + "name": "pose_data", + "type": "POSEDATA", + "link": 294 + }, + { + "name": "width", + "type": "INT", + "widget": { + "name": "width" + }, + "link": 307 + }, + { + "name": "height", + "type": "INT", + "widget": { + "name": "height" + }, + "link": 308 + } + ], + "outputs": [ + { + "name": "pose_images", + "type": "IMAGE", + "links": [] + } + ], + "properties": { + "cnr_id": "comfyui-wananimatepreprocess", + "ver": "e63d6e71ae4c271f3f81211a7ca7f87607b7e50d", + "Node name for S&R": "DrawViTPose", + "aux_id": "kijai/ComfyUI-WanAnimatePreprocess", + "ue_properties": { + "widget_ue_connectable": { + "width": true, + "height": true + }, + "version": "7.7", + "input_ue_unconnectable": {} + } + }, + "widgets_values": [ + 832, + 480, + 16, + -1, + -1, + "True" + ] + }, + { + "id": 75, + "type": "VHS_VideoCombine", + "pos": [ + 2010.67431640625, + -3041.57568359375 + ], + "size": [ + 743.6680297851562, + 334 + ], + "flags": {}, + "order": 68, + "mode": 0, + "inputs": [ + { + "name": "images", + "type": "IMAGE", + "link": 251 + }, + { + "name": "audio", + "shape": 7, + "type": "AUDIO", + "link": null + }, + { + "name": "meta_batch", + "shape": 7, + "type": "VHS_BatchManager", + "link": null + }, + { + "name": "vae", + "shape": 7, + "type": "VAE", + "link": null + } + ], + "outputs": [ + { + "name": "Filenames", + "type": "VHS_FILENAMES", + "links": null + } + ], + "properties": { + "cnr_id": "comfyui-videohelpersuite", + "ver": "8e4d79471bf1952154768e8435a9300077b534fa", + "Node name for S&R": "VHS_VideoCombine", + "ue_properties": { + "widget_ue_connectable": {}, + "version": "7.7", + "input_ue_unconnectable": {} + } + }, + "widgets_values": { + "frame_rate": 16, + "loop_count": 0, + "filename_prefix": "WanVideo2_1_T2V", + "format": "video/h264-mp4", + "pix_fmt": "yuv420p", + "crf": 19, + "save_metadata": true, + "trim_to_audio": false, + "pingpong": false, + "save_output": false, + "videopreview": { + "hidden": false, + "paused": false, + "params": { + "filename": "WanVideo2_1_T2V_00004.mp4", + "subfolder": "", + "type": "temp", + "format": "video/h264-mp4", + "frame_rate": 16, + "workflow": "WanVideo2_1_T2V_00004.png", + "fullpath": "N:\\AI\\ComfyUI\\temp\\WanVideo2_1_T2V_00004.mp4" + } + } + } + }, + { + "id": 181, + "type": "VHS_VideoCombine", + "pos": [ + 1137.4183349609375, + -3026.80517578125 + ], + "size": [ + 743.6680297851562, + 334 + ], + "flags": {}, + "order": 61, + "mode": 0, + "inputs": [ + { + "name": "images", + "type": "IMAGE", + "link": 320 + }, + { + "name": "audio", + "shape": 7, + "type": "AUDIO", + "link": null + }, + { + "name": "meta_batch", + "shape": 7, + "type": "VHS_BatchManager", + "link": null + }, + { + "name": "vae", + "shape": 7, + "type": "VAE", + "link": null + } + ], + "outputs": [ + { + "name": "Filenames", + "type": "VHS_FILENAMES", + "links": null + } + ], + "properties": { + "cnr_id": "comfyui-videohelpersuite", + "ver": "8e4d79471bf1952154768e8435a9300077b534fa", + "Node name for S&R": "VHS_VideoCombine", + "ue_properties": { + "widget_ue_connectable": {}, + "version": "7.7", + "input_ue_unconnectable": {} + } + }, + "widgets_values": { + "frame_rate": 16, + "loop_count": 0, + "filename_prefix": 
"WanVideo2_1_T2V", + "format": "video/h264-mp4", + "pix_fmt": "yuv420p", + "crf": 19, + "save_metadata": true, + "trim_to_audio": false, + "pingpong": false, + "save_output": false, + "videopreview": { + "hidden": false, + "paused": false, + "params": { + "filename": "WanVideo2_1_T2V_00002.mp4", + "subfolder": "", + "type": "temp", + "format": "video/h264-mp4", + "frame_rate": 16, + "workflow": "WanVideo2_1_T2V_00002.png", + "fullpath": "N:\\AI\\ComfyUI\\temp\\WanVideo2_1_T2V_00002.mp4" + } + } + } + }, + { + "id": 174, + "type": "VHS_VideoCombine", + "pos": [ + 714.7760009765625, + -3057.50341796875 + ], + "size": [ + 220.7333984375, + 334 + ], + "flags": {}, + "order": 58, + "mode": 0, + "inputs": [ + { + "name": "images", + "type": "IMAGE", + "link": 318 + }, + { + "name": "audio", + "shape": 7, + "type": "AUDIO", + "link": null + }, + { + "name": "meta_batch", + "shape": 7, + "type": "VHS_BatchManager", + "link": null + }, + { + "name": "vae", + "shape": 7, + "type": "VAE", + "link": null + } + ], + "outputs": [ + { + "name": "Filenames", + "type": "VHS_FILENAMES", + "links": null + } + ], + "properties": { + "cnr_id": "comfyui-videohelpersuite", + "ver": "0edce8ef7ce173ac97a3ed3d6f4636029d1a4530", + "Node name for S&R": "VHS_VideoCombine", + "ue_properties": { + "widget_ue_connectable": {}, + "version": "7.7", + "input_ue_unconnectable": {} + } + }, + "widgets_values": { + "frame_rate": 16, + "loop_count": 0, + "filename_prefix": "vitpose", + "format": "video/h264-mp4", + "pix_fmt": "yuv420p", + "crf": 19, + "save_metadata": true, + "trim_to_audio": false, + "pingpong": false, + "save_output": false, + "videopreview": { + "hidden": false, + "paused": false, + "params": { + "filename": "vitpose_00004.mp4", + "subfolder": "", + "type": "temp", + "format": "video/h264-mp4", + "frame_rate": 16, + "workflow": "vitpose_00004.png", + "fullpath": "N:\\AI\\ComfyUI\\temp\\vitpose_00004.mp4" + } + } + } + }, + { + "id": 183, + "type": "SetNode", + "pos": [ + 464.6632080078125, + -3038.54296875 + ], + "size": [ + 210, + 60 + ], + "flags": { + "collapsed": true + }, + "order": 54, + "mode": 0, + "inputs": [ + { + "name": "IMAGE", + "type": "IMAGE", + "link": 317 + } + ], + "outputs": [ + { + "name": "IMAGE", + "type": "IMAGE", + "links": [ + 318 + ] + } + ], + "title": "Set_face_images", + "properties": { + "previousName": "face_images", + "ue_properties": { + "widget_ue_connectable": {}, + "version": "7.7", + "input_ue_unconnectable": {} + } + }, + "widgets_values": [ + "face_images" + ], + "color": "#2a363b", + "bgcolor": "#3f5159" + }, + { + "id": 184, + "type": "SetNode", + "pos": [ + 442.45526123046875, + -2784.734375 + ], + "size": [ + 210, + 60 + ], + "flags": { + "collapsed": true + }, + "order": 57, + "mode": 0, + "inputs": [ + { + "name": "IMAGE", + "type": "IMAGE", + "link": 325 + } + ], + "outputs": [ + { + "name": "IMAGE", + "type": "IMAGE", + "links": [ + 320 + ] + } + ], + "title": "Set_pose_images", + "properties": { + "previousName": "pose_images", + "ue_properties": { + "widget_ue_connectable": {}, + "version": "7.7", + "input_ue_unconnectable": {} + } + }, + "widgets_values": [ + "pose_images" + ], + "color": "#2a363b", + "bgcolor": "#3f5159" + }, + { + "id": 138, + "type": "GetNode", + "pos": [ + 1319.8330078125, + -314.357421875 + ], + "size": [ + 210, + 50 + ], + "flags": { + "collapsed": true + }, + "order": 26, + "mode": 0, + "inputs": [], + "outputs": [ + { + "name": "IMAGE", + "type": "IMAGE", + "links": [ + 240 + ] + } + ], + "title": "Get_face_images", + "properties": { 
+ "ue_properties": { + "widget_ue_connectable": {}, + "version": "7.7", + "input_ue_unconnectable": {} + } + }, + "widgets_values": [ + "face_images" + ], + "color": "#2a363b", + "bgcolor": "#3f5159" + }, + { + "id": 102, + "type": "DownloadAndLoadSAM2Model", + "pos": [ + -470.4329528808594, + -2221.738037109375 + ], + "size": [ + 334.4137268066406, + 130 + ], + "flags": {}, + "order": 27, + "mode": 0, + "inputs": [], + "outputs": [ + { + "name": "sam2_model", + "type": "SAM2MODEL", + "links": [ + 185 + ] + } + ], + "properties": { + "cnr_id": "ComfyUI-segment-anything-2", + "ver": "c59676b008a76237002926f684d0ca3a9b29ac54", + "Node name for S&R": "DownloadAndLoadSAM2Model", + "ue_properties": { + "widget_ue_connectable": {}, + "version": "7.7", + "input_ue_unconnectable": {} + } + }, + "widgets_values": [ + "sam2.1_hiera_base_plus.safetensors", + "video", + "cuda", + "fp16" + ] + }, + { + "id": 185, + "type": "Note", + "pos": [ + 257.0601806640625, + -2465.42041015625 + ], + "size": [ + 236.14007568359375, + 88 + ], + "flags": {}, + "order": 28, + "mode": 0, + "inputs": [], + "outputs": [], + "title": "Note: SAM2 detection", + "properties": { + "ue_properties": { + "widget_ue_connectable": {}, + "version": "7.7", + "input_ue_unconnectable": {} + } + }, + "widgets_values": [ + "You can use either the detected bbox or the kay_frame_body_points to positive coordinates, if one fails to creater proper mask, try the other" + ], + "color": "#432", + "bgcolor": "#653" + }, + { + "id": 150, + "type": "INTConstant", + "pos": [ + -1695.39013671875, + -2773.4970703125 + ], + "size": [ + 210, + 58 + ], + "flags": {}, + "order": 29, + "mode": 0, + "inputs": [], + "outputs": [ + { + "name": "value", + "type": "INT", + "links": [ + 257, + 263, + 286 + ] + } + ], + "title": "Width", + "properties": { + "cnr_id": "comfyui-kjnodes", + "ver": "37659859825cea55940a58110525795ce5deb8be", + "Node name for S&R": "INTConstant", + "ue_properties": { + "widget_ue_connectable": {}, + "version": "7.7", + "input_ue_unconnectable": {} + } + }, + "widgets_values": [ + 832 + ], + "color": "#1b4669", + "bgcolor": "#29699c" + }, + { + "id": 27, + "type": "WanVideoSampler", + "pos": [ + 2107.4375, + -539.1071166992188 + ], + "size": [ + 315, + 874.1923217773438 + ], + "flags": {}, + "order": 49, + "mode": 0, + "inputs": [ + { + "name": "model", + "type": "WANVIDEOMODEL", + "link": 63 + }, + { + "name": "image_embeds", + "type": "WANVIDIMAGE_EMBEDS", + "link": 84 + }, + { + "name": "text_embeds", + "shape": 7, + "type": "WANVIDEOTEXTEMBEDS", + "link": 281 + }, + { + "name": "samples", + "shape": 7, + "type": "LATENT", + "link": null + }, + { + "name": "feta_args", + "shape": 7, + "type": "FETAARGS", + "link": null + }, + { + "name": "context_options", + "shape": 7, + "type": "WANVIDCONTEXT", + "link": null + }, + { + "name": "cache_args", + "shape": 7, + "type": "CACHEARGS", + "link": null + }, + { + "name": "flowedit_args", + "shape": 7, + "type": "FLOWEDITARGS", + "link": null + }, + { + "name": "slg_args", + "shape": 7, + "type": "SLGARGS", + "link": null + }, + { + "name": "loop_args", + "shape": 7, + "type": "LOOPARGS", + "link": null + }, + { + "name": "experimental_args", + "shape": 7, + "type": "EXPERIMENTALARGS", + "link": null + }, + { + "name": "sigmas", + "shape": 7, + "type": "SIGMAS", + "link": null + }, + { + "name": "unianimate_poses", + "shape": 7, + "type": "UNIANIMATE_POSE", + "link": null + }, + { + "name": "fantasytalking_embeds", + "shape": 7, + "type": "FANTASYTALKING_EMBEDS", + "link": null + }, + { + 
"name": "uni3c_embeds", + "shape": 7, + "type": "UNI3C_EMBEDS", + "link": null + }, + { + "name": "multitalk_embeds", + "shape": 7, + "type": "MULTITALK_EMBEDS", + "link": null + }, + { + "name": "freeinit_args", + "shape": 7, + "type": "FREEINITARGS", + "link": null + } + ], + "outputs": [ + { + "name": "samples", + "type": "LATENT", + "slot_index": 0, + "links": [ + 33 + ] + }, + { + "name": "denoised_samples", + "type": "LATENT", + "links": null + } + ], + "properties": { + "cnr_id": "ComfyUI-WanVideoWrapper", + "ver": "e5ef9752a7e846b232fc05fd993327a2e870a788", + "Node name for S&R": "WanVideoSampler", + "ue_properties": { + "widget_ue_connectable": {}, + "version": "7.7", + "input_ue_unconnectable": {} + } + }, + "widgets_values": [ + 4, + 1, + 5, + 42, + "fixed", + true, + "dpm++_sde", + 0, + 1, + "", + "comfy", + 0, + -1, + false + ] + }, + { + "id": 30, + "type": "VHS_VideoCombine", + "pos": [ + 3064.7763671875, + -1021.487548828125 + ], + "size": [ + 1478.035400390625, + 334 + ], + "flags": {}, + "order": 65, + "mode": 0, + "inputs": [ + { + "name": "images", + "type": "IMAGE", + "link": 89 + }, + { + "name": "audio", + "shape": 7, + "type": "AUDIO", + "link": 256 + }, + { + "name": "meta_batch", + "shape": 7, + "type": "VHS_BatchManager", + "link": null + }, + { + "name": "vae", + "shape": 7, + "type": "VAE", + "link": null + } + ], + "outputs": [ + { + "name": "Filenames", + "type": "VHS_FILENAMES", + "links": null + } + ], + "properties": { + "cnr_id": "comfyui-videohelpersuite", + "ver": "8e4d79471bf1952154768e8435a9300077b534fa", + "Node name for S&R": "VHS_VideoCombine", + "ue_properties": { + "widget_ue_connectable": {}, + "version": "7.7", + "input_ue_unconnectable": {} + } + }, + "widgets_values": { + "frame_rate": 16, + "loop_count": 0, + "filename_prefix": "Wanimate", + "format": "video/h264-mp4", + "pix_fmt": "yuv420p", + "crf": 19, + "save_metadata": true, + "trim_to_audio": true, + "pingpong": false, + "save_output": true, + "videopreview": { + "hidden": false, + "paused": false, + "params": { + "filename": "Wanimate_00002-audio.mp4", + "subfolder": "", + "type": "temp", + "format": "video/h264-mp4", + "frame_rate": 16, + "workflow": "Wanimate_00002.png", + "fullpath": "N:\\AI\\ComfyUI\\temp\\Wanimate_00002-audio.mp4" + } + } + } + }, + { + "id": 164, + "type": "MarkdownNote", + "pos": [ + -1191.340087890625, + -515.903564453125 + ], + "size": [ + 541.7905883789062, + 240.68362426757812 + ], + "flags": {}, + "order": 30, + "mode": 0, + "inputs": [], + "outputs": [], + "title": "Markdown Note: Model Links", + "properties": { + "ue_properties": { + "widget_ue_connectable": {}, + "version": "7.7", + "input_ue_unconnectable": {} + } + }, + "widgets_values": [ + "Model links:\n\n[https://huggingface.co/Kijai/WanVideo_comfy_fp8_scaled/tree/main/Wan22Animate](https://huggingface.co/Kijai/WanVideo_comfy_fp8_scaled/tree/main/Wan22Animate)\n\nLoRA:\n\n[https://huggingface.co/Kijai/WanVideo_comfy/tree/main/Lightx2v](https://huggingface.co/Kijai/WanVideo_comfy/tree/main/Lightx2v)\n\n[https://huggingface.co/Kijai/WanVideo_comfy/tree/main/LoRAs/Wan22_relight](https://huggingface.co/Kijai/WanVideo_comfy/tree/main/LoRAs/Wan22_relight)\n\nText 
encoder:\n\n[https://huggingface.co/Kijai/WanVideo_comfy/blob/main/umt5-xxl-enc-bf16.safetensors](https://huggingface.co/Kijai/WanVideo_comfy/blob/main/umt5-xxl-enc-bf16.safetensors)\n\nVAE:\n\n[https://huggingface.co/Kijai/WanVideo_comfy/blob/main/Wan2_1_VAE_bf16.safetensors](https://huggingface.co/Kijai/WanVideo_comfy/blob/main/Wan2_1_VAE_bf16.safetensors)" + ], + "color": "#432", + "bgcolor": "#653" + }, + { + "id": 177, + "type": "MarkdownNote", + "pos": [ + -1088.8204345703125, + -2393.302978515625 + ], + "size": [ + 536.27783203125, + 330.03546142578125 + ], + "flags": {}, + "order": 31, + "mode": 0, + "inputs": [], + "outputs": [], + "title": "Preprocessor links", + "properties": { + "ue_properties": { + "widget_ue_connectable": {}, + "version": "7.7", + "input_ue_unconnectable": {} + } + }, + "widgets_values": [ + "Nodes:\n\n[https://github.com/kijai/ComfyUI-WanAnimatePreprocess](https://github.com/kijai/ComfyUI-WanAnimatePreprocess)\n\nModels:\n\nYOLO:\n\n[https://huggingface.co/Wan-AI/Wan2.2-Animate-14B/blob/main/process_checkpoint/det/yolov10m.onnx](https://huggingface.co/Wan-AI/Wan2.2-Animate-14B/blob/main/process_checkpoint/det/yolov10m.onnx)\n\nViTPose\n\nLarge:\n\n[https://huggingface.co/JunkyByte/easy_ViTPose/blob/main/onnx/wholebody/vitpose-l-wholebody.onnx](https://huggingface.co/JunkyByte/easy_ViTPose/blob/main/onnx/wholebody/vitpose-l-wholebody.onnx)\n\nHuge (needs both files):\n\n[https://huggingface.co/Kijai/vitpose_comfy/blob/main/onnx/vitpose_h_wholebody_model.onnx](https://huggingface.co/Kijai/vitpose_comfy/blob/main/onnx/vitpose_h_wholebody_model.onnx)\n\n[https://huggingface.co/Kijai/vitpose_comfy/blob/main/onnx/vitpose_h_wholebody_data.bin](https://huggingface.co/Kijai/vitpose_comfy/blob/main/onnx/vitpose_h_wholebody_data.bin)" + ], + "color": "#432", + "bgcolor": "#653" + }, + { + "id": 62, + "type": "WanVideoAnimateEmbeds", + "pos": [ + 1567.48046875, + -396.0700988769531 + ], + "size": [ + 274.2164001464844, + 370 + ], + "flags": {}, + "order": 41, + "mode": 0, + "inputs": [ + { + "name": "vae", + "type": "WANVAE", + "link": 280 + }, + { + "name": "clip_embeds", + "shape": 7, + "type": "WANVIDIMAGE_CLIPEMBEDS", + "link": 96 + }, + { + "name": "ref_images", + "shape": 7, + "type": "IMAGE", + "link": 282 + }, + { + "name": "pose_images", + "shape": 7, + "type": "IMAGE", + "link": 244 + }, + { + "name": "face_images", + "shape": 7, + "type": "IMAGE", + "link": 240 + }, + { + "name": "bg_images", + "shape": 7, + "type": "IMAGE", + "link": 234 + }, + { + "name": "mask", + "shape": 7, + "type": "MASK", + "link": 247 + }, + { + "name": "width", + "type": "INT", + "widget": { + "name": "width" + }, + "link": 265 + }, + { + "name": "height", + "type": "INT", + "widget": { + "name": "height" + }, + "link": 266 + }, + { + "name": "num_frames", + "type": "INT", + "widget": { + "name": "num_frames" + }, + "link": 268 + } + ], + "outputs": [ + { + "name": "image_embeds", + "type": "WANVIDIMAGE_EMBEDS", + "links": [ + 84 + ] + } + ], + "properties": { + "cnr_id": "ComfyUI-WanVideoWrapper", + "ver": "761b1d191e50d589465e31dc0d40ff7c59b1b7b0", + "Node name for S&R": "WanVideoAnimateEmbeds", + "ue_properties": { + "widget_ue_connectable": { + "width": true, + "height": true, + "num_frames": true + }, + "version": "7.7", + "input_ue_unconnectable": {} + } + }, + "widgets_values": [ + 832, + 480, + 501, + false, + 81, + "disabled", + 1, + 1, + false + ], + "color": "#322", + "bgcolor": "#533" + }, + { + "id": 28, + "type": "WanVideoDecode", + "pos": [ + 2485.57177734375, + 
-441.313720703125 + ], + "size": [ + 280.3896179199219, + 198 + ], + "flags": {}, + "order": 51, + "mode": 0, + "inputs": [ + { + "name": "vae", + "type": "WANVAE", + "link": 279 + }, + { + "name": "samples", + "type": "LATENT", + "link": 33 + } + ], + "outputs": [ + { + "name": "images", + "type": "IMAGE", + "slot_index": 0, + "links": [ + 322 + ] + } + ], + "properties": { + "cnr_id": "ComfyUI-WanVideoWrapper", + "ver": "e5ef9752a7e846b232fc05fd993327a2e870a788", + "Node name for S&R": "WanVideoDecode", + "ue_properties": { + "widget_ue_connectable": {}, + "version": "7.7", + "input_ue_unconnectable": {} + } + }, + "widgets_values": [ + false, + 272, + 272, + 144, + 128, + "default" + ] + }, + { + "id": 178, + "type": "OnnxDetectionModelLoader", + "pos": [ + -506.44635009765625, + -2398.935302734375 + ], + "size": [ + 351.52410888671875, + 106 + ], + "flags": {}, + "order": 32, + "mode": 0, + "inputs": [], + "outputs": [ + { + "name": "model", + "type": "POSEMODEL", + "links": [ + 290 + ] + } + ], + "properties": { + "cnr_id": "comfyui-wananimatepreprocess", + "ver": "e63d6e71ae4c271f3f81211a7ca7f87607b7e50d", + "Node name for S&R": "OnnxDetectionModelLoader", + "aux_id": "kijai/ComfyUI-WanAnimatePreprocess", + "ue_properties": { + "widget_ue_connectable": {}, + "version": "7.7", + "input_ue_unconnectable": {} + } + }, + "widgets_values": [ + "vitpose_h_wholebody_model.onnx", + "onnx\\yolov10m.onnx", + "CUDAExecutionProvider" + ] + }, + { + "id": 172, + "type": "PoseAndFaceDetection", + "pos": [ + 104.5530014038086, + -3028.416015625 + ], + "size": [ + 313.125, + 162 + ], + "flags": {}, + "order": 50, + "mode": 0, + "inputs": [ + { + "name": "model", + "type": "POSEMODEL", + "link": 290 + }, + { + "name": "images", + "type": "IMAGE", + "link": 309 + }, + { + "name": "retarget_image", + "shape": 7, + "type": "IMAGE", + "link": null + }, + { + "name": "width", + "type": "INT", + "widget": { + "name": "width" + }, + "link": 310 + }, + { + "name": "height", + "type": "INT", + "widget": { + "name": "height" + }, + "link": 311 + } + ], + "outputs": [ + { + "name": "pose_data", + "type": "POSEDATA", + "links": [ + 294, + 324 + ] + }, + { + "name": "face_images", + "type": "IMAGE", + "links": [ + 317 + ] + }, + { + "name": "key_frame_body_points", + "type": "STRING", + "links": null + }, + { + "name": "bboxes", + "type": "BBOX", + "links": [ + 321 + ] + }, + { + "name": "face_bboxes", + "type": "BBOX,", + "links": null + } + ], + "properties": { + "cnr_id": "comfyui-wananimatepreprocess", + "ver": "e63d6e71ae4c271f3f81211a7ca7f87607b7e50d", + "Node name for S&R": "PoseAndFaceDetection", + "aux_id": "kijai/ComfyUI-WanAnimatePreprocess", + "ue_properties": { + "widget_ue_connectable": { + "width": true, + "height": true + }, + "version": "7.7", + "input_ue_unconnectable": {} + } + }, + "widgets_values": [ + 832, + 480 + ] + }, + { + "id": 188, + "type": "TSPoseDataSmoother", + "pos": [ + -203.98312003492865, + -2818.662795484184 + ], + "size": [ + 271.1328125, + 198 + ], + "flags": {}, + "order": 53, + "mode": 0, + "inputs": [ + { + "name": "pose_data", + "type": "POSEDATA", + "link": 324 + } + ], + "outputs": [ + { + "name": "IMAGE", + "type": "IMAGE", + "links": [ + 325 + ] + }, + { + "name": "pose_data", + "type": "POSEDATA", + "links": null + } + ], + "properties": { + "ue_properties": { + "widget_ue_connectable": {}, + "input_ue_unconnectable": {} + }, + "Node name for S&R": "TSPoseDataSmoother" + }, + "widgets_values": [ + true, + 0.7, + 12, + 3, + 0.35, + 0.6 + ], + "color": "#222", + 
"bgcolor": "#000" + }, + { + "id": 186, + "type": "TSColorMatch", + "pos": [ + 2819.4147798730037, + -437.09844942643934 + ], + "size": [ + 270, + 58 + ], + "flags": {}, + "order": 56, + "mode": 0, + "inputs": [ + { + "name": "images", + "type": "IMAGE", + "link": 322 + } + ], + "outputs": [ + { + "name": "IMAGE", + "type": "IMAGE", + "links": [ + 323 + ] + } + ], + "properties": { + "ue_properties": { + "widget_ue_connectable": {}, + "input_ue_unconnectable": {} + }, + "Node name for S&R": "TSColorMatch" + }, + "widgets_values": [ + 81 + ], + "color": "#222", + "bgcolor": "#000" + } + ], + "links": [ + [ + 33, + 27, + 0, + 28, + 1, + "LATENT" + ], + [ + 59, + 22, + 0, + 48, + 0, + "WANVIDEOMODEL" + ], + [ + 62, + 48, + 0, + 50, + 0, + "WANVIDEOMODEL" + ], + [ + 63, + 50, + 0, + 27, + 0, + "WANVIDEOMODEL" + ], + [ + 64, + 51, + 0, + 50, + 1, + "BLOCKSWAPARGS" + ], + [ + 82, + 57, + 0, + 64, + 0, + "IMAGE" + ], + [ + 84, + 62, + 0, + 27, + 1, + "WANVIDIMAGE_EMBEDS" + ], + [ + 87, + 42, + 0, + 66, + 0, + "IMAGE" + ], + [ + 89, + 66, + 0, + 30, + 0, + "IMAGE" + ], + [ + 96, + 70, + 0, + 62, + 1, + "WANVIDIMAGE_CLIPEMBEDS" + ], + [ + 97, + 71, + 0, + 70, + 0, + "CLIP_VISION" + ], + [ + 100, + 35, + 0, + 22, + 0, + "WANCOMPILEARGS" + ], + [ + 107, + 77, + 0, + 66, + 1, + "IMAGE" + ], + [ + 185, + 102, + 0, + 104, + 0, + "SAM2MODEL" + ], + [ + 231, + 64, + 0, + 128, + 0, + "*" + ], + [ + 233, + 99, + 0, + 130, + 0, + "*" + ], + [ + 234, + 131, + 0, + 62, + 5, + "IMAGE" + ], + [ + 235, + 133, + 0, + 70, + 1, + "IMAGE" + ], + [ + 236, + 134, + 0, + 77, + 0, + "IMAGE" + ], + [ + 239, + 137, + 0, + 77, + 1, + "IMAGE" + ], + [ + 240, + 138, + 0, + 62, + 4, + "IMAGE" + ], + [ + 243, + 140, + 0, + 77, + 2, + "IMAGE" + ], + [ + 244, + 141, + 0, + 62, + 3, + "IMAGE" + ], + [ + 245, + 108, + 0, + 142, + 0, + "*" + ], + [ + 246, + 142, + 0, + 99, + 1, + "MASK" + ], + [ + 247, + 143, + 0, + 62, + 6, + "MASK" + ], + [ + 248, + 63, + 0, + 144, + 0, + "*" + ], + [ + 249, + 145, + 0, + 77, + 3, + "IMAGE" + ], + [ + 250, + 146, + 0, + 99, + 0, + "IMAGE" + ], + [ + 251, + 130, + 0, + 75, + 0, + "IMAGE" + ], + [ + 255, + 63, + 2, + 148, + 0, + "*" + ], + [ + 256, + 149, + 0, + 30, + 1, + "AUDIO" + ], + [ + 257, + 150, + 0, + 63, + 2, + "INT" + ], + [ + 258, + 151, + 0, + 63, + 3, + "INT" + ], + [ + 263, + 150, + 0, + 153, + 0, + "*" + ], + [ + 264, + 151, + 0, + 154, + 0, + "*" + ], + [ + 265, + 155, + 0, + 62, + 7, + "INT" + ], + [ + 266, + 156, + 0, + 62, + 8, + "INT" + ], + [ + 267, + 63, + 1, + 157, + 0, + "*" + ], + [ + 268, + 158, + 0, + 62, + 9, + "INT" + ], + [ + 278, + 38, + 0, + 161, + 0, + "*" + ], + [ + 279, + 162, + 0, + 28, + 0, + "WANVAE" + ], + [ + 280, + 163, + 0, + 62, + 0, + "WANVAE" + ], + [ + 281, + 65, + 0, + 27, + 2, + "WANVIDEOTEXTEMBEDS" + ], + [ + 282, + 133, + 0, + 62, + 2, + "IMAGE" + ], + [ + 286, + 150, + 0, + 64, + 2, + "INT" + ], + [ + 287, + 151, + 0, + 64, + 3, + "INT" + ], + [ + 289, + 171, + 0, + 48, + 1, + "WANVIDLORA" + ], + [ + 290, + 178, + 0, + 172, + 0, + "POSEMODEL" + ], + [ + 294, + 172, + 0, + 173, + 0, + "POSEDATA" + ], + [ + 306, + 144, + 0, + 180, + 0, + "IMAGE" + ], + [ + 307, + 180, + 1, + 173, + 1, + "INT" + ], + [ + 308, + 180, + 2, + 173, + 2, + "INT" + ], + [ + 309, + 180, + 0, + 172, + 1, + "IMAGE" + ], + [ + 310, + 180, + 1, + 172, + 3, + "INT" + ], + [ + 311, + 180, + 2, + 172, + 4, + "INT" + ], + [ + 314, + 104, + 0, + 182, + 0, + "MASK" + ], + [ + 315, + 182, + 0, + 108, + 0, + "MASK" + ], + [ + 316, + 180, + 0, + 104, + 1, + "IMAGE" + ], + [ + 317, + 172, 
+ 1, + 183, + 0, + "*" + ], + [ + 318, + 183, + 0, + 174, + 0, + "IMAGE" + ], + [ + 320, + 184, + 0, + 181, + 0, + "IMAGE" + ], + [ + 321, + 172, + 3, + 104, + 4, + "BBOX" + ], + [ + 322, + 28, + 0, + 186, + 0, + "IMAGE" + ], + [ + 323, + 186, + 0, + 42, + 0, + "IMAGE" + ], + [ + 324, + 172, + 0, + 188, + 0, + "POSEDATA" + ], + [ + 325, + 188, + 0, + 184, + 0, + "IMAGE" + ] + ], + "groups": [ + { + "id": 1, + "title": "Reference Image", + "bounding": [ + -1209.306884765625, + -1833.3065185546875, + 990.079833984375, + 724.450439453125 + ], + "color": "#3f789e", + "font_size": 24, + "flags": {} + }, + { + "id": 3, + "title": "Preprocessing", + "bounding": [ + -1227.062744140625, + -3202.685302734375, + 4104.810546875, + 1281.6610107421875 + ], + "color": "#3f789e", + "font_size": 24, + "flags": {} + }, + { + "id": 4, + "title": "Models", + "bounding": [ + -1224.449951171875, + -1055.772705078125, + 2156.392578125, + 1012.5536499023438 + ], + "color": "#88A", + "font_size": 24, + "flags": {} + }, + { + "id": 5, + "title": "Result collage", + "bounding": [ + 2370.66357421875, + -1369.016845703125, + 629.7467041015625, + 605.8086547851562 + ], + "color": "#3f789e", + "font_size": 24, + "flags": {} + } + ], + "config": {}, + "extra": { + "ds": { + "scale": 0.1, + "offset": [ + 9082.927464733464, + 5706.781227099167 + ] + }, + "frontendVersion": "1.34.9", + "node_versions": { + "ComfyUI-WanVideoWrapper": "5a2383621a05825d0d0437781afcb8552d9590fd", + "ComfyUI-KJNodes": "a5bd3c86c8ed6b83c55c2d0e7a59515b15a0137f", + "ComfyUI-VideoHelperSuite": "0a75c7958fe320efcb052f1d9f8451fd20c730a8" + }, + "VHS_latentpreview": false, + "VHS_latentpreviewrate": 0, + "VHS_MetadataImage": true, + "VHS_KeepIntermediate": true, + "workflowRendererVersion": "LG", + "ue_links": [] + }, + "version": 0.4 +} \ No newline at end of file diff --git a/zavodik/nodes/comfyui-teskors-utils/nodes/__pycache__/color_match.cpython-313.pyc b/zavodik/nodes/comfyui-teskors-utils/nodes/__pycache__/color_match.cpython-313.pyc new file mode 100644 index 0000000000000000000000000000000000000000..5f33f1c5615b2952c779920ee09c0de5df7eb79e Binary files /dev/null and b/zavodik/nodes/comfyui-teskors-utils/nodes/__pycache__/color_match.cpython-313.pyc differ diff --git a/zavodik/nodes/comfyui-teskors-utils/nodes/__pycache__/load_video_batch.cpython-313.pyc b/zavodik/nodes/comfyui-teskors-utils/nodes/__pycache__/load_video_batch.cpython-313.pyc new file mode 100644 index 0000000000000000000000000000000000000000..da4f3fbc564e8096d9b6b2d022a33b38f358669c Binary files /dev/null and b/zavodik/nodes/comfyui-teskors-utils/nodes/__pycache__/load_video_batch.cpython-313.pyc differ diff --git a/zavodik/nodes/comfyui-teskors-utils/nodes/__pycache__/openpose_smoother.cpython-313.pyc b/zavodik/nodes/comfyui-teskors-utils/nodes/__pycache__/openpose_smoother.cpython-313.pyc new file mode 100644 index 0000000000000000000000000000000000000000..162c25dafdf5677748090438cf8ab2390cc28437 Binary files /dev/null and b/zavodik/nodes/comfyui-teskors-utils/nodes/__pycache__/openpose_smoother.cpython-313.pyc differ diff --git a/zavodik/nodes/comfyui-teskors-utils/nodes/__pycache__/preview_image_metadata.cpython-313.pyc b/zavodik/nodes/comfyui-teskors-utils/nodes/__pycache__/preview_image_metadata.cpython-313.pyc new file mode 100644 index 0000000000000000000000000000000000000000..982c0eea82340f383b773f3c7a6bdfad44ea05b0 Binary files /dev/null and b/zavodik/nodes/comfyui-teskors-utils/nodes/__pycache__/preview_image_metadata.cpython-313.pyc differ diff --git 
a/zavodik/nodes/comfyui-teskors-utils/nodes/__pycache__/rename_files.cpython-313.pyc b/zavodik/nodes/comfyui-teskors-utils/nodes/__pycache__/rename_files.cpython-313.pyc new file mode 100644 index 0000000000000000000000000000000000000000..0eeeb22d63866178b246ae57d68358bc04c55fa4 Binary files /dev/null and b/zavodik/nodes/comfyui-teskors-utils/nodes/__pycache__/rename_files.cpython-313.pyc differ diff --git a/zavodik/nodes/comfyui-teskors-utils/nodes/__pycache__/save_load_pose.cpython-313.pyc b/zavodik/nodes/comfyui-teskors-utils/nodes/__pycache__/save_load_pose.cpython-313.pyc new file mode 100644 index 0000000000000000000000000000000000000000..bacfdddc374f11a121cd3d25a2111085403f01a2 Binary files /dev/null and b/zavodik/nodes/comfyui-teskors-utils/nodes/__pycache__/save_load_pose.cpython-313.pyc differ diff --git a/zavodik/nodes/comfyui-teskors-utils/nodes/__pycache__/video_combine_metadata.cpython-313.pyc b/zavodik/nodes/comfyui-teskors-utils/nodes/__pycache__/video_combine_metadata.cpython-313.pyc new file mode 100644 index 0000000000000000000000000000000000000000..357e8837ae24db55cf580f0605066d90551eca0b Binary files /dev/null and b/zavodik/nodes/comfyui-teskors-utils/nodes/__pycache__/video_combine_metadata.cpython-313.pyc differ diff --git a/zavodik/nodes/comfyui-teskors-utils/nodes/color_match.py b/zavodik/nodes/comfyui-teskors-utils/nodes/color_match.py new file mode 100644 index 0000000000000000000000000000000000000000..db7af18f735017751047f33112fdce37067365dd --- /dev/null +++ b/zavodik/nodes/comfyui-teskors-utils/nodes/color_match.py @@ -0,0 +1,191 @@ +import os +import uuid +import shutil +import cv2 +import torch +import numpy as np + +import folder_paths + + +class TSColorMatchSequentialBias: + + @classmethod + def INPUT_TYPES(cls): + return { + "required": { + "images": ("IMAGE",), + "chunk_size": ("INT", {"default": 81, "min": 1, "max": 99999, "step": 1}), + } + } + + RETURN_TYPES = ("IMAGE",) + RETURN_NAMES = ("IMAGE",) + FUNCTION = "process" + CATEGORY = "TS_Nodes/Video" + + EPS = 1e-6 + + FPS = 30 + HEAD_WIN = 1 + TAIL_WIN = 1 + SAVE_TEMP_CHUNKS = True + DEBUG = False + + @staticmethod + def _rgb01_to_bgr8(rgb01: np.ndarray) -> np.ndarray: + out = np.clip(rgb01 * 255.0, 0.0, 255.0).astype(np.uint8) + return cv2.cvtColor(out, cv2.COLOR_RGB2BGR) + + @staticmethod + def _bgr8_to_rgb01(bgr8: np.ndarray) -> np.ndarray: + rgb = cv2.cvtColor(bgr8, cv2.COLOR_BGR2RGB).astype(np.float32) / 255.0 + return rgb + + @staticmethod + def _sample_rgb01_all_pixels_from_bgr8(frame_bgr8: np.ndarray) -> np.ndarray: + small = cv2.resize(frame_bgr8, (480, 270), interpolation=cv2.INTER_AREA) + rgb01 = cv2.cvtColor(small, cv2.COLOR_BGR2RGB).astype(np.float32) / 255.0 + return rgb01.reshape(-1, 3) + + def _compute_stats(self, frames_bgr8: list[np.ndarray]) -> tuple[np.ndarray, np.ndarray]: + pxs = [self._sample_rgb01_all_pixels_from_bgr8(fr) for fr in frames_bgr8] + px = np.concatenate(pxs, axis=0) + mean = px.mean(axis=0).astype(np.float32) + std = px.std(axis=0).astype(np.float32) + return mean, std + + def _apply_color_transfer_rgb01( + self, + frame_bgr8: np.ndarray, + target_mean: np.ndarray, + target_std: np.ndarray, + source_mean: np.ndarray, + source_std: np.ndarray, + ) -> np.ndarray: + rgb01 = self._bgr8_to_rgb01(frame_bgr8) + scale = target_std / (source_std + self.EPS) + out = (rgb01 - source_mean) * scale + target_mean + return np.clip(out, 0.0, 1.0) + + @staticmethod + def _ensure_mp4v_writer(path: str, fps: int, w: int, h: int) -> cv2.VideoWriter: + fourcc = 
cv2.VideoWriter_fourcc(*"mp4v") + vw = cv2.VideoWriter(path, fourcc, float(fps), (w, h)) + if not vw.isOpened(): + raise RuntimeError(f"Can't open VideoWriter for: {path}") + return vw + + def process(self, images, chunk_size: int): + images_np = images.detach().cpu().numpy() + n = int(images_np.shape[0]) if images_np is not None else 0 + if n <= 0: + return (images,) + + h = int(images_np.shape[1]) + w = int(images_np.shape[2]) + + out_dir = folder_paths.get_output_directory() + run_id = uuid.uuid4().hex[:10] + base_name = f"matched_sequential_transfer_{run_id}" + + tmp_dir = os.path.join(out_dir, f".tmp_{base_name}") + os.makedirs(tmp_dir, exist_ok=True) + + final_path_tmp = os.path.join(tmp_dir, f"{base_name}.mp4") + final_path = os.path.join(out_dir, f"{base_name}.mp4") + + out_final = None + out_frames_rgb01: list[np.ndarray] = [] + + fps = int(self.FPS) + head_win = int(self.HEAD_WIN) + tail_win = int(self.TAIL_WIN) + save_temp_chunks = bool(self.SAVE_TEMP_CHUNKS) + debug = bool(self.DEBUG) + + try: + if debug: + print("Processing images chunk by chunk and building final video directly...") + print(f" N={n}, chunk_size={chunk_size}, fps={fps}, size={w}x{h}") + print(f" save_temp_chunks={save_temp_chunks}, head_win={head_win}, tail_win={tail_win}") + + out_final = self._ensure_mp4v_writer(final_path_tmp, fps, w, h) + + ci = 0 + prev_modified_tail_bgr8: list[np.ndarray] = [] + + for start in range(0, n, chunk_size): + end = min(start + chunk_size, n) + length = end - start + + raw_frames_bgr8: list[np.ndarray] = [] + for i in range(start, end): + raw_rgb01 = images_np[i].astype(np.float32) + raw_frames_bgr8.append(self._rgb01_to_bgr8(raw_rgb01)) + + processed_frames_bgr8: list[np.ndarray] = [] + + if ci == 0: + processed_frames_bgr8 = raw_frames_bgr8 + if debug: + print(f"Chunk {ci}: Keep original (base for next)") + else: + target_m, target_s = self._compute_stats(prev_modified_tail_bgr8) + + h1 = min(head_win, length) + raw_head_frames_bgr8 = raw_frames_bgr8[:h1] + source_m, source_s = self._compute_stats(raw_head_frames_bgr8) + + if debug: + print(f"Chunk {ci-1}->{ci}:") + print(f" Target Mean: {target_m}, Std: {target_s}") + print(f" Source Mean: {source_m}, Std: {source_s}") + + for fr_bgr8 in raw_frames_bgr8: + rgb01 = self._apply_color_transfer_rgb01(fr_bgr8, target_m, target_s, source_m, source_s) + res_bgr8 = self._rgb01_to_bgr8(rgb01) + processed_frames_bgr8.append(res_bgr8) + + for fr_bgr8 in processed_frames_bgr8: + out_final.write(fr_bgr8) + out_frames_rgb01.append(self._bgr8_to_rgb01(fr_bgr8)) + + tw = min(tail_win, length) + tail_start = max(0, length - tw) + prev_modified_tail_bgr8 = processed_frames_bgr8[tail_start:] + + if save_temp_chunks: + chunk_filename_tmp = os.path.join(tmp_dir, f"chunk_{ci:03d}.mp4") + out_chunk = self._ensure_mp4v_writer(chunk_filename_tmp, fps, w, h) + for fr_bgr8 in processed_frames_bgr8: + out_chunk.write(fr_bgr8) + out_chunk.release() + if debug: + print(f"Saved TEMP chunk_{ci:03d}.mp4 with {length} frames.") + + ci += 1 + + out_final.release() + out_final = None + + shutil.move(final_path_tmp, final_path) + + if debug: + print("\nDONE. 
Saved final video:", final_path) + + finally: + if out_final is not None: + try: + out_final.release() + except Exception: + pass + + try: + if os.path.isdir(tmp_dir): + shutil.rmtree(tmp_dir, ignore_errors=True) + except Exception: + pass + + out_tensor = torch.from_numpy(np.stack(out_frames_rgb01, axis=0).astype(np.float32)) + return (out_tensor,) diff --git a/zavodik/nodes/comfyui-teskors-utils/nodes/load_video_batch.py b/zavodik/nodes/comfyui-teskors-utils/nodes/load_video_batch.py new file mode 100644 index 0000000000000000000000000000000000000000..f418973dc6142460b04b81b62531cb1bb1e1954c --- /dev/null +++ b/zavodik/nodes/comfyui-teskors-utils/nodes/load_video_batch.py @@ -0,0 +1,359 @@ +import os +import re +import shutil +import subprocess +import time +from collections.abc import Mapping + +import torch +import numpy as np + +try: + import cv2 + + _has_cv2 = True +except Exception: + _has_cv2 = False + +ENCODE_ARGS = ("utf-8", "backslashreplace") + + +def _pick_ffmpeg_path(): + if "VHS_FORCE_FFMPEG_PATH" in os.environ: + p = os.environ.get("VHS_FORCE_FFMPEG_PATH") + if p: + return p + + system_ffmpeg = shutil.which("ffmpeg") + if system_ffmpeg is not None: + return system_ffmpeg + + if os.path.isfile("ffmpeg"): + return os.path.abspath("ffmpeg") + if os.path.isfile("ffmpeg.exe"): + return os.path.abspath("ffmpeg.exe") + + return None + + +ffmpeg_path = _pick_ffmpeg_path() + + +def get_audio(file, start_time=0, duration=0): + if ffmpeg_path is None: + raise Exception("ffmpeg not found. Put ffmpeg in PATH, or set VHS_FORCE_FFMPEG_PATH env var.") + + args = [ffmpeg_path, "-i", file] + if start_time > 0: + args += ["-ss", str(start_time)] + if duration > 0: + args += ["-t", str(duration)] + + try: + # как в utils: вытаскиваем raw f32le в stdout + res = subprocess.run(args + ["-f", "f32le", "-"], capture_output=True, check=True) + audio = torch.frombuffer(bytearray(res.stdout), dtype=torch.float32) + match = re.search(r", (\d+) Hz, (\w+), ", res.stderr.decode(*ENCODE_ARGS)) + except subprocess.CalledProcessError as e: + raise Exception(f"Failed to extract audio from {file}:\n" + e.stderr.decode(*ENCODE_ARGS)) + + if match: + ar = int(match.group(1)) + ac = {"mono": 1, "stereo": 2}.get(match.group(2), 2) + else: + ar = 44100 + ac = 2 + + if audio.numel() == 0: + empty = torch.zeros((1, 1, 0), dtype=torch.float32) + return {"waveform": empty, "sample_rate": ar} + + audio = audio.reshape((-1, ac)).transpose(0, 1).unsqueeze(0) + return {"waveform": audio, "sample_rate": ar} + + +class LazyAudioMap(Mapping): + def __init__(self, file, start_time, duration): + self.file = file + self.start_time = start_time + self.duration = duration + self._dict = None + + def _ensure(self): + if self._dict is None: + self._dict = get_audio(self.file, self.start_time, self.duration) + + def __getitem__(self, key): + self._ensure() + return self._dict[key] + + def __iter__(self): + self._ensure() + return iter(self._dict) + + def __len__(self): + self._ensure() + return len(self._dict) + + +def lazy_get_audio(file, start_time=0, duration=0, **kwargs): + return LazyAudioMap(file, start_time, duration) + + +def extract_first_number(s): + match = re.search(r"\d+", s) + return int(match.group()) if match else float("inf") + + +sort_methods = [ + "None", + "Alphabetical (ASC)", + "Alphabetical (DESC)", + "Numerical (ASC)", + "Numerical (DESC)", + "Datetime (ASC)", + "Datetime (DESC)", +] + + +def sort_by(items, base_path=".", method=None): + def fullpath(x): + return os.path.join(base_path, x) + + def 
diff --git a/zavodik/nodes/comfyui-teskors-utils/nodes/load_video_batch.py b/zavodik/nodes/comfyui-teskors-utils/nodes/load_video_batch.py new file mode 100644 index 0000000000000000000000000000000000000000..f418973dc6142460b04b81b62531cb1bb1e1954c --- /dev/null +++ b/zavodik/nodes/comfyui-teskors-utils/nodes/load_video_batch.py @@ -0,0 +1,359 @@ +import os +import re +import shutil +import subprocess +import time +from collections.abc import Mapping + +import torch +import numpy as np + +try: + import cv2 + + _has_cv2 = True +except Exception: + _has_cv2 = False + +ENCODE_ARGS = ("utf-8", "backslashreplace") + + +def _pick_ffmpeg_path(): + if "VHS_FORCE_FFMPEG_PATH" in os.environ: + p = os.environ.get("VHS_FORCE_FFMPEG_PATH") + if p: + return p + + system_ffmpeg = shutil.which("ffmpeg") + if system_ffmpeg is not None: + return system_ffmpeg + + if os.path.isfile("ffmpeg"): + return os.path.abspath("ffmpeg") + if os.path.isfile("ffmpeg.exe"): + return os.path.abspath("ffmpeg.exe") + + return None + + +ffmpeg_path = _pick_ffmpeg_path() + + +def get_audio(file, start_time=0, duration=0): + if ffmpeg_path is None: + raise Exception("ffmpeg not found. Put ffmpeg in PATH, or set VHS_FORCE_FFMPEG_PATH env var.") + + args = [ffmpeg_path, "-i", file] + if start_time > 0: + args += ["-ss", str(start_time)] + if duration > 0: + args += ["-t", str(duration)] + + try: + # as in the VHS utils: dump raw f32le samples to stdout + res = subprocess.run(args + ["-f", "f32le", "-"], capture_output=True, check=True) + audio = torch.frombuffer(bytearray(res.stdout), dtype=torch.float32) + match = re.search(r", (\d+) Hz, (\w+), ", res.stderr.decode(*ENCODE_ARGS)) + except subprocess.CalledProcessError as e: + raise Exception(f"Failed to extract audio from {file}:\n" + e.stderr.decode(*ENCODE_ARGS)) + + if match: + ar = int(match.group(1)) + ac = {"mono": 1, "stereo": 2}.get(match.group(2), 2) + else: + ar = 44100 + ac = 2 + + if audio.numel() == 0: + empty = torch.zeros((1, 1, 0), dtype=torch.float32) + return {"waveform": empty, "sample_rate": ar} + + audio = audio.reshape((-1, ac)).transpose(0, 1).unsqueeze(0) + return {"waveform": audio, "sample_rate": ar} + + +class LazyAudioMap(Mapping): + def __init__(self, file, start_time, duration): + self.file = file + self.start_time = start_time + self.duration = duration + self._dict = None + + def _ensure(self): + if self._dict is None: + self._dict = get_audio(self.file, self.start_time, self.duration) + + def __getitem__(self, key): + self._ensure() + return self._dict[key] + + def __iter__(self): + self._ensure() + return iter(self._dict) + + def __len__(self): + self._ensure() + return len(self._dict) + + +def lazy_get_audio(file, start_time=0, duration=0, **kwargs): + return LazyAudioMap(file, start_time, duration) + + +def extract_first_number(s): + match = re.search(r"\d+", s) + return int(match.group()) if match else float("inf") + + +sort_methods = [ + "None", + "Alphabetical (ASC)", + "Alphabetical (DESC)", + "Numerical (ASC)", + "Numerical (DESC)", + "Datetime (ASC)", + "Datetime (DESC)", +] + + +def sort_by(items, base_path=".", method=None): + def fullpath(x): + return os.path.join(base_path, x) + + def get_timestamp(path): + try: + return os.path.getmtime(path) + except FileNotFoundError: + return float("-inf") + + if method == "Alphabetical (ASC)": + return sorted(items) + elif method == "Alphabetical (DESC)": + return sorted(items, reverse=True) + elif method == "Numerical (ASC)": + return sorted(items, key=lambda x: extract_first_number(os.path.splitext(x)[0])) + elif method == "Numerical (DESC)": + return sorted(items, key=lambda x: extract_first_number(os.path.splitext(x)[0]), reverse=True) + elif method == "Datetime (ASC)": + return sorted(items, key=lambda x: get_timestamp(fullpath(x))) + elif method == "Datetime (DESC)": + return sorted(items, key=lambda x: get_timestamp(fullpath(x)), reverse=True) + else: + return items + + +def target_size(width, height, custom_width, custom_height, downscale_ratio=8): + if downscale_ratio is None: + downscale_ratio = 8 + + if custom_width == 0 and custom_height == 0: + new_w, new_h = width, height + elif custom_height == 0: + new_h = int(height * (custom_width / width)) + new_w = int(custom_width) + elif custom_width == 0: + new_w = int(width * (custom_height / height)) + new_h = int(custom_height) + else: + new_w, new_h = int(custom_width), int(custom_height) + + new_w = int(new_w / downscale_ratio + 0.5) * downscale_ratio + new_h = int(new_h / downscale_ratio + 0.5) * downscale_ratio + return new_w, new_h + + +def _read_frames_vhs_like( + video_path: str, + force_rate: float = 0, + custom_width: int = 0, + custom_height: int = 0, + downscale_ratio: int = 8, + frame_load_cap: int = 0, + select_every_nth: int = 1, +): + + if select_every_nth is None or select_every_nth < 1: + select_every_nth = 1 + + if not _has_cv2: + raise RuntimeError("OpenCV (cv2) not available. Install opencv-python.") + + cap = cv2.VideoCapture(video_path) + if not cap.isOpened() or not cap.grab(): + raise FileNotFoundError(f"Cannot open video: {video_path}") + + fps = cap.get(cv2.CAP_PROP_FPS) + if fps is None or fps <= 0: + fps = 30.0 + + width = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH)) + height = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT)) + + ok0, frame0 = cap.retrieve() + if not ok0 or frame0 is None: + cap.release() + raise RuntimeError(f"Cannot retrieve first frame from: {video_path}") + + if width <= 0 or height <= 0: + height, width = frame0.shape[:2] + + base_dt = 1.0 / float(fps) + target_dt = base_dt if force_rate == 0 else (1.0 / float(force_rate)) + + effective_dt = target_dt * float(select_every_nth) + loaded_fps = 1.0 / effective_dt if effective_dt > 0 else float(fps) + + new_w, new_h = target_size(width, height, custom_width, custom_height, downscale_ratio) + do_resize = (new_w != width) or (new_h != height) + + def _process_frame(bgr): + rgb = cv2.cvtColor(bgr, cv2.COLOR_BGR2RGB) + if do_resize: + rgb = cv2.resize(rgb, (new_w, new_h), interpolation=cv2.INTER_LANCZOS4) + return rgb + + frames = [] + + evaluated = -1 + + def _maybe_add(bgr): + nonlocal evaluated + evaluated += 1 + if (evaluated % select_every_nth) != 0: + return + frames.append(_process_frame(bgr)) + + _maybe_add(frame0) + + if frame_load_cap > 0 and len(frames) >= frame_load_cap: + cap.release() + arr = np.stack(frames, axis=0).astype(np.float32) / 255.0 + t = torch.from_numpy(arr) + loaded_duration = float(len(t) * effective_dt) + start_time = 0.0 + return t, float(fps), float(loaded_fps), loaded_duration, start_time + + # Frame 0 was already consumed above, so start the resampling accumulator at zero. + time_offset = 0.0 + + while cap.isOpened(): + if time_offset < target_dt: + ok = cap.grab() + if not ok: + break + time_offset += base_dt +
continue + + ok, frame_bgr = cap.retrieve() + if not ok or frame_bgr is None: + break + + _maybe_add(frame_bgr) + + if frame_load_cap > 0 and len(frames) >= frame_load_cap: + break + + time_offset -= target_dt + + cap.release() + + if len(frames) == 0: + raise RuntimeError(f"No frames could be read from: {video_path}") + + arr = np.stack(frames, axis=0).astype(np.float32) / 255.0 + t = torch.from_numpy(arr) + + loaded_duration = float(len(t) * effective_dt) + start_time = 0.0 + return t, float(fps), float(loaded_fps), loaded_duration, start_time + + +class LoadVideoBatchListFromDir: + @classmethod + def INPUT_TYPES(s): + return { + "required": { + "directory": ("STRING", {"default": ""}), + "force_rate": ("FLOAT", {"default": 0, "min": 0, "max": 120, "step": 1}), + "width": ("INT", {"default": 720, "min": 0, "max": 8192, "step": 1}), + "height": ("INT", {"default": 1280, "min": 0, "max": 8192, "step": 1}), + }, + "optional": { + "video_load_cap": ("INT", {"default": 0, "min": 0, "step": 1}), + "frame_load_cap": ("INT", {"default": 0, "min": 0, "step": 1}), + "select_every_nth": ("INT", {"default": 1, "min": 1, "max": 0xFFFFFFFF, "step": 1}), + "start_index": ("INT", {"default": 0, "min": 0, "max": 0xFFFFFFFFFFFFFFFF, "step": 1}), + "load_always": ("BOOLEAN", {"default": False, "label_on": "enabled", "label_off": "disabled"}), + "sort_method": (sort_methods,), + }, + } + + RETURN_TYPES = ("IMAGE", "AUDIO", "INT") + RETURN_NAMES = ("IMAGE", "audio", "COUNT") + OUTPUT_IS_LIST = (True, True, False) + + FUNCTION = "load_videos" + CATEGORY = "video" + + @classmethod + def IS_CHANGED(cls, **kwargs): + if kwargs.get("load_always"): + # float("NaN") never compares equal to itself, so ComfyUI re-runs the node every prompt. + return float("NaN") + return hash(frozenset(kwargs.items())) + + def load_videos( + self, + directory: str, + force_rate: float = 0, + width: int = 0, + height: int = 0, + video_load_cap: int = 0, + frame_load_cap: int = 0, + select_every_nth: int = 1, + start_index: int = 0, + load_always: bool = False, + sort_method=None, + ): + if not os.path.isdir(directory): + raise FileNotFoundError(f"Directory '{directory}' cannot be found.") + + files = os.listdir(directory) + if len(files) == 0: + raise FileNotFoundError(f"No files in directory '{directory}'.") + + valid_ext = {".mp4", ".mov", ".mkv", ".webm", ".avi", ".m4v"} + files = [ + f + for f in files + if os.path.isfile(os.path.join(directory, f)) and os.path.splitext(f)[1].lower() in valid_ext + ] + if len(files) == 0: + raise FileNotFoundError(f"No video files in directory '{directory}' (expected: {sorted(valid_ext)}).") + + files = sort_by(files, directory, sort_method) + files = files[start_index:] + if video_load_cap > 0: + files = files[:video_load_cap] + + images_list = [] + audios_list = [] + + for fname in files: + path = os.path.join(directory, fname) + + vid, source_fps, loaded_fps, loaded_duration, start_time = _read_frames_vhs_like( + path, + force_rate=force_rate, + custom_width=width, + custom_height=height, + downscale_ratio=8, + frame_load_cap=frame_load_cap, + select_every_nth=select_every_nth, + ) + + images_list.append(vid) + + audio = lazy_get_audio(path, start_time, loaded_duration) + audios_list.append(audio) + + return (images_list, audios_list, len(images_list))
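The resampling loop above implements a simple timing model: frames are decoded at the source rate, kept whenever the accumulated source time crosses the target interval set by force_rate, then thinned again by select_every_nth, so the effective output rate is force_rate / select_every_nth (or source fps / select_every_nth when force_rate is 0). A small self-contained replay of that arithmetic with made-up numbers:

def effective_fps(source_fps: float, force_rate: float, select_every_nth: int) -> float:
    # Mirrors the base_dt / target_dt / effective_dt computation in _read_frames_vhs_like.
    base_dt = 1.0 / source_fps
    target_dt = base_dt if force_rate == 0 else 1.0 / force_rate
    effective_dt = target_dt * select_every_nth
    return 1.0 / effective_dt

assert abs(effective_fps(30.0, 0, 1) - 30.0) < 1e-9    # passthrough
assert abs(effective_fps(30.0, 16.0, 1) - 16.0) < 1e-9  # retimed to 16 fps
assert abs(effective_fps(30.0, 16.0, 2) - 8.0) < 1e-9   # retimed, then every 2nd frame
print("timing model OK")

The loaded_fps/loaded_duration pair returned per clip is what keeps the lazily extracted audio aligned with the thinned frame batch.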
diff --git a/zavodik/nodes/comfyui-teskors-utils/nodes/openpose_smoother.py b/zavodik/nodes/comfyui-teskors-utils/nodes/openpose_smoother.py new file mode 100644 index 0000000000000000000000000000000000000000..53c2d70afa8c4c509d23eb3f4caf17e221991327 --- /dev/null +++ b/zavodik/nodes/comfyui-teskors-utils/nodes/openpose_smoother.py @@ -0,0 +1,1837 @@ +from __future__ import annotations + +import copy +import math +import pickle +import threading +from dataclasses import dataclass +from typing import Any, Dict, List, Optional, Tuple, Union + +import numpy as np +import cv2 +import torch + + +# ============================================================ +# ComfyUI Node (pose_data + PKL) +# ============================================================ + +_GLOBAL_LOCK = threading.Lock() + + +class KPSSmoothPoseDataAndRender: + + @classmethod + def INPUT_TYPES(cls): + return { + "required": { + "pose_data": ("POSEDATA",), + "filter_extra_people": ("BOOLEAN", {"default": True}), + "smooth_alpha": ("FLOAT", {"default": 0.7, "min": 0.01, "max": 0.99, "step": 0.01}), + "gap_frames": ("INT", {"default": 12, "min": 0, "max": 100, "step": 1}), + "min_run_frames": ("INT", {"default": 3, "min": 1, "max": 60, "step": 1}), + "conf_thresh_body": ("FLOAT", {"default": 0.35, "min": 0.0, "max": 1.0, "step": 0.01}), + "conf_thresh_hands": ("FLOAT", {"default": 0.60, "min": 0.0, "max": 1.0, "step": 0.01}), + } + } + + RETURN_TYPES = ("IMAGE", "POSEDATA") + RETURN_NAMES = ("IMAGE", "pose_data") + FUNCTION = "run" + CATEGORY = "posedata" + + def run(self, pose_data, **kwargs): + filter_extra_people = bool(kwargs.get("filter_extra_people", True)) + + smooth_alpha = float(kwargs.get("smooth_alpha", 0.7)) + gap_frames = int(kwargs.get("gap_frames", 12)) + min_run_frames = int(kwargs.get("min_run_frames", 2)) + + conf_thresh_body = float(kwargs.get("conf_thresh_body", 0.20)) + conf_thresh_hands = float(kwargs.get("conf_thresh_hands", 0.50)) + conf_thresh_face = 0.20 + + force_body_18 = bool(kwargs.get("force_body_18", False)) + + pose_data = _coerce_pose_data_to_obj(pose_data) + frames_json_like, meta_ref = _pose_data_to_kps_frames(pose_data, force_body_18=force_body_18) + + # The ported smoother reads module-level tunables, so patch them under a lock and restore afterwards. + with _GLOBAL_LOCK: + old = _snapshot_tunable_globals() + try: + globals()["CONF_GATE_BODY"] = conf_thresh_body + globals()["CONF_GATE_HAND"] = conf_thresh_hands + globals()["CONF_GATE_FACE"] = conf_thresh_face + + globals()["ALPHA_BODY"] = smooth_alpha + globals()["SUPER_SMOOTH_ALPHA"] = smooth_alpha + globals()["MAX_GAP_FRAMES"] = gap_frames + globals()["MIN_RUN_FRAMES"] = min_run_frames + globals()["DENSE_SUPER_SMOOTH_ALPHA"] = smooth_alpha + globals()["DENSE_MAX_GAP_FRAMES"] = gap_frames + globals()["DENSE_MIN_RUN_FRAMES"] = min_run_frames + globals()["FILTER_EXTRA_PEOPLE"] = filter_extra_people + + smoothed_frames = smooth_KPS_json_obj( + frames_json_like, + keep_face_untouched=False, + keep_hands_untouched=False, + filter_extra_people=filter_extra_people, + ) + finally: + _restore_tunable_globals(old) + + out_pose_data = _kps_frames_to_pose_data(pose_data, smoothed_frames, meta_ref, force_body_18=force_body_18) + + w, h = _extract_canvas_wh(smoothed_frames, default_w=720, default_h=1280) + frames_np = [] + for fr in smoothed_frames: + if isinstance(fr, dict) and fr.get("people"): + img = _draw_pose_frame_full( + w, + h, + fr["people"][0], + conf_thresh_body=conf_thresh_body, + conf_thresh_hands=conf_thresh_hands, + conf_thresh_face=conf_thresh_face, + ) + else: + img = np.zeros((h, w, 3), dtype=np.uint8) + frames_np.append(img) + + frames_t = torch.from_numpy(np.stack(frames_np, axis=0)).float() / 255.0 + return (frames_t, out_pose_data) + + +# ============================================================ +# PKL / pose_data IO +# ============================================================ + + +class _PoseDummyObj: + def __init__(self, *a, **k): + pass + + def
__setstate__(self, state): + if isinstance(state, dict): + self.__dict__.update(state) + elif isinstance(state, (list, tuple)) and len(state) == 2 and isinstance(state[0], dict): + self.__dict__.update(state[0]) + if isinstance(state[1], dict): + self.__dict__.update(state[1]) + else: + self.__dict__["_slotstate"] = state[1] + else: + self.__dict__["_state"] = state + + +class _SafeUnpickler(pickle.Unpickler): + def find_class(self, module, name): + if module.startswith("numpy._core"): + module = module.replace("numpy._core", "numpy.core", 1) + if module.startswith("numpy._globals"): + module = module.replace("numpy._globals", "numpy", 1) + if name in {"AAPoseMeta"}: + return _PoseDummyObj + try: + return super().find_class(module, name) + except Exception: + return _PoseDummyObj + + +def _load_pose_data_pkl(path: str) -> Any: + with open(path, "rb") as f: + return _SafeUnpickler(f).load() + + +def _coerce_pose_data_to_obj(pd: Any) -> Any: + if isinstance(pd, str): + return _load_pose_data_pkl(pd) + if isinstance(pd, dict) and "pose_data" in pd: + return pd["pose_data"] + return pd + + +def _as_attr(x: Any, key: str, default=None): + if isinstance(x, dict): + return x.get(key, default) + return getattr(x, key, default) + + +def _set_attr(x: Any, key: str, value: Any): + if isinstance(x, dict): + x[key] = value + else: + setattr(x, key, value) + + +def _xy_p_to_flat(xy: Optional[np.ndarray], p: Optional[np.ndarray]) -> Optional[List[float]]: + if xy is None: + return None + arr = np.asarray(xy) + if arr.ndim != 2 or arr.shape[1] < 2: + return None + N = arr.shape[0] + if p is None: + pp = np.ones((N,), dtype=np.float32) + else: + pp = np.asarray(p).reshape(-1) + if pp.shape[0] != N: + pp = np.ones((N,), dtype=np.float32) + out: List[float] = [] + for i in range(N): + out.extend([float(arr[i, 0]), float(arr[i, 1]), float(pp[i])]) + return out + + +def _flat_to_xy_p(flat: Optional[List[float]]) -> Tuple[Optional[np.ndarray], Optional[np.ndarray]]: + if not isinstance(flat, list) or len(flat) % 3 != 0: + return None, None + N = len(flat) // 3 + xy = np.zeros((N, 2), dtype=np.float32) + p = np.zeros((N,), dtype=np.float32) + for i in range(N): + xy[i, 0] = float(flat[3 * i + 0]) + xy[i, 1] = float(flat[3 * i + 1]) + p[i] = float(flat[3 * i + 2]) + return xy, p + + +def _pose_data_to_kps_frames(pose_data: Any, *, force_body_18: bool) -> Tuple[List[Dict[str, Any]], Dict[str, Any]]: + pose_metas = _as_attr(pose_data, "pose_metas", None) + if pose_metas is None: + pose_metas = _as_attr(pose_data, "frames", None) + if pose_metas is None or not isinstance(pose_metas, list): + raise ValueError("pose_data does not contain 'pose_metas' list.") + frames: List[Dict[str, Any]] = [] + for meta in pose_metas: + h = _as_attr(meta, "height", 1280) + w = _as_attr(meta, "width", 720) + kps_body = _as_attr(meta, "kps_body", None) + kps_body_p = _as_attr(meta, "kps_body_p", None) + kps_face = _as_attr(meta, "kps_face", None) + kps_face_p = _as_attr(meta, "kps_face_p", None) + kps_lhand = _as_attr(meta, "kps_lhand", None) + kps_lhand_p = _as_attr(meta, "kps_lhand_p", None) + kps_rhand = _as_attr(meta, "kps_rhand", None) + kps_rhand_p = _as_attr(meta, "kps_rhand_p", None) + + pose_flat = _xy_p_to_flat(kps_body, kps_body_p) + face_flat = _xy_p_to_flat(kps_face, kps_face_p) + lh_flat = _xy_p_to_flat(kps_lhand, kps_lhand_p) + rh_flat = _xy_p_to_flat(kps_rhand, kps_rhand_p) + + if force_body_18 and isinstance(pose_flat, list) and len(pose_flat) >= 18 * 3: + pose_flat = pose_flat[: 18 * 3] + + person = { + 
"pose_keypoints_2d": pose_flat if pose_flat is not None else [], + "face_keypoints_2d": face_flat if face_flat is not None else [], + "hand_left_keypoints_2d": lh_flat, + "hand_right_keypoints_2d": rh_flat, + } + frame = {"people": [person], "canvas_height": int(h), "canvas_width": int(w)} + frames.append(frame) + + meta_ref = {"pose_metas": pose_metas, "len": len(pose_metas)} + return frames, meta_ref + + +def _kps_frames_to_pose_data( + pose_data_in: Any, frames_kps: List[Dict[str, Any]], meta_ref: Dict[str, Any], *, force_body_18: bool +) -> Any: + out_pd = copy.deepcopy(pose_data_in) + pose_metas_out = _as_attr(out_pd, "pose_metas", None) + if pose_metas_out is None: + pose_metas_out = meta_ref.get("pose_metas") + if pose_metas_out is None or not isinstance(pose_metas_out, list): + raise ValueError("Failed to locate pose_metas in output pose_data.") + + T = min(len(pose_metas_out), len(frames_kps)) + for t in range(T): + meta = pose_metas_out[t] + fr = frames_kps[t] + people = fr.get("people", []) if isinstance(fr, dict) else [] + p0 = people[0] if people else None + if not isinstance(p0, dict): + continue + + pose_flat = p0.get("pose_keypoints_2d") + face_flat = p0.get("face_keypoints_2d") + lh_flat = p0.get("hand_left_keypoints_2d") + rh_flat = p0.get("hand_right_keypoints_2d") + + if force_body_18 and isinstance(pose_flat, list) and len(pose_flat) >= 18 * 3: + pose_flat = pose_flat[: 18 * 3] + + body_xy, body_p = _flat_to_xy_p(pose_flat if isinstance(pose_flat, list) else None) + face_xy, face_p = _flat_to_xy_p(face_flat if isinstance(face_flat, list) else None) + lh_xy, lh_p = _flat_to_xy_p(lh_flat if isinstance(lh_flat, list) else None) + rh_xy, rh_p = _flat_to_xy_p(rh_flat if isinstance(rh_flat, list) else None) + + if body_xy is not None and body_p is not None: + _set_attr(meta, "kps_body", body_xy.astype(np.float32, copy=False)) + _set_attr(meta, "kps_body_p", body_p.astype(np.float32, copy=False)) + if face_xy is not None and face_p is not None: + _set_attr(meta, "kps_face", face_xy.astype(np.float32, copy=False)) + _set_attr(meta, "kps_face_p", face_p.astype(np.float32, copy=False)) + if lh_xy is not None and lh_p is not None: + _set_attr(meta, "kps_lhand", lh_xy.astype(np.float32, copy=False)) + _set_attr(meta, "kps_lhand_p", lh_p.astype(np.float32, copy=False)) + if rh_xy is not None and rh_p is not None: + _set_attr(meta, "kps_rhand", rh_xy.astype(np.float32, copy=False)) + _set_attr(meta, "kps_rhand_p", rh_p.astype(np.float32, copy=False)) + + if isinstance(fr, dict): + if "canvas_width" in fr: + _set_attr(meta, "width", int(fr["canvas_width"])) + if "canvas_height" in fr: + _set_attr(meta, "height", int(fr["canvas_height"])) + + _set_attr(out_pd, "pose_metas", pose_metas_out) + return out_pd + + +def _extract_canvas_wh(data: Any, default_w: int, default_h: int) -> Tuple[int, int]: + w, h = int(default_w), int(default_h) + if isinstance(data, list): + for fr in data: + if isinstance(fr, dict) and "canvas_width" in fr and "canvas_height" in fr: + try: + w = int(fr["canvas_width"]) + h = int(fr["canvas_height"]) + break + except Exception: + pass + return w, h + + +# ============================================================ +# === START: smooth_KPS_json.py logic (ported as-is) +# ============================================================ + +ROOTSCALE_CARRY_ENABLED = True +CARRY_MAX_FRAMES = 48 +CARRY_MIN_ANCHORS = 2 +CARRY_ANCHOR_JOINTS = [0, 1, 2, 5, 3, 6, 4, 7] +CARRY_CONF_GATE = 0.20 + +FILTER_EXTRA_PEOPLE = True +MAIN_PERSON_MODE = "longest_track" 
+TRACK_MATCH_MIN_PX = 80.0 +TRACK_MATCH_FACTOR = 3.0 +TRACK_MAX_FRAME_GAP = 32 + +SPATIAL_OUTLIER_FIX = True +BONE_MAX_FACTOR = 2.3 +TORSO_RADIUS_FACTOR = 4.0 + +ALPHA_BODY = 0.70 +MAX_STEP_BODY = 60.0 +VEL_ALPHA = 0.45 +EPS = 0.3 +CONF_GATE_BODY = 0.20 +CONF_FLOOR_BODY = 0.00 + +TRACK_DIST_PENALTY = 1.5 +FACE_WEIGHT_IN_SCORE = 0.15 +HAND_WEIGHT_IN_SCORE = 0.35 + +ALLOW_DISAPPEAR_JOINTS = {3, 4, 6, 7} + +GAP_FILL_ENABLED = True +MAX_GAP_FRAMES = 12 +MIN_RUN_FRAMES = 2 + +TORSO_SYNC_ENABLED = True +TORSO_JOINTS = {1, 2, 5, 8, 11} +TORSO_LOOKAHEAD_FRAMES = 32 + +SUPER_SMOOTH_ENABLED = True +SUPER_SMOOTH_ALPHA = 0.7 +SUPER_SMOOTH_MIN_CONF = 0.20 + +MEDIAN3_ENABLED = True +FACE_SMOOTH_ENABLED = True +HANDS_SMOOTH_ENABLED = False + +CONF_GATE_FACE = 0.20 +CONF_GATE_HAND = 0.50 + +HAND_MIN_POINTS_PRESENT = 7 +MIN_HAND_RUN_FRAMES = 6 + +DENSE_GAP_FILL_ENABLED = False +DENSE_MAX_GAP_FRAMES = 8 +DENSE_MIN_RUN_FRAMES = 2 +DENSE_MEDIAN3_ENABLED = False +DENSE_SUPER_SMOOTH_ENABLED = False +DENSE_SUPER_SMOOTH_ALPHA = 0.7 + + +def _snapshot_tunable_globals() -> Dict[str, Any]: + keys = [ + "FILTER_EXTRA_PEOPLE", + "SUPER_SMOOTH_ALPHA", + "MAX_GAP_FRAMES", + "MIN_RUN_FRAMES", + "DENSE_SUPER_SMOOTH_ALPHA", + "DENSE_MAX_GAP_FRAMES", + "DENSE_MIN_RUN_FRAMES", + "ALPHA_BODY", + "CONF_GATE_BODY", + "CONF_GATE_HAND", + "CONF_GATE_FACE", + ] + return {k: globals().get(k) for k in keys} + + +def _restore_tunable_globals(old: Dict[str, Any]) -> None: + for k, v in old.items(): + globals()[k] = v + + +def _is_valid_xyc(x: float, y: float, c: float) -> bool: + if c is None or c <= 0: + return False + if x == 0 and y == 0: + return False + if math.isnan(x) or math.isnan(y) or math.isnan(c): + return False + return True + + +def _reshape_keypoints_2d(arr: List[float]) -> List[Tuple[float, float, float]]: + if arr is None: + return [] + out = [] + for i in range(0, len(arr), 3): + out.append((float(arr[i]), float(arr[i + 1]), float(arr[i + 2]))) + return out + + +def _flatten_keypoints_2d(kps: List[Tuple[float, float, float]]) -> List[float]: + out: List[float] = [] + for x, y, c in kps: + out.extend([float(x), float(y), float(c)]) + return out + + +def _sum_conf(arr: Optional[List[float]], sample_step: int = 1) -> float: + if not arr: + return 0.0 + s = 0.0 + for i in range(2, len(arr), 3 * sample_step): + try: + c = float(arr[i]) + except Exception: + c = 0.0 + if c > 0: + s += c + return s + + +def _body_center_from_pose(pose_arr: Optional[List[float]]) -> Optional[Tuple[float, float]]: + if not pose_arr: + return None + kps = _reshape_keypoints_2d(pose_arr) + idxs = [2, 5, 8, 11, 1] + pts = [] + for idx in idxs: + if idx < len(kps) and _is_valid_xyc(*kps[idx]): + pts.append((kps[idx][0], kps[idx][1])) + if not pts: + for x, y, c in kps: + if _is_valid_xyc(x, y, c): + pts.append((x, y)) + if not pts: + return None + return (sum(p[0] for p in pts) / len(pts), sum(p[1] for p in pts) / len(pts)) + + +def _dist(a: Tuple[float, float], b: Tuple[float, float]) -> float: + return math.hypot(a[0] - b[0], a[1] - b[1]) + + +def _choose_single_person( + people: List[Dict[str, Any]], prev_center: Optional[Tuple[float, float]] +) -> Optional[Dict[str, Any]]: + if not people: + return None + best = None + best_score = -1e18 + for p in people: + pose = p.get("pose_keypoints_2d") + score = _sum_conf(pose) + score += FACE_WEIGHT_IN_SCORE * _sum_conf(p.get("face_keypoints_2d"), 4) + score += HAND_WEIGHT_IN_SCORE * ( + _sum_conf(p.get("hand_left_keypoints_2d"), 2) + _sum_conf(p.get("hand_right_keypoints_2d"), 2) + ) + center = 
_body_center_from_pose(pose) + if prev_center is not None and center is not None: + score -= TRACK_DIST_PENALTY * _dist(prev_center, center) + if score > best_score: + best_score = score + best = p + return best + + +@dataclass +class _Track: + frames: Dict[int, Dict[str, Any]] + centers: Dict[int, Tuple[float, float]] + last_t: int + last_center: Tuple[float, float] + + +def _estimate_torso_scale(pose: List[Tuple[float, float, float]]) -> Optional[float]: + def dist(i, k): + if i >= len(pose) or k >= len(pose): + return None + if not _is_valid_xyc(*pose[i]) or not _is_valid_xyc(*pose[k]): + return None + return math.hypot(pose[i][0] - pose[k][0], pose[i][1] - pose[k][1]) + + cand = [c for c in [dist(2, 5), dist(8, 11), dist(1, 8), dist(1, 11)] if c is not None and c > 1e-3] + if not cand: + return None + return float(sum(cand) / len(cand)) + + +def _track_match_threshold_from_pose(pose_arr: Optional[List[float]]) -> float: + if isinstance(pose_arr, list): + s = _estimate_torso_scale(_reshape_keypoints_2d(pose_arr)) + if s is not None: + return max(float(TRACK_MATCH_MIN_PX), float(TRACK_MATCH_FACTOR) * float(s)) + return float(max(TRACK_MATCH_MIN_PX, 120.0)) + + +def _build_tracks_over_video(frames_data: List[Any]) -> List[_Track]: + tracks: List[_Track] = [] + for t, frame in enumerate(frames_data): + if not isinstance(frame, dict): + continue + people = frame.get("people", []) + if not isinstance(people, list) or not people: + continue + + cand = [] + for i, p in enumerate(people): + if not isinstance(p, dict): + continue + c = _body_center_from_pose(p.get("pose_keypoints_2d")) + if c is not None: + cand.append((i, p, c)) + if not cand: + continue + + used = set() + track_order = sorted(range(len(tracks)), key=lambda k: tracks[k].last_t, reverse=True) + for k in track_order: + tr = tracks[k] + if (t - tr.last_t) > int(TRACK_MAX_FRAME_GAP): + continue + best_idx, best_d = None, 1e18 + for i, p, cc in cand: + if i in used: + continue + thr = _track_match_threshold_from_pose(p.get("pose_keypoints_2d")) + d = _dist(tr.last_center, cc) + if d <= thr and d < best_d: + best_d = d + best_idx = i + if best_idx is not None: + i, p, cc = next(x for x in cand if x[0] == best_idx) + used.add(i) + tr.frames[t], tr.centers[t], tr.last_t, tr.last_center = p, cc, t, cc + for i, p, cc in cand: + if i not in used: + tracks.append(_Track(frames={t: p}, centers={t: cc}, last_t=t, last_center=cc)) + return tracks + + +def _track_presence_score(tr: _Track) -> Tuple[int, float, float]: + face_sum, body_sum = 0.0, 0.0 + for p in tr.frames.values(): + face_sum += _sum_conf(p.get("face_keypoints_2d"), 4) + body_sum += _sum_conf(p.get("pose_keypoints_2d"), 1) + return (len(tr.frames), face_sum, body_sum) + + +def _pick_main_track(tracks: List[_Track]) -> Optional[_Track]: + if not tracks: + return None + best, best_key = None, (-1, -1e18, -1e18) + for tr in tracks: + key = _track_presence_score(tr) + if key > best_key: + best_key, best = key, tr + return best + + +@dataclass +class BodyState: + last_xy: List[Optional[Tuple[float, float]]] + last_v: List[Tuple[float, float]] + + def __init__(self, joints: int): + self.last_xy = [None] * joints + self.last_v = [(0.0, 0.0)] * joints + + +def _smooth_body_pose(pose_arr: Optional[List[float]], state: BodyState) -> Optional[List[float]]: + if pose_arr is None: + return None + kps = _reshape_keypoints_2d(pose_arr) + J = len(kps) + if len(state.last_xy) != J: + state.last_xy = [None] * J + state.last_v = [(0.0, 0.0)] * J + + out: List[Tuple[float, float, float]] = [] + for 
j in range(J): + x, y, c = kps[j] + last = state.last_xy[j] + vx_last, vy_last = state.last_v[j] + valid_in = _is_valid_xyc(x, y, c) and (c >= CONF_GATE_BODY) + + if valid_in: + if last is None: + state.last_xy[j] = (x, y) + state.last_v[j] = (0.0, 0.0) + out.append((x, y, float(c))) + continue + + dx_raw, dy_raw = x - last[0], y - last[1] + if abs(dx_raw) < EPS: + dx_raw = 0.0 + if abs(dy_raw) < EPS: + dy_raw = 0.0 + + vx = VEL_ALPHA * dx_raw + (1.0 - VEL_ALPHA) * vx_last + vy = VEL_ALPHA * dy_raw + (1.0 - VEL_ALPHA) * vy_last + nx = ALPHA_BODY * x + (1.0 - ALPHA_BODY) * (last[0] + vx) + ny = ALPHA_BODY * y + (1.0 - ALPHA_BODY) * (last[1] + vy) + + d = math.hypot(nx - last[0], ny - last[1]) + if d > MAX_STEP_BODY and d > 1e-6: + scale = MAX_STEP_BODY / d + nx = last[0] + (nx - last[0]) * scale + ny = last[1] + (ny - last[1]) * scale + vx, vy = nx - last[0], ny - last[1] + + state.last_xy[j], state.last_v[j] = (nx, ny), (vx, vy) + out.append((nx, ny, float(c))) + else: + state.last_xy[j] = None + state.last_v[j] = (0.0, 0.0) + out.append((0.0, 0.0, 0.0)) + + return _flatten_keypoints_2d(out) + + +COCO18_EDGES = [ + (1, 2), + (2, 3), + (3, 4), + (1, 5), + (5, 6), + (6, 7), + (1, 8), + (8, 9), + (9, 10), + (1, 11), + (11, 12), + (12, 13), + (8, 11), + (1, 0), + (0, 14), + (14, 16), + (0, 15), + (15, 17), +] +HAND21_EDGES = [ + (0, 1), + (1, 2), + (2, 3), + (3, 4), + (0, 5), + (5, 6), + (6, 7), + (7, 8), + (0, 9), + (9, 10), + (10, 11), + (11, 12), + (0, 13), + (13, 14), + (14, 15), + (15, 16), + (0, 17), + (17, 18), + (18, 19), + (19, 20), +] + +_NEIGHBORS = None + + +def _build_neighbors(): + global _NEIGHBORS + if _NEIGHBORS is not None: + return + _NEIGHBORS = {} + for a, b in COCO18_EDGES: + _NEIGHBORS.setdefault(a, set()).add(b) + _NEIGHBORS.setdefault(b, set()).add(a) + + +def _suppress_spatial_outliers_in_pose_arr( + pose_arr: Optional[List[float]], *, conf_gate: float +) -> Optional[List[float]]: + if not isinstance(pose_arr, list) or len(pose_arr) % 3 != 0: + return pose_arr + pose = _reshape_keypoints_2d(pose_arr) + J = len(pose) + center = _body_center_from_pose(pose_arr) + scale = _estimate_torso_scale(pose) + if center is None or scale is None: + return pose_arr + + max_r, max_bone = TORSO_RADIUS_FACTOR * scale, BONE_MAX_FACTOR * scale + out = [list(p) for p in pose] + + def visible(j): + return j < J and out[j][2] >= conf_gate and not (out[j][0] == 0 and out[j][1] == 0) + + for j in range(J): + if visible(j) and math.hypot(out[j][0] - center[0], out[j][1] - center[1]) > max_r: + out[j] = [0.0, 0.0, 0.0] + + for a, b in COCO18_EDGES: + if a >= J or b >= J: + continue + if not visible(a) or not visible(b): + continue + if math.hypot(out[a][0] - out[b][0], out[a][1] - out[b][1]) > max_bone: + if out[a][2] <= out[b][2]: + out[a] = [0.0, 0.0, 0.0] + else: + out[b] = [0.0, 0.0, 0.0] + + flat = [] + for p in out: + flat.extend(p) + return flat + + +def _suppress_isolated_joints_in_pose_arr( + pose_arr: Optional[List[float]], *, conf_gate: float, keep: set[int] = None +) -> Optional[List[float]]: + if not isinstance(pose_arr, list) or len(pose_arr) % 3 != 0: + return pose_arr + _build_neighbors() + pose = _reshape_keypoints_2d(pose_arr) + J, out = len(pose), [list(p) for p in pose] + keep = keep or set() + + def vis(j): + return j < J and out[j][2] >= conf_gate and not (out[j][0] == 0 and out[j][1] == 0) + + for j in range(J): + if j in keep or not vis(j): + continue + if not any(n < J and vis(n) for n in _NEIGHBORS.get(j, set())): + out[j] = [0.0, 0.0, 0.0] + + flat = [] + for p in 
out: + flat.extend(p) + return flat + + +def _denoise_and_fill_gaps_pose_seq( + pose_arr_seq: List[Optional[List[float]]], *, conf_gate: float, min_run: int, max_gap: int +) -> List[Optional[List[float]]]: + if not pose_arr_seq: + return pose_arr_seq + J = next( + (len(arr) // 3 for arr in pose_arr_seq if isinstance(arr, list) and len(arr) % 3 == 0 and len(arr) > 0), None + ) + if J is None: + return pose_arr_seq + T = len(pose_arr_seq) + out_seq = [list(arr) if isinstance(arr, list) and len(arr) == J * 3 else arr for arr in pose_arr_seq] + + def is_vis(arr, j): + return float(arr[3 * j + 2]) >= conf_gate and not (float(arr[3 * j + 0]) == 0 and float(arr[3 * j + 1]) == 0) + + for j in range(J): + start = None + for t in range(T + 1): + cur = t < T and isinstance(out_seq[t], list) and is_vis(out_seq[t], j) + if cur and start is None: + start = t + if not cur and start is not None: + if (t - start) < min_run: + for k in range(start, t): + if isinstance(out_seq[k], list): + out_seq[k][3 * j : 3 * j + 3] = [0.0, 0.0, 0.0] + start = None + + for j in range(J): + t = 0 + while t < T: + arr = out_seq[t] + if isinstance(arr, list) and is_vis(arr, j): + last_vis_t = t + t += 1 + while t < T: + if isinstance(out_seq[t], list) and is_vis(out_seq[t], j): + break + t += 1 + if t < T and (t - last_vis_t - 1) > 0 and (t - last_vis_t - 1) <= max_gap: + a, b = out_seq[last_vis_t], out_seq[t] + ax, ay, ac = float(a[3 * j]), float(a[3 * j + 1]), float(a[3 * j + 2]) + bx, by, bc = float(b[3 * j]), float(b[3 * j + 1]), float(b[3 * j + 2]) + for k in range(last_vis_t + 1, t): + if isinstance(out_seq[k], list): + r = (k - last_vis_t) / (t - last_vis_t) + out_seq[k][3 * j : 3 * j + 3] = [ax + (bx - ax) * r, ay + (by - ay) * r, min(ac, bc)] + else: + t += 1 + return out_seq + + +def _zero_lag_ema_pose_seq( + pose_seq: List[Optional[List[float]]], *, alpha: float, conf_gate: float +) -> List[Optional[List[float]]]: + if not pose_seq: + return pose_seq + J = next((len(arr) // 3 for arr in pose_seq if isinstance(arr, list) and len(arr) % 3 == 0 and len(arr) > 0), None) + if J is None: + return pose_seq + T = len(pose_seq) + + def is_vis(arr, j): + return float(arr[3 * j + 2]) >= conf_gate and not (float(arr[3 * j + 0]) == 0 and float(arr[3 * j + 1]) == 0) + + fwd, last = [None] * T, [None] * J + for t in range(T): + if not isinstance(pose_seq[t], list) or len(pose_seq[t]) != J * 3: + fwd[t] = pose_seq[t] + continue + out = list(pose_seq[t]) + for j in range(J): + if is_vis(pose_seq[t], j): + x, y = float(pose_seq[t][3 * j]), float(pose_seq[t][3 * j + 1]) + sx, sy = ( + (x, y) + if last[j] is None + else (alpha * x + (1 - alpha) * last[j][0], alpha * y + (1 - alpha) * last[j][1]) + ) + last[j], out[3 * j], out[3 * j + 1] = (sx, sy), float(sx), float(sy) + else: + last[j] = None + fwd[t] = out + + bwd, last = [None] * T, [None] * J + for t in range(T - 1, -1, -1): + if not isinstance(fwd[t], list) or len(fwd[t]) != J * 3: + bwd[t] = fwd[t] + continue + out = list(fwd[t]) + for j in range(J): + if is_vis(fwd[t], j): + x, y = float(fwd[t][3 * j]), float(fwd[t][3 * j + 1]) + sx, sy = ( + (x, y) + if last[j] is None + else (alpha * x + (1 - alpha) * last[j][0], alpha * y + (1 - alpha) * last[j][1]) + ) + last[j], out[3 * j], out[3 * j + 1] = (sx, sy), float(sx), float(sy) + else: + last[j] = None + bwd[t] = out + return bwd + + +def _apply_root_scale( + pose_arr: Optional[List[float]], *, src_root, src_scale, dst_root, dst_scale +) -> Optional[List[float]]: + if not isinstance(pose_arr, list) or len(pose_arr) % 3 != 0 or 
src_scale <= 1e-6 or dst_scale <= 1e-6: + return pose_arr + kps = _reshape_keypoints_2d(pose_arr) + s = dst_scale / src_scale + out = [ + ( + (dst_root[0] + (x - src_root[0]) * s, dst_root[1] + (y - src_root[1]) * s, c) + if c > 0 and not (x == 0 and y == 0) + else (x, y, c) + ) + for x, y, c in kps + ] + return _flatten_keypoints_2d(out) + + +def _carry_pose_when_torso_missing( + pose_seq: List[Optional[List[float]]], *, conf_gate, max_carry, anchor_joints, min_anchors +) -> List[Optional[List[float]]]: + if not pose_seq: + return pose_seq + J = next((len(a) // 3 for a in pose_seq if isinstance(a, list) and len(a) % 3 == 0 and len(a) > 0), None) + if J is None: + return pose_seq + out = [a if a is None else list(a) for a in pose_seq] + FILL = {1, 8, 9, 10, 11, 12, 13} - set(ALLOW_DISAPPEAR_JOINTS) + + def is_vis(arr, j): + return float(arr[3 * j + 2]) >= conf_gate and not (float(arr[3 * j]) == 0 and float(arr[3 * j + 1]) == 0) + + def rs_anchors(arr): + pts = [(float(arr[3 * j]), float(arr[3 * j + 1])) for j in anchor_joints if j < J and is_vis(arr, j)] + if len(pts) < min_anchors: + return None + xs, ys = [p[0] for p in pts], [p[1] for p in pts] + s = max(max(xs) - min(xs), max(ys) - min(ys)) + if s <= 1e-3: + return None + return (sum(xs) / len(pts), sum(ys) / len(pts)), float(s) + + last_good, last_rs, carry = None, None, 0 + for t, arr in enumerate(out): + if not isinstance(arr, list) or len(arr) != J * 3: + continue + rs = rs_anchors(arr) + if ( + sum(1 for j in anchor_joints if j < J and is_vis(arr, j)) >= min_anchors + and rs + and sum(1 for j in FILL if j < J and is_vis(arr, j)) >= 2 + ): + last_good, last_rs, carry = list(arr), rs, max_carry + continue + if rs and last_good and last_rs and carry > 0: + carried = _apply_root_scale( + last_good, src_root=last_rs[0], src_scale=last_rs[1], dst_root=rs[0], dst_scale=rs[1] + ) + if isinstance(carried, list) and len(carried) == J * 3: + for j in FILL: + if ( + j < J + and not is_vis(arr, j) + and (float(carried[3 * j]) != 0 or float(carried[3 * j + 1]) != 0) + and float(carried[3 * j + 2]) > 0 + ): + arr[3 * j : 3 * j + 3] = [ + float(carried[3 * j]), + float(carried[3 * j + 1]), + max(min(float(carried[3 * j + 2]), 0.60), conf_gate), + ] + out[t], carry = arr, carry - 1 + continue + carry = max(carry - 1, 0) + return out + + +def _force_full_torso_pair( + pose_seq: List[Optional[List[float]]], + *, + conf_gate, + anchor_joints, + min_anchors, + max_lookback=240, + fill_legs_with_hip=True, + always_fill_if_one_hip=True, +) -> List[Optional[List[float]]]: + if not pose_seq: + return pose_seq + J = next((len(a) // 3 for a in pose_seq if isinstance(a, list) and len(a) % 3 == 0 and len(a) > 0), None) + if J is None: + return pose_seq + out = [a if a is None else list(a) for a in pose_seq] + + def is_vis(arr, j): + return ( + j < J and float(arr[3 * j + 2]) >= conf_gate and not (float(arr[3 * j]) == 0 and float(arr[3 * j + 1]) == 0) + ) + + def rs_anchors(arr): + pts = [(float(arr[3 * j]), float(arr[3 * j + 1])) for j in anchor_joints if j < J and is_vis(arr, j)] + if len(pts) < min_anchors: + return None + xs, ys = [p[0] for p in pts], [p[1] for p in pts] + s = max(max(xs) - min(xs), max(ys) - min(ys)) + return ((sum(xs) / len(pts), sum(ys) / len(pts)), float(s)) if s > 1e-3 else None + + last_idx, last_f, last_rs = None, None, None + for t, arr in enumerate(out): + if not isinstance(arr, list) or len(arr) != J * 3: + continue + rs = rs_anchors(arr) + r_ok, l_ok = is_vis(arr, 8), is_vis(arr, 11) + if rs and sum(1 for j in anchor_joints if 
is_vis(arr, j)) >= min_anchors and r_ok and l_ok: + last_idx, last_f, last_rs = t, list(arr), rs + continue + if ( + not last_f + or not last_rs + or (t - last_idx) > max_lookback + or not rs + or (r_ok and l_ok) + or (not r_ok and not l_ok and not always_fill_if_one_hip) + ): + continue + carried = _apply_root_scale(last_f, src_root=last_rs[0], src_scale=last_rs[1], dst_root=rs[0], dst_scale=rs[1]) + if not isinstance(carried, list) or len(carried) != J * 3: + continue + + def cp(j): + if ( + j < J + and not is_vis(arr, j) + and (float(carried[3 * j]) != 0 or float(carried[3 * j + 1]) != 0) + and float(carried[3 * j + 2]) > 0 + ): + arr[3 * j : 3 * j + 3] = [ + float(carried[3 * j]), + float(carried[3 * j + 1]), + max(min(float(carried[3 * j + 2]), 0.60), conf_gate), + ] + + if not r_ok: + cp(8) + if fill_legs_with_hip: + cp(9) + cp(10) + if not l_ok: + cp(11) + if fill_legs_with_hip: + cp(12) + cp(13) + out[t] = arr + return out + + +def _median3_pose_seq(pose_seq: List[Optional[List[float]]], *, conf_gate: float) -> List[Optional[List[float]]]: + if not pose_seq: + return pose_seq + J = next((len(a) // 3 for a in pose_seq if isinstance(a, list) and len(a) % 3 == 0 and len(a) > 0), None) + if J is None: + return pose_seq + T = len(pose_seq) + + def is_vis(arr, j): + return float(arr[3 * j + 2]) >= conf_gate and not (float(arr[3 * j]) == 0 and float(arr[3 * j + 1]) == 0) + + out_seq = [] + for t in range(T): + if not isinstance(pose_seq[t], list) or len(pose_seq[t]) != J * 3: + out_seq.append(pose_seq[t]) + continue + out = list(pose_seq[t]) + a0, a1, a2 = pose_seq[max(0, t - 1)], pose_seq[t], pose_seq[min(T - 1, t + 1)] + for j in range(J): + if not is_vis(pose_seq[t], j): + continue + xs, ys = [], [] + for aa in (a0, a1, a2): + if isinstance(aa, list) and len(aa) == J * 3 and is_vis(aa, j): + xs.append(float(aa[3 * j])) + ys.append(float(aa[3 * j + 1])) + if len(xs) >= 2: + xs.sort() + ys.sort() + out[3 * j], out[3 * j + 1] = float(xs[len(xs) // 2]), float(ys[len(ys) // 2]) + out_seq.append(out) + return out_seq + + +def _sync_group_appearances( + pose_arr_seq: List[Optional[List[float]]], *, group: set[int], conf_gate: float, lookahead: int +) -> List[Optional[List[float]]]: + if not pose_arr_seq: + return pose_arr_seq + J = next((len(a) // 3 for a in pose_arr_seq if isinstance(a, list) and len(a) % 3 == 0 and len(a) > 0), None) + if J is None: + return pose_arr_seq + T = len(pose_arr_seq) + out = [list(a) if isinstance(a, list) and len(a) == J * 3 else a for a in pose_arr_seq] + + def is_vis(arr, j): + return float(arr[3 * j + 2]) >= conf_gate and not (float(arr[3 * j]) == 0 and float(arr[3 * j + 1]) == 0) + + for t in range(T): + arr = out[t] + if not isinstance(arr, list): + continue + vis = {j for j in group if j < J and is_vis(arr, j)} + if not vis: + continue + for j in list({j for j in group if j < J and j not in vis}): + t2 = next( + ( + tt + for tt in range(t + 1, min(T, t + 1 + lookahead)) + if isinstance(out[tt], list) and is_vis(out[tt], j) + ), + None, + ) + if t2 is None: + continue + last_t = next((tb for tb in range(t - 1, -1, -1) if isinstance(out[tb], list) and is_vis(out[tb], j)), None) + b = out[t2] + if last_t is None: + for k in range(t, t2): + if isinstance(out[k], list): + out[k][3 * j : 3 * j + 3] = b[3 * j : 3 * j + 3] + else: + a = out[last_t] + if float(a[3 * j]) == 0 and float(a[3 * j + 1]) == 0: + continue + c_fill = min(float(a[3 * j + 2]), float(b[3 * j + 2])) + for tt in range(t, t2): + if isinstance(out[tt], list): + r = (tt - last_t) / (t2 - last_t) + 
out[tt][3 * j : 3 * j + 3] = [ + float(a[3 * j]) + (float(b[3 * j]) - float(a[3 * j])) * r, + float(a[3 * j + 1]) + (float(b[3 * j + 1]) - float(a[3 * j + 1])) * r, + float(c_fill), + ] + return out + + +def _count_valid_points(arr: Optional[List[float]], *, conf_gate: float) -> int: + if not isinstance(arr, list) or len(arr) % 3 != 0: + return 0 + return sum( + 1 + for i in range(0, len(arr), 3) + if float(arr[i + 2]) >= conf_gate and not (float(arr[i]) == 0 and float(arr[i + 1]) == 0) + ) + + +def _zero_out_kps(arr: Optional[List[float]]) -> Optional[List[float]]: + if not isinstance(arr, list) or len(arr) % 3 != 0: + return arr + out = list(arr) + for i in range(0, len(out), 3): + out[i : i + 3] = [0.0, 0.0, 0.0] + return out + + +def _pin_body_wrist_to_hand( + p_out: Dict[str, Any], *, side: str, conf_gate_body: float, conf_gate_hand: float, blend: float +) -> None: + bw, hk = (4, "hand_right_keypoints_2d") if side == "right" else (7, "hand_left_keypoints_2d") + pose, hand = p_out.get("pose_keypoints_2d"), p_out.get(hk) + if not isinstance(pose, list) or not isinstance(hand, list) or len(pose) < (bw * 3 + 3) or len(hand) < 3: + return + hx, hy, hc = float(hand[0]), float(hand[1]), float(hand[2]) + if hc < conf_gate_hand or (hx == 0.0 and hy == 0.0): + return + bx, by, bc = float(pose[bw * 3]), float(pose[bw * 3 + 1]), float(pose[bw * 3 + 2]) + if bc < conf_gate_body or (bx == 0.0 and by == 0.0): + pose[bw * 3 : bw * 3 + 3] = [hx, hy, float(max(bc, min(hc, 0.9)))] + else: + pose[bw * 3 : bw * 3 + 3] = [ + bx * (1.0 - blend) + hx * blend, + by * (1.0 - blend) + hy * blend, + float(min(bc, hc)), + ] + p_out["pose_keypoints_2d"] = pose + + +def _fix_elbow_using_wrist(p_out: Dict[str, Any], *, side: str, conf_gate: float) -> None: + pose = p_out.get("pose_keypoints_2d") + if not isinstance(pose, list) or len(pose) % 3 != 0: + return + sh, el, wr = (2, 3, 4) if side == "right" else (5, 6, 7) + + def vis(x, y, c): + return c >= conf_gate and not (x == 0.0 and y == 0.0) + + sx, sy, sc = float(pose[3 * sh]), float(pose[3 * sh + 1]), float(pose[3 * sh + 2]) + ex, ey, ec = float(pose[3 * el]), float(pose[3 * el + 1]), float(pose[3 * el + 2]) + wx, wy, wc = float(pose[3 * wr]), float(pose[3 * wr + 1]), float(pose[3 * wr + 2]) + if not vis(sx, sy, sc) or not vis(wx, wy, wc): + return + if vis(ex, ey, ec): + Lse, Lew = math.hypot(ex - sx, ey - sy), math.hypot(wx - ex, wy - ey) + else: + dsw = math.hypot(wx - sx, wy - sy) + if dsw < 1e-3: + return + Lse, Lew = 0.55 * dsw, 0.45 * dsw + dx, dy = wx - sx, wy - sy + d = math.hypot(dx, dy) + if d < 1e-6: + return + d2 = max(min(d, (Lse + Lew) - 1e-3), abs(Lse - Lew) + 1e-3) + a = (Lse * Lse - Lew * Lew + d2 * d2) / (2.0 * d2) + h = math.sqrt(max(Lse * Lse - a * a, 0.0)) + px, py = sx + a * (dx / d), sy + a * (dy / d) + rx, ry = -dy / d, dx / d + e1x, e1y, e2x, e2y = px + h * rx, py + h * ry, px - h * rx, py - h * ry + nx, ny = ( + (e1x, e1y) + if not vis(ex, ey, ec) or math.hypot(e1x - ex, e1y - ey) <= math.hypot(e2x - ex, e2y - ey) + else (e2x, e2y) + ) + pose[3 * el : 3 * el + 3] = [float(nx), float(ny), float(max(min(ec, 0.8), conf_gate))] + p_out["pose_keypoints_2d"] = pose + + +def _remove_short_presence_runs_kps_seq( + seq: List[Optional[List[float]]], *, conf_gate: float, min_points_present: int, min_run: int +) -> List[Optional[List[float]]]: + if not seq: + return seq + out = [None if a is None else list(a) for a in seq] + start = None + for t in range(len(seq) + 1): + cur = t < len(seq) and _count_valid_points(seq[t], conf_gate=conf_gate) >= 
min_points_present + if cur and start is None: + start = t + if not cur and start is not None: + if (t - start) < min_run: + for k in range(start, t): + out[k] = _zero_out_kps(out[k]) + start = None + return out + + +def _zero_sparse_frames_kps_seq( + seq: List[Optional[List[float]]], *, conf_gate: float, min_points_present: int +) -> List[Optional[List[float]]]: + if not seq: + return seq + return [ + ( + _zero_out_kps(a) + if isinstance(a, list) and _count_valid_points(a, conf_gate=conf_gate) < min_points_present + else a + ) + for a in seq + ] + + +def _suppress_spatial_outliers_in_hand_arr( + hand_arr: Optional[List[float]], *, conf_gate: float, max_bone_factor: float = 3.0 +) -> Optional[List[float]]: + if not isinstance(hand_arr, list) or len(hand_arr) % 3 != 0: + return hand_arr + pts = _reshape_keypoints_2d(hand_arr) + J = len(pts) + if J < 21: + return hand_arr + out = [list(p) for p in pts] + + def vis(j): + return out[j][2] >= conf_gate and not (out[j][0] == 0 and out[j][1] == 0) + + vv = [(x, y) for x, y, c in out if c >= conf_gate and not (x == 0 and y == 0)] + if len(vv) < 6: + return hand_arr + xs, ys = [p[0] for p in vv], [p[1] for p in vv] + s = max(max(xs) - min(xs), max(ys) - min(ys)) + if s <= 1e-3: + return hand_arr + max_bone = max_bone_factor * s + for a, b in HAND21_EDGES: + if a >= J or b >= J or not vis(a) or not vis(b): + continue + if math.hypot(out[a][0] - out[b][0], out[a][1] - out[b][1]) > max_bone: + if out[a][2] <= out[b][2]: + out[a] = [0.0, 0.0, 0.0] + else: + out[b] = [0.0, 0.0, 0.0] + return _flatten_keypoints_2d([(x, y, c) for x, y, c in out]) + + +def _body_head_root_scale_from_pose( + pose_arr: Optional[List[float]], *, conf_gate: float +) -> Optional[Tuple[Tuple[float, float], float]]: + if not isinstance(pose_arr, list) or len(pose_arr) % 3 != 0: + return None + kps = _reshape_keypoints_2d(pose_arr) + + def vis(j): + return ( + (float(kps[j][0]), float(kps[j][1])) + if j < len(kps) and kps[j][2] >= conf_gate and not (kps[j][0] == 0 and kps[j][1] == 0) + else None + ) + + pts = [p for p in (vis(j) for j in [0, 1, 14, 15, 16, 17]) if p is not None] + if not pts: + return None + root = (sum(p[0] for p in pts) / len(pts), sum(p[1] for p in pts) / len(pts)) + cands = [ + math.hypot(a[0] - b[0], a[1] - b[1]) + for a, b in ((vis(14), vis(15)), (vis(16), vis(17)), (vis(2), vis(5))) + if a and b and math.hypot(a[0] - b[0], a[1] - b[1]) > 1e-3 + ] + return (root, float(sum(cands) / len(cands))) if cands else None + + +def _body_wrist_root_scale_from_pose( + pose_arr: Optional[List[float]], *, side: str, conf_gate: float +) -> Optional[Tuple[Tuple[float, float], float]]: + if not isinstance(pose_arr, list) or len(pose_arr) % 3 != 0: + return None + kps = _reshape_keypoints_2d(pose_arr) + w, e = (4, 3) if side == "right" else (7, 6) + + def vis(j): + return ( + (float(kps[j][0]), float(kps[j][1])) + if j < len(kps) and kps[j][2] >= conf_gate and not (kps[j][0] == 0 and kps[j][1] == 0) + else None + ) + + pw = vis(w) + if not pw: + return None + pe = vis(e) + s = math.hypot(pw[0] - pe[0], pw[1] - pe[1]) if pe and math.hypot(pw[0] - pe[0], pw[1] - pe[1]) > 1e-3 else None + if s is None: + p2, p5 = vis(2), vis(5) + if p2 and p5 and math.hypot(p2[0] - p5[0], p2[1] - p5[1]) > 1e-3: + s = math.hypot(p2[0] - p5[0], p2[1] - p5[1]) + return (pw, float(s)) if s else None + + +def _smooth_dense_seq_anchored_to_body( + dense_seq: List[Optional[List[float]]], + body_pose_seq: List[Optional[List[float]]], + *, + kind: str, + conf_gate_dense: float, + conf_gate_body: float, 
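+    # median3 toggles the 3-frame median prefilter; zero_lag_alpha sets the strength
+    # of the forward+backward (zero-phase) EMA run in the body-anchored frame below.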
+    median3: bool,
+    zero_lag_alpha: float,
+) -> List[Optional[List[float]]]:
+    if not dense_seq:
+        return dense_seq
+    Jd = next((len(a) // 3 for a in dense_seq if isinstance(a, list) and len(a) % 3 == 0 and len(a) > 0), None)
+    if Jd is None:
+        return dense_seq
+    T = len(dense_seq)
+    out = [None if a is None else list(a) for a in dense_seq]
+    norm_seq = [None] * T
+
+    for t in range(T):
+        arr, body = out[t], body_pose_seq[t] if t < len(body_pose_seq) else None
+        if not isinstance(arr, list) or len(arr) != Jd * 3 or not isinstance(body, list):
+            norm_seq[t] = arr
+            continue
+        rs = (
+            _body_head_root_scale_from_pose(body, conf_gate=conf_gate_body)
+            if kind == "face"
+            else _body_wrist_root_scale_from_pose(
+                body, side="left" if kind == "hand_left" else "right", conf_gate=conf_gate_body
+            )
+        )
+        if not rs or rs[1] <= 1e-6:
+            norm_seq[t] = arr
+            continue
+        (rx, ry), s = rs
+        # normalize into the body-anchored frame: x and y are taken relative to the
+        # root and divided by the scale, confidences pass through; the result must
+        # stay a flat [x, y, c, ...] list of length Jd * 3 for the passes below
+        norm_seq[t] = [
+            (v - rx) / s if i % 3 == 0 else ((v - ry) / s if i % 3 == 1 else v)
+            for i, v in enumerate(arr)
+        ]
+
+    if median3:
+        norm_seq = _median3_pose_seq(norm_seq, conf_gate=conf_gate_dense)
+    norm_seq = _zero_lag_ema_pose_seq(norm_seq, alpha=zero_lag_alpha, conf_gate=conf_gate_dense)
+
+    for t in range(T):
+        arrn, body = norm_seq[t], body_pose_seq[t] if t < len(body_pose_seq) else None
+        if not isinstance(arrn, list) or len(arrn) != Jd * 3 or not isinstance(body, list):
+            continue
+        rs = (
+            _body_head_root_scale_from_pose(body, conf_gate=conf_gate_body)
+            if kind == "face"
+            else _body_wrist_root_scale_from_pose(
+                body, side="left" if kind == "hand_left" else "right", conf_gate=conf_gate_body
+            )
+        )
+        if not rs or rs[1] <= 1e-6:
+            continue
+        (rx, ry), s = rs
+        for j in range(Jd):
+            if (
+                out[t][3 * j + 2] >= conf_gate_dense
+                and arrn[3 * j + 2] >= conf_gate_dense
+                and not (out[t][3 * j] == 0 and out[t][3 * j + 1] == 0)
+            ):
+                out[t][3 * j : 3 * j + 2] = [rx + arrn[3 * j] * s, ry + arrn[3 * j + 1] * s]
+    return out
+
+
+def smooth_KPS_json_obj(
+    data: Any,
+    *,
+    keep_face_untouched: bool = True,
+    keep_hands_untouched: bool = True,
+    filter_extra_people: Optional[bool] = None,
+) -> Any:
+    if not isinstance(data, list):
+        raise ValueError("Expected top-level JSON to be a list of frames.")
+    filter_extra_people = bool(FILTER_EXTRA_PEOPLE) if filter_extra_people is None else filter_extra_people
+    chosen_people: List[Optional[Dict[str, Any]]] = [None] * len(data)
+
+    if MAIN_PERSON_MODE == "longest_track":
+        main_tr = _pick_main_track(_build_tracks_over_video(data))
+        if main_tr:
+            for t in range(len(data)):
+                if t in main_tr.frames:
+                    chosen_people[t] = main_tr.frames[t]
+    else:
+        # greedy per-frame fallback: follow the person closest to the previous center
+        prev_center = None
+        for i, frame in enumerate(data):
+            if not isinstance(frame, dict) or not frame.get("people"):
+                continue
+            chosen_people[i] = _choose_single_person(frame.get("people", []), prev_center)
+            if chosen_people[i]:
+                c = _body_center_from_pose(chosen_people[i].get("pose_keypoints_2d"))
+                if c:
+                    prev_center = c
+
+    pose_seq = [p.get("pose_keypoints_2d") if isinstance(p, dict) else None for p in chosen_people]
+
+    if SPATIAL_OUTLIER_FIX:
+        pose_seq = [
+            _suppress_spatial_outliers_in_pose_arr(arr, conf_gate=CONF_GATE_BODY) if arr else None
for arr in pose_seq + ] + + if GAP_FILL_ENABLED: + pose_seq = _denoise_and_fill_gaps_pose_seq( + pose_seq, conf_gate=CONF_GATE_BODY, min_run=MIN_RUN_FRAMES, max_gap=MAX_GAP_FRAMES + ) + + if TORSO_SYNC_ENABLED: + pose_seq = _sync_group_appearances( + pose_seq, group=TORSO_JOINTS, conf_gate=CONF_GATE_BODY, lookahead=TORSO_LOOKAHEAD_FRAMES + ) + + pose_seq = [ + _suppress_isolated_joints_in_pose_arr(arr, conf_gate=CONF_GATE_BODY, keep=TORSO_JOINTS) if arr else None + for arr in pose_seq + ] + + if MEDIAN3_ENABLED: + pose_seq = _median3_pose_seq(pose_seq, conf_gate=CONF_GATE_BODY) + if SUPER_SMOOTH_ENABLED: + pose_seq = _zero_lag_ema_pose_seq(pose_seq, alpha=SUPER_SMOOTH_ALPHA, conf_gate=SUPER_SMOOTH_MIN_CONF) + if ROOTSCALE_CARRY_ENABLED: + pose_seq = _carry_pose_when_torso_missing( + pose_seq, + conf_gate=CARRY_CONF_GATE, + max_carry=CARRY_MAX_FRAMES, + anchor_joints=CARRY_ANCHOR_JOINTS, + min_anchors=CARRY_MIN_ANCHORS, + ) + pose_seq = _force_full_torso_pair( + pose_seq, + conf_gate=CARRY_CONF_GATE, + anchor_joints=CARRY_ANCHOR_JOINTS, + min_anchors=CARRY_MIN_ANCHORS, + max_lookback=240, + fill_legs_with_hip=True, + always_fill_if_one_hip=True, + ) + pose_seq = _denoise_and_fill_gaps_pose_seq(pose_seq, conf_gate=CONF_GATE_BODY, min_run=MIN_RUN_FRAMES, max_gap=0) + + face_seq = [p.get("face_keypoints_2d") if isinstance(p, dict) else None for p in chosen_people] + lh_seq = [p.get("hand_left_keypoints_2d") if isinstance(p, dict) else None for p in chosen_people] + rh_seq = [p.get("hand_right_keypoints_2d") if isinstance(p, dict) else None for p in chosen_people] + + if HANDS_SMOOTH_ENABLED and not keep_hands_untouched: + lh_seq = [_suppress_spatial_outliers_in_hand_arr(a, conf_gate=CONF_GATE_HAND) if a else None for a in lh_seq] + rh_seq = [_suppress_spatial_outliers_in_hand_arr(a, conf_gate=CONF_GATE_HAND) if a else None for a in rh_seq] + lh_seq = _remove_short_presence_runs_kps_seq( + lh_seq, conf_gate=CONF_GATE_HAND, min_points_present=HAND_MIN_POINTS_PRESENT, min_run=MIN_HAND_RUN_FRAMES + ) + rh_seq = _remove_short_presence_runs_kps_seq( + rh_seq, conf_gate=CONF_GATE_HAND, min_points_present=HAND_MIN_POINTS_PRESENT, min_run=MIN_HAND_RUN_FRAMES + ) + lh_seq = _zero_sparse_frames_kps_seq( + lh_seq, conf_gate=CONF_GATE_HAND, min_points_present=HAND_MIN_POINTS_PRESENT + ) + rh_seq = _zero_sparse_frames_kps_seq( + rh_seq, conf_gate=CONF_GATE_HAND, min_points_present=HAND_MIN_POINTS_PRESENT + ) + if DENSE_GAP_FILL_ENABLED: + lh_seq = _denoise_and_fill_gaps_pose_seq( + lh_seq, conf_gate=CONF_GATE_HAND, min_run=DENSE_MIN_RUN_FRAMES, max_gap=DENSE_MAX_GAP_FRAMES + ) + rh_seq = _denoise_and_fill_gaps_pose_seq( + rh_seq, conf_gate=CONF_GATE_HAND, min_run=DENSE_MIN_RUN_FRAMES, max_gap=DENSE_MAX_GAP_FRAMES + ) + lh_seq = _smooth_dense_seq_anchored_to_body( + lh_seq, + pose_seq, + kind="hand_left", + conf_gate_dense=CONF_GATE_HAND, + conf_gate_body=CONF_GATE_BODY, + median3=DENSE_MEDIAN3_ENABLED, + zero_lag_alpha=DENSE_SUPER_SMOOTH_ALPHA, + ) + rh_seq = _smooth_dense_seq_anchored_to_body( + rh_seq, + pose_seq, + kind="hand_right", + conf_gate_dense=CONF_GATE_HAND, + conf_gate_body=CONF_GATE_BODY, + median3=DENSE_MEDIAN3_ENABLED, + zero_lag_alpha=DENSE_SUPER_SMOOTH_ALPHA, + ) + + if FACE_SMOOTH_ENABLED and not keep_face_untouched: + if DENSE_GAP_FILL_ENABLED: + face_seq = _denoise_and_fill_gaps_pose_seq( + face_seq, conf_gate=CONF_GATE_FACE, min_run=DENSE_MIN_RUN_FRAMES, max_gap=DENSE_MAX_GAP_FRAMES + ) + face_seq = _smooth_dense_seq_anchored_to_body( + face_seq, + pose_seq, + kind="face", + 
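+            # kind="face" anchors landmarks to the head root/scale estimated from the
+            # already-smoothed body pose, so the face inherits stabilized body motion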
conf_gate_dense=CONF_GATE_FACE, + conf_gate_body=CONF_GATE_BODY, + median3=DENSE_MEDIAN3_ENABLED, + zero_lag_alpha=DENSE_SUPER_SMOOTH_ALPHA, + ) + + out_frames = [] + body_state: Optional[BodyState] = None + + for i, frame in enumerate(data): + if not isinstance(frame, dict): + out_frames.append(frame) + continue + + frame_out = copy.deepcopy(frame) + chosen = chosen_people[i] + + if chosen is None: + if filter_extra_people: + frame_out["people"] = [] + out_frames.append(frame_out) + body_state = None + continue + + p_out = copy.deepcopy(chosen) + p_out["pose_keypoints_2d"] = pose_seq[i] + + pose_arr = p_out.get("pose_keypoints_2d") + joints = (len(pose_arr) // 3) if isinstance(pose_arr, list) else 0 + if body_state is None: + body_state = BodyState(joints if joints > 0 else 18) + + p_out["pose_keypoints_2d"] = _smooth_body_pose(p_out.get("pose_keypoints_2d"), body_state) + + if FACE_SMOOTH_ENABLED and not keep_face_untouched: + p_out["face_keypoints_2d"] = face_seq[i] + else: + p_out["face_keypoints_2d"] = chosen.get("face_keypoints_2d", p_out.get("face_keypoints_2d")) + + if HANDS_SMOOTH_ENABLED and not keep_hands_untouched: + p_out["hand_left_keypoints_2d"], p_out["hand_right_keypoints_2d"] = lh_seq[i], rh_seq[i] + else: + p_out["hand_left_keypoints_2d"] = chosen.get("hand_left_keypoints_2d", p_out.get("hand_left_keypoints_2d")) + p_out["hand_right_keypoints_2d"] = chosen.get( + "hand_right_keypoints_2d", p_out.get("hand_right_keypoints_2d") + ) + + _pin_body_wrist_to_hand( + p_out, side="left", conf_gate_body=CONF_GATE_BODY, conf_gate_hand=CONF_GATE_HAND, blend=1.0 + ) + _pin_body_wrist_to_hand( + p_out, side="right", conf_gate_body=CONF_GATE_BODY, conf_gate_hand=CONF_GATE_HAND, blend=1.0 + ) + _fix_elbow_using_wrist(p_out, side="left", conf_gate=CONF_GATE_BODY) + _fix_elbow_using_wrist(p_out, side="right", conf_gate=CONF_GATE_BODY) + + if filter_extra_people: + frame_out["people"] = [p_out] + else: + orig_people = frame.get("people", []) + if not isinstance(orig_people, list): + frame_out["people"] = [p_out] + else: + replaced, new_people = False, [] + for op in orig_people: + if not replaced and (op is chosen): + new_people.append(p_out) + replaced = True + else: + new_people.append(copy.deepcopy(op)) + if not replaced: + new_people = [p_out] + [copy.deepcopy(op) for op in orig_people] + frame_out["people"] = new_people + + out_frames.append(frame_out) + + final_body, final_lh, final_rh, final_face = [], [], [], [] + for f in out_frames: + if f.get("people") and len(f["people"]) > 0: + p = f["people"][0] + final_body.append(p.get("pose_keypoints_2d")) + final_lh.append(p.get("hand_left_keypoints_2d")) + final_rh.append(p.get("hand_right_keypoints_2d")) + final_face.append(p.get("face_keypoints_2d")) + else: + final_body.append(None) + final_lh.append(None) + final_rh.append(None) + final_face.append(None) + + eff_min = max(2, MIN_RUN_FRAMES) + + final_body = _denoise_and_fill_gaps_pose_seq(final_body, conf_gate=CONF_GATE_BODY, min_run=eff_min, max_gap=0) + final_lh = _remove_short_presence_runs_kps_seq( + final_lh, conf_gate=CONF_GATE_HAND, min_points_present=1, min_run=eff_min + ) + final_rh = _remove_short_presence_runs_kps_seq( + final_rh, conf_gate=CONF_GATE_HAND, min_points_present=1, min_run=eff_min + ) + final_face = _remove_short_presence_runs_kps_seq( + final_face, conf_gate=CONF_GATE_FACE, min_points_present=1, min_run=eff_min + ) + + for i, f in enumerate(out_frames): + if f.get("people") and len(f["people"]) > 0: + f["people"][0]["pose_keypoints_2d"] = final_body[i] + 
f["people"][0]["hand_left_keypoints_2d"] = final_lh[i] + f["people"][0]["hand_right_keypoints_2d"] = final_rh[i] + f["people"][0]["face_keypoints_2d"] = final_face[i] + # ======================================================== + + return out_frames + + +# ============================================================ +# === START: render_pose_video.py logic (ported to frame render) +# ============================================================ + +OP_COLORS: List[Tuple[int, int, int]] = [ + (255, 0, 0), + (255, 85, 0), + (255, 170, 0), + (255, 255, 0), + (170, 255, 0), + (85, 255, 0), + (0, 255, 0), + (0, 255, 85), + (0, 255, 170), + (0, 255, 255), + (0, 170, 255), + (0, 85, 255), + (0, 0, 255), + (85, 0, 255), + (170, 0, 255), + (255, 0, 255), + (255, 0, 170), + (255, 0, 85), +] + +BODY_EDGES: List[Tuple[int, int]] = [ + (1, 2), + (1, 5), + (2, 3), + (3, 4), + (5, 6), + (6, 7), + (1, 8), + (8, 9), + (9, 10), + (1, 11), + (11, 12), + (12, 13), + (1, 0), + (0, 14), + (14, 16), + (0, 15), + (15, 17), +] + +BODY_EDGE_COLORS = OP_COLORS[: len(BODY_EDGES)] +BODY_JOINT_COLORS = OP_COLORS + +HAND_EDGES: List[Tuple[int, int]] = [ + (0, 1), + (1, 2), + (2, 3), + (3, 4), + (0, 5), + (5, 6), + (6, 7), + (7, 8), + (0, 9), + (9, 10), + (10, 11), + (11, 12), + (0, 13), + (13, 14), + (14, 15), + (15, 16), + (0, 17), + (17, 18), + (18, 19), + (19, 20), +] + + +def _valid_pt(x: float, y: float, c: float, conf_thresh: float) -> bool: + return (c is not None) and (c >= conf_thresh) and not (x == 0 and y == 0) + + +def _hsv_to_bgr(h: float, s: float, v: float) -> Tuple[int, int, int]: + H = int(np.clip(h, 0.0, 1.0) * 179.0) + S = int(np.clip(s, 0.0, 1.0) * 255.0) + V = int(np.clip(v, 0.0, 1.0) * 255.0) + hsv = np.uint8([[[H, S, V]]]) + bgr = cv2.cvtColor(hsv, cv2.COLOR_HSV2BGR)[0, 0] + return int(bgr[0]), int(bgr[1]), int(bgr[2]) + + +def _looks_normalized(points: List[Tuple[float, float, float]], conf_thresh: float) -> bool: + valid = [(x, y, c) for (x, y, c) in points if _valid_pt(x, y, c, conf_thresh)] + if not valid: + return False + in01 = sum(1 for (x, y, _) in valid if 0.0 <= x <= 1.0 and 0.0 <= y <= 1.0) + return (in01 / float(len(valid))) >= 0.7 + + +def _draw_body( + canvas: np.ndarray, pose: List[Tuple[float, float, float]], conf_thresh: float, xinsr_stick_scaling: bool = False +) -> None: + CH, CW = canvas.shape[:2] + stickwidth = 2 + valid = [(x, y, c) for (x, y, c) in pose if _valid_pt(x, y, c, conf_thresh)] + norm = False + if valid: + in01 = sum(1 for (x, y, _) in valid if 0.0 <= x <= 1.0 and 0.0 <= y <= 1.0) + norm = (in01 / float(len(valid))) >= 0.7 + + def to_px(x: float, y: float) -> Tuple[float, float]: + if norm: + return x * CW, y * CH + return x, y + + max_side = max(CW, CH) + stick_scale = 1 if max_side < 500 else min(2 + (max_side // 1000), 7) if xinsr_stick_scaling else 1 + + for idx, (a, b) in enumerate(BODY_EDGES): + if a >= len(pose) or b >= len(pose): + continue + ax, ay, ac = pose[a] + bx, by, bc = pose[b] + if not (_valid_pt(ax, ay, ac, conf_thresh) and _valid_pt(bx, by, bc, conf_thresh)): + continue + + ax, ay = to_px(ax, ay) + bx, by = to_px(bx, by) + base = BODY_EDGE_COLORS[idx] if idx < len(BODY_EDGE_COLORS) else (255, 255, 255) + + X = np.array([ay, by], dtype=np.float32) + Y = np.array([ax, bx], dtype=np.float32) + + mX, mY = float(np.mean(X)), float(np.mean(Y)) + length = float(np.hypot(X[0] - X[1], Y[0] - Y[1])) + if length < 1.0: + continue + + angle = math.degrees(math.atan2(X[0] - X[1], Y[0] - Y[1])) + polygon = cv2.ellipse2Poly( + (int(mY), int(mX)), (int(length / 
2), int(stickwidth * stick_scale)), int(angle), 0, 360, 1 + ) + cv2.fillConvexPoly(canvas, polygon, (int(base[0] * 0.6), int(base[1] * 0.6), int(base[2] * 0.6))) + + for j, (x, y, c) in enumerate(pose): + if not _valid_pt(x, y, c, conf_thresh): + continue + x, y = to_px(x, y) + col = BODY_JOINT_COLORS[j] if j < len(BODY_JOINT_COLORS) else (255, 255, 255) + cv2.circle(canvas, (int(x), int(y)), 2, col, thickness=-1) + + +def _draw_hand(canvas: np.ndarray, hand: List[Tuple[float, float, float]], conf_thresh: float) -> None: + if not hand or len(hand) < 21: + return + CH, CW = canvas.shape[:2] + norm = _looks_normalized(hand, conf_thresh) + + def to_px(x: float, y: float) -> Tuple[float, float]: + return (x * CW, y * CH) if norm else (x, y) + + n_edges = len(HAND_EDGES) + for i, (a, b) in enumerate(HAND_EDGES): + x1, y1, c1 = hand[a] + x2, y2, c2 = hand[b] + if _valid_pt(x1, y1, c1, conf_thresh) and _valid_pt(x2, y2, c2, conf_thresh): + x1, y1 = to_px(x1, y1) + x2, y2 = to_px(x2, y2) + cv2.line( + canvas, + (int(x1), int(y1)), + (int(x2), int(y2)), + _hsv_to_bgr(i / float(n_edges), 1.0, 1.0), + 1, + cv2.LINE_AA, + ) + for x, y, c in hand: + if _valid_pt(x, y, c, conf_thresh): + x, y = to_px(x, y) + cv2.circle(canvas, (int(x), int(y)), 1, (0, 0, 255), -1, cv2.LINE_AA) + + +def _draw_face(canvas: np.ndarray, face: List[Tuple[float, float, float]], conf_thresh: float) -> None: + if not face: + return + CH, CW = canvas.shape[:2] + norm = _looks_normalized(face, conf_thresh) + + def to_px(x: float, y: float) -> Tuple[float, float]: + return (x * CW, y * CH) if norm else (x, y) + + for x, y, c in face: + if _valid_pt(x, y, c, conf_thresh): + x, y = to_px(x, y) + cv2.circle(canvas, (int(x), int(y)), 0, (255, 255, 255), -1, cv2.LINE_AA) + + +def _draw_pose_frame_full( + w: int, + h: int, + person: Dict[str, Any], + conf_thresh_body: float = 0.10, + conf_thresh_hands: float = 0.10, + conf_thresh_face: float = 0.10, +) -> np.ndarray: + img = np.zeros((h, w, 3), dtype=np.uint8) + pose = _reshape_keypoints_2d(person.get("pose_keypoints_2d") or []) + face = _reshape_keypoints_2d(person.get("face_keypoints_2d") or []) + hand_l = _reshape_keypoints_2d(person.get("hand_left_keypoints_2d") or []) + hand_r = _reshape_keypoints_2d(person.get("hand_right_keypoints_2d") or []) + + if pose: + _draw_body(img, pose, conf_thresh_body) + if hand_l: + _draw_hand(img, hand_l, conf_thresh_hands) + if hand_r: + _draw_hand(img, hand_r, conf_thresh_hands) + if face: + _draw_face(img, face, conf_thresh_face) + return img diff --git a/zavodik/nodes/comfyui-teskors-utils/nodes/preview_image_metadata.py b/zavodik/nodes/comfyui-teskors-utils/nodes/preview_image_metadata.py new file mode 100644 index 0000000000000000000000000000000000000000..4ac0c7a3bf4a97e6f5e34e331d06e50033c1263d --- /dev/null +++ b/zavodik/nodes/comfyui-teskors-utils/nodes/preview_image_metadata.py @@ -0,0 +1,95 @@ +from __future__ import annotations + +import os +import time +import random +from typing import Any, Dict, List + +import numpy as np +from PIL import Image + +try: + import folder_paths # ComfyUI +except Exception: + folder_paths = None + + +def _get_temp_dir() -> str: + if folder_paths is not None and hasattr(folder_paths, "get_temp_directory"): + return folder_paths.get_temp_directory() + return os.path.join(os.getcwd(), "temp") + + +def _unique_basename(prefix: str = "preview") -> str: + return f"{prefix}_{int(time.time()*1000)}_{random.randint(0, 999999):06d}" + + +def _tensor_to_pil(img: Any) -> Image.Image: + if hasattr(img, "detach"): + img 
= img.detach().cpu().numpy()
+
+    arr = np.asarray(img)
+
+    if arr.ndim != 3 or arr.shape[-1] not in (3, 4):
+        raise ValueError(f"Unsupported image shape for preview: {arr.shape}")
+
+    if np.issubdtype(arr.dtype, np.floating):
+        arr = np.clip(arr, 0.0, 1.0)
+        arr = (arr * 255.0).round().astype(np.uint8)
+    else:
+        arr = np.clip(arr, 0, 255).astype(np.uint8)
+
+    if arr.shape[-1] == 4:
+        pil = Image.fromarray(arr, mode="RGBA").convert("RGB")
+    else:
+        pil = Image.fromarray(arr, mode="RGB")
+
+    return pil
+
+
+class PreviewImageNoMetadata:
+
+    @classmethod
+    def INPUT_TYPES(cls):
+        return {
+            "required": {
+                "image": ("IMAGE",),
+            }
+        }
+
+    RETURN_TYPES = ("IMAGE",)
+    FUNCTION = "preview"
+    OUTPUT_NODE = True
+    CATEGORY = "image"
+
+    def preview(self, image):
+
+        temp_dir = _get_temp_dir()
+        os.makedirs(temp_dir, exist_ok=True)
+
+        images_ui: List[Dict[str, str]] = []
+
+        batch = image
+
+        if hasattr(batch, "detach"):
+            b = int(batch.shape[0])
+            get_item = lambda i: batch[i]
+        else:
+            arr = np.asarray(batch)
+            b = int(arr.shape[0])
+            get_item = lambda i: arr[i]
+
+        base = _unique_basename("preview")
+
+        for i in range(b):
+            pil = _tensor_to_pil(get_item(i))
+
+            filename = f"{base}_{i:05d}.png"
+            out_path = os.path.join(temp_dir, filename)
+
+            # Always save WITHOUT metadata
+            pil.save(out_path, compress_level=4)
+
+            images_ui.append({"filename": filename, "subfolder": "", "type": "temp"})
+
+        return {"ui": {"images": images_ui}, "result": (image,)}
diff --git a/zavodik/nodes/comfyui-teskors-utils/nodes/rename_files.py b/zavodik/nodes/comfyui-teskors-utils/nodes/rename_files.py
new file mode 100644
index 0000000000000000000000000000000000000000..ab410aec0051c9c27f80a3896d63f54855034d5e
--- /dev/null
+++ b/zavodik/nodes/comfyui-teskors-utils/nodes/rename_files.py
@@ -0,0 +1,200 @@
+import os
+import re
+import uuid
+import shutil
+
+
+def extract_first_number(s: str):
+    match = re.search(r"\d+", s)
+    return int(match.group()) if match else float("inf")
+
+
+sort_methods = [
+    "None",
+    "Alphabetical (ASC)",
+    "Alphabetical (DESC)",
+    "Numerical (ASC)",
+    "Numerical (DESC)",
+    "Datetime (ASC)",
+    "Datetime (DESC)",
+]
+
+
+def sort_by(items, base_path=".", method=None):
+    def fullpath(x):
+        return os.path.join(base_path, x)
+
+    def get_timestamp(path):
+        try:
+            return os.path.getmtime(path)
+        except FileNotFoundError:
+            return float("-inf")
+
+    if method == "Alphabetical (ASC)":
+        return sorted(items)
+    elif method == "Alphabetical (DESC)":
+        return sorted(items, reverse=True)
+    elif method == "Numerical (ASC)":
+        return sorted(items, key=lambda x: extract_first_number(os.path.splitext(x)[0]))
+    elif method == "Numerical (DESC)":
+        return sorted(items, key=lambda x: extract_first_number(os.path.splitext(x)[0]), reverse=True)
+    elif method == "Datetime (ASC)":
+        return sorted(items, key=lambda x: get_timestamp(fullpath(x)))
+    elif method == "Datetime (DESC)":
+        return sorted(items, key=lambda x: get_timestamp(fullpath(x)), reverse=True)
+    else:
+        return items
+
+
+def _safe_list_files(directory: str):
+    return [f for f in os.listdir(directory) if os.path.isfile(os.path.join(directory, f))]
+
+
+def _format_name(index: int, digits: int, prefix: str, ext: str):
+    """
+    ext is expected as ".png"/".jpg"/".jpeg" (with the leading dot).
+    IMPORTANT: the underscore after the number is ALWAYS appended, then the extension as-is.
+    Example: prefix_0001_.png
+    """
+    num = str(index).zfill(digits)
+    left = f"{prefix}_" if prefix else ""
+    return f"{left}{num}_{ext}"
+
+
+def _index_taken(directory: str, digits: int, prefix: str, index: int) -> bool:
+    """
+    Check whether the number `index` is already taken by ANY extension in the folder,
+    i.e. if prefix_0001_.png exists, prefix_0001_.jpg is no longer allowed.
+    """
+    num = str(index).zfill(digits)
+    left = f"{prefix}_" if prefix else ""
+    start = f"{left}{num}_"
+
+    try:
+        entries = os.listdir(directory)
+    except FileNotFoundError:
+        return False
+
+    for f in entries:
+        p = os.path.join(directory, f)
+        if os.path.isfile(p) and f.startswith(start):
+            return True
+    return False
+
+
+def _find_next_free_index(directory: str, digits: int, prefix: str, start_from: int = 1) -> int:
+    idx = max(1, int(start_from))
+    while _index_taken(directory, digits, prefix, idx):
+        idx += 1
+    return idx
+
+
+class RenameFilesInDir:
+    OUTPUT_NODE = True
+
+    @classmethod
+    def INPUT_TYPES(cls):
+        return {
+            "required": {
+                "directory": ("STRING", {"default": ""}),
+            },
+            "optional": {
+                "output_directory": ("STRING", {"default": ""}),
+                "sort_method": (sort_methods,),
+                "start_index": ("INT", {"default": 0, "min": 0, "max": 0xFFFFFFFFFFFFFFFF, "step": 1}),
+                "files_load_cap": ("INT", {"default": 0, "min": 0, "step": 1}),
+                "prefix": ("STRING", {"default": ""}),
+                "digits": ("INT", {"default": 4, "min": 1, "max": 16, "step": 1}),
+            },
+        }
+
+    RETURN_TYPES = ("INT",)
+    RETURN_NAMES = ("COUNT",)
+    FUNCTION = "run"
+    CATEGORY = "InspirePack/files"
+
+    @classmethod
+    def IS_CHANGED(cls, **kwargs):
+        return float("NaN")
+
+    def run(
+        self,
+        directory: str,
+        output_directory: str = "",
+        sort_method=None,
+        start_index: int = 0,
+        files_load_cap: int = 0,
+        prefix: str = "",
+        digits: int = 4,
+    ):
+        if not os.path.isdir(directory):
+            raise FileNotFoundError(f"Directory '{directory}' cannot be found.")
+
+        files = _safe_list_files(directory)
+        if not files:
+            return (0,)
+
+        files = sort_by(files, directory, sort_method)
+        files = files[start_index:]
+
+        if files_load_cap > 0:
+            files = files[:files_load_cap]
+
+        if not files:
+            return (0,)
+
+        inplace = (output_directory is None) or (str(output_directory).strip() == "")
+
+        if not inplace:
+            os.makedirs(output_directory, exist_ok=True)
+
+        count = 0
+
+        # ---------- COPY MODE ----------
+        if not inplace:
+            for fname in files:
+                src = os.path.join(directory, fname)
+                _, ext = os.path.splitext(fname)  # ext = ".png" / ".jpg" / ...
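+                # NB: _find_next_free_index rescans the output directory for each file,
+                # so copying N files costs O(N^2) directory listings; fine for typical
+                # batches, but worth caching if the target folder grows very large.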
+ + next_idx = _find_next_free_index(output_directory, digits, prefix, start_from=1) + new_name = _format_name(next_idx, digits, prefix, ext) + + dst = os.path.join(output_directory, new_name) + shutil.copy2(src, dst) + count += 1 + + return (count,) + + # ---------- INPLACE RENAME ---------- + temp_map = [] + used_temp = set() + + def _make_temp_name(old_name: str): + while True: + t = f"__tmp__{uuid.uuid4().hex}__{old_name}" + if t not in used_temp and not os.path.exists(os.path.join(directory, t)): + used_temp.add(t) + return t + + # phase1 -> temp + for fname in files: + old_path = os.path.join(directory, fname) + tmp = _make_temp_name(fname) + tmp_path = os.path.join(directory, tmp) + + os.rename(old_path, tmp_path) + temp_map.append((tmp, fname)) + + # phase2 -> final + for tmp, original_name in temp_map: + tmp_path = os.path.join(directory, tmp) + _, ext = os.path.splitext(original_name) + + next_idx = _find_next_free_index(directory, digits, prefix, start_from=1) + new_name = _format_name(next_idx, digits, prefix, ext) + + new_path = os.path.join(directory, new_name) + os.rename(tmp_path, new_path) + count += 1 + + return (count,) diff --git a/zavodik/nodes/comfyui-teskors-utils/nodes/save_load_pose.py b/zavodik/nodes/comfyui-teskors-utils/nodes/save_load_pose.py new file mode 100644 index 0000000000000000000000000000000000000000..43002c35c2bcf9b3922d3ed0d55d55b0734fccac --- /dev/null +++ b/zavodik/nodes/comfyui-teskors-utils/nodes/save_load_pose.py @@ -0,0 +1,128 @@ +import os +import time +import pickle +import glob +import folder_paths + + +def _ensure_output_dir(): + out_dir = folder_paths.get_output_directory() + os.makedirs(out_dir, exist_ok=True) + return out_dir + + +# ------------------------- +# UI: list PKL/PT under input/** (recursive) +# ------------------------- +def _list_all_pkl_under_input(): + inp = folder_paths.get_input_directory() + exts = (".pkl", ".pickle", ".pt") + + files = [] + for ext in exts: + pattern = os.path.join(inp, "**", f"*{ext}") + files.extend(glob.glob(pattern, recursive=True)) + + rel = [] + for f in files: + if os.path.isfile(f): + r = os.path.relpath(f, inp).replace("\\", "/") + rel.append(r) + + rel = sorted(set(rel)) + return rel if rel else [""] + + +def _abs_from_input(rel_path: str) -> str: + inp = folder_paths.get_input_directory() + return os.path.join(inp, rel_path).replace("\\", "/") + + +def _make_unique_path(base_path: str) -> str: + """ + If file exists, append incremental suffix: + pose_data.pkl + pose_data_0001.pkl + pose_data_0002.pkl + """ + if not os.path.exists(base_path): + return base_path + + directory = os.path.dirname(base_path) + name = os.path.basename(base_path) + base, ext = os.path.splitext(name) + + idx = 1 + while True: + new_name = f"{base}_{idx:04d}{ext}" + new_path = os.path.join(directory, new_name) + if not os.path.exists(new_path): + return new_path + idx += 1 + + +def _default_filename(prefix: str, ext: str): + ts = time.strftime("%Y%m%d_%H%M%S") + return f"{prefix}_{ts}{ext}" + + +class TSSavePoseDataAsPickle: + OUTPUT_NODE = True + + @classmethod + def INPUT_TYPES(s): + return { + "required": { + "pose_data": ("POSEDATA",), + "filename": ("STRING", {"default": ""}), + } + } + + RETURN_TYPES = ("STRING",) + RETURN_NAMES = ("saved_path",) + FUNCTION = "save" + CATEGORY = "save" + + def save(self, pose_data, filename=""): + out_dir = _ensure_output_dir() + filename = (filename or "").strip() + if not filename: + filename = _default_filename("pose_data", ".pkl") + if not filename.lower().endswith((".pkl", 
".pickle")): + filename += ".pkl" + + abs_path = _make_unique_path(os.path.join(out_dir, filename)) + + with open(abs_path, "wb") as f: + pickle.dump(pose_data, f, protocol=pickle.HIGHEST_PROTOCOL) + + return (abs_path,) + + +class TSLoadPoseDataPickle: + @classmethod + def INPUT_TYPES(s): + return { + "required": { + # dropdown + Upload, recursive input/** + "file": (_list_all_pkl_under_input(),), + } + } + + RETURN_TYPES = ("POSEDATA",) + RETURN_NAMES = ("pose_data",) + FUNCTION = "load" + CATEGORY = "load" + + def load(self, file): + if not isinstance(file, str) or not file.strip(): + raise ValueError("TS PoseData Pickle: Please select a .pkl/.pt file.") + + abs_path = _abs_from_input(file) + if not os.path.isfile(abs_path): + raise ValueError(f"TS PoseData Pickle: File not found: {abs_path}") + + with open(abs_path, "rb") as f: + pose_data = pickle.load(f) + + return (pose_data,) diff --git a/zavodik/nodes/comfyui-teskors-utils/nodes/video_combine_metadata.py b/zavodik/nodes/comfyui-teskors-utils/nodes/video_combine_metadata.py new file mode 100644 index 0000000000000000000000000000000000000000..40bfb1c96aa429c920cc4bbba63760b7bd78e7c8 --- /dev/null +++ b/zavodik/nodes/comfyui-teskors-utils/nodes/video_combine_metadata.py @@ -0,0 +1,1311 @@ +"""Single-file extraction of the **Video Combine** node from ComfyUI-VideoHelperSuite. + +- Node class: VideoCombine +- ComfyUI registration name: VHS_VideoCombine + +This is intended to be dropped into your own custom node package as a single .py file. + +Notes: +- This file includes the minimal helpers that VideoCombine depends on (ffmpeg discovery, + simple caching, format json parsing, ffmpeg/gifski subprocess pipelines, etc.). +- It assumes you're running inside ComfyUI, so core modules like `folder_paths`, `server`, + and `comfy.utils.ProgressBar` must be available. 
+""" + +from __future__ import annotations + +import copy +import datetime +import functools +import itertools +import json +import logging +import os +import re +import shutil +import subprocess +import sys +import time +from pathlib import Path +from string import Template +from typing import Any, Dict, Iterable, Iterator, List, Mapping, Optional, Tuple + +import numpy as np +import torch +from PIL import ExifTags, Image +from PIL.PngImagePlugin import PngInfo + +import folder_paths +import server +from comfy.utils import ProgressBar + +# ----------------------------------------------------------------------------- +# Logging (minimal, compatible) +# ----------------------------------------------------------------------------- +logger = logging.getLogger("VideoHelperSuite.VideoCombine") +if not logger.handlers: + handler = logging.StreamHandler(sys.stdout) + handler.setFormatter(logging.Formatter("[%(name)s] - %(levelname)s - %(message)s")) + logger.addHandler(handler) +logger.setLevel(logging.INFO) +logger.propagate = False + +ENCODE_ARGS = ("utf-8", "backslashreplace") + +# ----------------------------------------------------------------------------- +# Small utility helpers copied/adapted from VideoHelperSuite +# ----------------------------------------------------------------------------- + +BIGMAX = 2**53 - 1 + + +class MultiInput(str): + """ComfyUI trick: allows a single input to accept multiple types.""" + + def __new__(cls, string: str, allowed_types: Any = "*"): + res = super().__new__(cls, string) + res.allowed_types = allowed_types + return res + + def __ne__(self, other: Any) -> bool: + if self.allowed_types == "*" or other == "*": + return False + return other not in self.allowed_types + + +imageOrLatent = MultiInput("IMAGE", ["IMAGE", "LATENT"]) +floatOrInt = MultiInput("FLOAT", ["FLOAT", "INT"]) + + +class ContainsAll(dict): + """ComfyUI hidden input helper.""" + + def __contains__(self, other: Any) -> bool: # noqa: D401 + return True + + def __getitem__(self, key: Any): + return super().get(key, (None, {})) + + +def cached(duration: int): + """Time-based cache decorator (seconds).""" + + def dec(f): + cached_ret = None + cache_time = 0.0 + + @functools.wraps(f) + def cached_func(): + nonlocal cache_time, cached_ret + now = time.time() + if now > cache_time + duration or cached_ret is None: + cache_time = now + cached_ret = f() + return cached_ret + + return cached_func + + return dec + + +def merge_filter_args(args: List[str], ftype: str = "-vf"): + """Merge multiple -vf occurrences into one (simple, no filter_complex support).""" + + try: + start_index = args.index(ftype) + 1 + index = start_index + while True: + index = args.index(ftype, index) + args[start_index] += "," + args[index + 1] + args.pop(index) + args.pop(index) + except ValueError: + pass + + +# ----------------------------------------------------------------------------- +# ffmpeg / gifski discovery (copied in spirit from VHS) +# ----------------------------------------------------------------------------- + + +def _ffmpeg_suitability(path: str) -> int: + try: + version = subprocess.run([path, "-version"], check=True, capture_output=True).stdout.decode(*ENCODE_ARGS) + except Exception: + return 0 + + score = 0 + simple_criterion = [("libvpx", 20), ("264", 10), ("265", 3), ("svtav1", 5), ("libopus", 1)] + for needle, pts in simple_criterion: + if needle in version: + score += pts + + copyright_index = version.find("2000-2") + if copyright_index >= 0: + yr = version[copyright_index + 6 : copyright_index + 9] 
+ if yr.isnumeric(): + score += int(yr) + + return score + + +def _pick_ffmpeg() -> Optional[str]: + if "VHS_FORCE_FFMPEG_PATH" in os.environ: + return os.environ.get("VHS_FORCE_FFMPEG_PATH") + + ffmpeg_paths: List[str] = [] + + # Prefer imageio-ffmpeg if available. + try: + from imageio_ffmpeg import get_ffmpeg_exe # type: ignore + + ffmpeg_paths.append(get_ffmpeg_exe()) + except Exception: + if "VHS_USE_IMAGEIO_FFMPEG" in os.environ: + raise + logger.warning("Failed to import imageio_ffmpeg") + + if "VHS_USE_IMAGEIO_FFMPEG" in os.environ and ffmpeg_paths: + return ffmpeg_paths[0] + + system_ffmpeg = shutil.which("ffmpeg") + if system_ffmpeg: + ffmpeg_paths.append(system_ffmpeg) + + if os.path.isfile("ffmpeg"): + ffmpeg_paths.append(os.path.abspath("ffmpeg")) + if os.path.isfile("ffmpeg.exe"): + ffmpeg_paths.append(os.path.abspath("ffmpeg.exe")) + + if not ffmpeg_paths: + logger.error("No valid ffmpeg found.") + return None + + if len(ffmpeg_paths) == 1: + return ffmpeg_paths[0] + + return max(ffmpeg_paths, key=_ffmpeg_suitability) + + +ffmpeg_path: Optional[str] = _pick_ffmpeg() + + +gifski_path: Optional[str] = os.environ.get("VHS_GIFSKI") or os.environ.get("JOV_GIFSKI") +if gifski_path is None: + gifski_path = shutil.which("gifski") + + +# ----------------------------------------------------------------------------- +# Built-in video format definitions (embedded from VideoHelperSuite/video_formats) +# ----------------------------------------------------------------------------- + +BUILTIN_VIDEO_FORMATS: Dict[str, Dict[str, Any]] = { + "16bit-png": {"extension": "%03d.png", "input_color_depth": "16bit", "main_pass": ["-n", "-pix_fmt", "rgba64"]}, + "8bit-png": {"extension": "%03d.png", "main_pass": ["-n"]}, + "ProRes": { + "audio_pass": ["-c:a", "pcm_s16le"], + "extension": "mov", + "extra_widgets": [["profile", ["lt", "standard", "hq", "4444", "4444xq"], {"default": "hq"}]], + "fake_trc": "bt709", + "main_pass": [ + "-n", + "-c:v", + "prores_ks", + "-profile:v", + [["$profile"]], + [ + "profile", + { + "1": [[]], + "2": [[]], + "3": [[]], + "4": [ + "has_alpha", + {"False": [["-pix_fmt", "yuv444p10le"]], "True": [["-pix_fmt", "yuva444p10le"]]}, + ], + "4444": [ + "has_alpha", + {"False": [["-pix_fmt", "yuv444p10le"]], "True": [["-pix_fmt", "yuva444p10le"]]}, + ], + "4444xq": [ + "has_alpha", + {"False": [["-pix_fmt", "yuv444p10le"]], "True": [["-pix_fmt", "yuva444p10le"]]}, + ], + "hq": [[]], + "lt": [[]], + "standard": [[]], + }, + ], + "-vf", + "scale=out_color_matrix=bt709", + "-colorspace", + "bt709", + "-color_primaries", + "bt709", + "-color_trc", + "bt709", + ], + }, + "av1-webm": { + "audio_pass": ["-c:a", "libopus"], + "environment": {"SVT_LOG": "1"}, + "extension": "webm", + "fake_trc": "bt709", + "input_color_depth": ["input_color_depth", ["8bit", "16bit"]], + "main_pass": [ + "-n", + "-c:v", + "libsvtav1", + "-pix_fmt", + ["pix_fmt", ["yuv420p10le", "yuv420p"]], + "-crf", + ["crf", "INT", {"default": 23, "max": 100, "min": 0, "step": 1}], + "-vf", + "scale=out_color_matrix=bt709", + "-color_range", + "tv", + "-colorspace", + "bt709", + "-color_primaries", + "bt709", + "-color_trc", + "bt709", + ], + "save_metadata": ["save_metadata", "BOOLEAN", {"default": True}], + }, + "ffmpeg-gif": { + "extension": "gif", + "main_pass": [ + "-n", + "-filter_complex", + [ + "dither", + [ + "bayer", + "heckbert", + "floyd_steinberg", + "sierra2", + "sierra2_4a", + "sierra3", + "burkes", + "atkinson", + "none", + ], + {"default": "sierra2_4a"}, + "[0:v] split [a][b]; [a] 
palettegen=reserve_transparent=on:transparency_color=ffffff " + "[p]; [b][p] paletteuse=dither=$val", + ], + ], + }, + "ffv1-mkv": { + "audio_pass": ["-c:a", "flac"], + "extension": "mkv", + "input_color_depth": "16bit", + "main_pass": [ + "-n", + "-c:v", + "ffv1", + "-level", + ["level", ["0", "1", "3"], {"default": "3"}], + "-coder", + ["coder", ["0", "1", "2"], {"default": "1"}], + "-context", + ["context", ["0", "1"], {"default": "1"}], + "-g", + ["gop_size", "INT", {"default": 1, "max": 300, "min": 1, "step": 1}], + "-slices", + ["slices", ["4", "6", "9", "12", "16", "20", "24", "30"], {"default": "16"}], + "-slicecrc", + ["slicecrc", ["0", "1"], {"default": "1"}], + "-pix_fmt", + [ + "pix_fmt", + [ + "rgba64le", + "bgra", + "yuv420p", + "yuv422p", + "yuv444p", + "yuva420p", + "yuva422p", + "yuva444p", + "yuv420p10le", + "yuv422p10le", + "yuv444p10le", + "yuv420p12le", + "yuv422p12le", + "yuv444p12le", + "yuv420p14le", + "yuv422p14le", + "yuv444p14le", + "yuv420p16le", + "yuv422p16le", + "yuv444p16le", + "gray", + "gray10le", + "gray12le", + "gray16le", + ], + {"default": "rgba64le"}, + ], + ], + "save_metadata": ["save_metadata", "BOOLEAN", {"default": True}], + "trim_to_audio": ["trim_to_audio", "BOOLEAN", {"default": True}], + }, + "gifski": { + "extension": "gif", + "gifski_pass": ["-Q", ["quality", "INT", {"default": 90, "max": 100, "min": 1, "step": 1}]], + "main_pass": ["-pix_fmt", "yuv444p", "-vf", "scale=out_color_matrix=bt709:out_range=pc", "-color_range", "pc"], + }, + "h264-mp4": { + "audio_pass": ["-c:a", "aac"], + "extension": "mp4", + "fake_trc": "bt709", + "main_pass": [ + "-n", + "-c:v", + "libx264", + "-pix_fmt", + ["pix_fmt", ["yuv420p", "yuv420p10le"]], + "-crf", + ["crf", "INT", {"default": 19, "max": 100, "min": 0, "step": 1}], + "-vf", + "scale=out_color_matrix=bt709", + "-color_range", + "tv", + "-colorspace", + "bt709", + "-color_primaries", + "bt709", + "-color_trc", + "bt709", + ], + "save_metadata": ["save_metadata", "BOOLEAN", {"default": True}], + "trim_to_audio": ["trim_to_audio", "BOOLEAN", {"default": True}], + }, + "h265-mp4": { + "audio_pass": ["-c:a", "aac"], + "extension": "mp4", + "fake_trc": "bt709", + "main_pass": [ + "-n", + "-c:v", + "libx265", + "-vtag", + "hvc1", + "-pix_fmt", + ["pix_fmt", ["yuv420p10le", "yuv420p"]], + "-crf", + ["crf", "INT", {"default": 22, "max": 100, "min": 0, "step": 1}], + "-preset", + "medium", + "-x265-params", + "log-level=quiet", + "-vf", + "scale=out_color_matrix=bt709", + "-color_range", + "tv", + "-colorspace", + "bt709", + "-color_primaries", + "bt709", + "-color_trc", + "bt709", + ], + "save_metadata": ["save_metadata", "BOOLEAN", {"default": True}], + }, + "nvenc_av1-mp4": { + "audio_pass": ["-c:a", "aac"], + "bitrate": ["bitrate", "INT", {"default": 10, "max": 999, "min": 1, "step": 1}], + "extension": "mp4", + "fake_trc": "bt709", + "main_pass": [ + "-n", + "-c:v", + "av1_nvenc", + "-pix_fmt", + ["pix_fmt", ["yuv420p", "p010le"]], + "-vf", + "scale=out_color_matrix=bt709", + "-color_range", + "tv", + "-colorspace", + "bt709", + "-color_primaries", + "bt709", + "-color_trc", + "bt709", + ], + "megabit": ["megabit", "BOOLEAN", {"default": True}], + "save_metadata": ["save_metadata", "BOOLEAN", {"default": True}], + }, + "nvenc_h264-mp4": { + "audio_pass": ["-c:a", "aac"], + "bitrate": ["bitrate", "INT", {"default": 10, "max": 999, "min": 1, "step": 1}], + "extension": "mp4", + "fake_trc": "bt709", + "main_pass": [ + "-n", + "-c:v", + "h264_nvenc", + "-pix_fmt", + ["pix_fmt", ["yuv420p", "p010le"]], + "-vf", 
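+            # The scale filter and explicit bt709 tags below are shared
+            # boilerplate across the mp4/webm formats in this table; they keep
+            # players from having to guess the output's color matrix.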
+ "scale=out_color_matrix=bt709", + "-color_range", + "tv", + "-colorspace", + "bt709", + "-color_primaries", + "bt709", + "-color_trc", + "bt709", + ], + "megabit": ["megabit", "BOOLEAN", {"default": True}], + "save_metadata": ["save_metadata", "BOOLEAN", {"default": True}], + }, + "nvenc_hevc-mp4": { + "audio_pass": ["-c:a", "aac"], + "bitrate": ["bitrate", "INT", {"default": 10, "max": 999, "min": 1, "step": 1}], + "extension": "mp4", + "fake_trc": "bt709", + "main_pass": [ + "-n", + "-c:v", + "hevc_nvenc", + "-vtag", + "hvc1", + "-pix_fmt", + ["pix_fmt", ["yuv420p", "p010le"]], + "-vf", + "scale=out_color_matrix=bt709", + "-color_range", + "tv", + "-colorspace", + "bt709", + "-color_primaries", + "bt709", + "-color_trc", + "bt709", + ], + "megabit": ["megabit", "BOOLEAN", {"default": True}], + "save_metadata": ["save_metadata", "BOOLEAN", {"default": True}], + }, + "webm": { + "audio_pass": ["-c:a", "libvorbis"], + "extension": "webm", + "fake_trc": "bt709", + "main_pass": [ + "-n", + "-pix_fmt", + ["pix_fmt", ["yuv420p", "yuva420p"]], + "-crf", + ["crf", "INT", {"default": 20, "max": 100, "min": 0, "step": 1}], + "-b:v", + "0", + "-vf", + "scale=out_color_matrix=bt709", + "-color_range", + "tv", + "-colorspace", + "bt709", + "-color_primaries", + "bt709", + "-color_trc", + "bt709", + ], + "save_metadata": ["save_metadata", "BOOLEAN", {"default": True}], + "trim_to_audio": ["trim_to_audio", "BOOLEAN", {"default": True}], + }, +} + + +# ----------------------------------------------------------------------------- +# Requeue support for BatchManager compatibility (as in VHS) +# ----------------------------------------------------------------------------- + +prompt_queue = server.PromptServer.instance.prompt_queue + + +def requeue_workflow_unchecked(): + """Requeues the current workflow without checking for multiple requeues.""" + + currently_running = prompt_queue.currently_running + value = next(iter(currently_running.values())) + + if len(value) == 6: + (_, prompt_id, prompt, extra_data, outputs_to_execute, sensitive) = value + else: + (_, prompt_id, prompt, extra_data, outputs_to_execute) = value + sensitive = {} + + prompt = prompt.copy() + for uid in prompt: + if prompt[uid]["class_type"] == "VHS_BatchManager": + prompt[uid]["inputs"]["requeue"] = prompt[uid]["inputs"].get("requeue", 0) + 1 + + number = -server.PromptServer.instance.number + server.PromptServer.instance.number += 1 + prompt_id = str(server.uuid.uuid4()) + prompt_queue.put((number, prompt_id, prompt, extra_data, outputs_to_execute, sensitive)) + + +_requeue_guard = [None, 0, 0, {}] + + +def requeue_workflow(requeue_required: Tuple[Any, bool] = (-1, True)): + """Requeue once all managed outputs have finished this batch.""" + + assert len(prompt_queue.currently_running) == 1 + global _requeue_guard + + value = next(iter(prompt_queue.currently_running.values())) + if len(value) == 6: + (run_number, _, prompt, extra_data, outputs_to_execute, _) = value + else: + (run_number, _, prompt, extra_data, outputs_to_execute) = value + + if _requeue_guard[0] != run_number: + managed_outputs = 0 + for bm_uid in prompt: + if prompt[bm_uid]["class_type"] == "VHS_BatchManager": + for output_uid in prompt: + if prompt[output_uid]["class_type"] in ["VHS_VideoCombine"]: + for inp in prompt[output_uid]["inputs"].values(): + if inp == [bm_uid, 0]: + managed_outputs += 1 + _requeue_guard = [run_number, 0, managed_outputs, {}] + + _requeue_guard[1] += 1 + _requeue_guard[3][requeue_required[0]] = requeue_required[1] + + if _requeue_guard[1] 
== _requeue_guard[2] and max(_requeue_guard[3].values() or [False]): + requeue_workflow_unchecked() + + +if "VHS_video_formats" not in folder_paths.folder_names_and_paths: + folder_paths.folder_names_and_paths["VHS_video_formats"] = ((), {".json"}) +if len(folder_paths.folder_names_and_paths["VHS_video_formats"][1]) == 0: + folder_paths.folder_names_and_paths["VHS_video_formats"][1].add(".json") + + +def flatten_list(l: List[Any]) -> List[Any]: + ret: List[Any] = [] + for e in l: + if isinstance(e, list): + ret.extend(e) + else: + ret.append(e) + return ret + + +def iterate_format(video_format: Dict[str, Any], for_widgets: bool = True): + """Iterate over widget/argument definitions inside a format json.""" + + def indirector(cont: Any, index: Any): + if isinstance(cont[index], list) and ( + not for_widgets or (len(cont[index]) > 1 and not isinstance(cont[index][1], dict)) + ): + inp = yield cont[index] + if inp is not None: + cont[index] = inp + yield + + for k in video_format: + if k == "extra_widgets": + if for_widgets: + yield from video_format["extra_widgets"] + elif k.endswith("_pass"): + for i in range(len(video_format[k])): + yield from indirector(video_format[k], i) + if not for_widgets: + video_format[k] = flatten_list(video_format[k]) + else: + yield from indirector(video_format, k) + + +_external_formats_dir = os.environ.get("VHS_BASE_FORMATS_DIR") +if not _external_formats_dir: + _external_formats_dir = os.path.join(os.path.dirname(os.path.abspath(__file__)), "video_formats") + + +@cached(5) +def get_video_formats(): + format_files: Dict[str, Any] = {} + + for format_name in folder_paths.get_filename_list("VHS_video_formats"): + format_files[format_name] = folder_paths.get_full_path("VHS_video_formats", format_name) + + for k in BUILTIN_VIDEO_FORMATS.keys(): + format_files.setdefault(k, ("__embedded__", k)) + + if _external_formats_dir and os.path.isdir(_external_formats_dir): + for item in os.scandir(_external_formats_dir): + if not item.is_file() or not item.name.endswith(".json"): + continue + format_files[item.name[:-5]] = item.path + + formats: List[str] = [] + format_widgets: Dict[str, List[Any]] = {} + + for format_name, src in format_files.items(): + if isinstance(src, tuple) and src[0] == "__embedded__": + video_format = copy.deepcopy(BUILTIN_VIDEO_FORMATS[src[1]]) + else: + with open(src, "r", encoding="utf-8") as stream: + video_format = json.load(stream) + + video_format.pop("save_metadata", None) + + if "gifski_pass" in video_format and gifski_path is None: + continue + + widgets = list(iterate_format(video_format)) + formats.append("video/" + format_name) + if widgets: + format_widgets["video/" + format_name] = widgets + + return formats, format_widgets + + +def apply_format_widgets(format_name: str, kwargs: Dict[str, Any]) -> Dict[str, Any]: + """Load a format definition and fill/resolve widget-driven parameters. + + `format_name` here is the part after 'video/', i.e. the json name without extension. 
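+
+    For example, the embedded h264-mp4 format pairs "-crf" with the widget spec
+    ["crf", "INT", {"default": 19, "max": 100, "min": 0, "step": 1}]; the value
+    is read from kwargs (falling back to the declared default) and substituted
+    in place, so the flattened main_pass ends up containing ["-crf", "19"].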
+ """ + + if format_name in BUILTIN_VIDEO_FORMATS: + video_format = copy.deepcopy(BUILTIN_VIDEO_FORMATS[format_name]) + else: + external_path = None + if _external_formats_dir: + p = os.path.join(_external_formats_dir, format_name + ".json") + if os.path.exists(p): + external_path = p + + if external_path is not None: + video_format_path = external_path + else: + video_format_path = folder_paths.get_full_path("VHS_video_formats", format_name) + + with open(video_format_path, "r", encoding="utf-8") as stream: + video_format = json.load(stream) + + video_format.pop("save_metadata", None) + + for w in iterate_format(video_format): + if w[0] not in kwargs: + if len(w) > 2 and isinstance(w[2], dict) and "default" in w[2]: + default = w[2]["default"] + else: + if isinstance(w[1], list): + default = w[1][0] + else: + default = {"BOOLEAN": False, "INT": 0, "FLOAT": 0, "STRING": ""}[w[1]] + kwargs[w[0]] = default + logger.warning(f"Missing input for {w[0]} has been set to {default}") + + wit = iterate_format(video_format, False) + for w in wit: + while isinstance(w, list): + if len(w) == 1: + w = [Template(x).substitute(**kwargs) for x in w[0]] + break + elif isinstance(w[1], dict): + w = w[1][str(kwargs[w[0]])] + elif len(w) > 3: + w = Template(w[3]).substitute(val=kwargs[w[0]]) + else: + w = str(kwargs[w[0]]) + wit.send(w) + + video_format["save_metadata"] = "False" + + return video_format + + +# ----------------------------------------------------------------------------- +# Tensor -> bytes helpers +# ----------------------------------------------------------------------------- + + +def tensor_to_int(tensor: torch.Tensor, bits: int) -> np.ndarray: + arr = tensor.cpu().numpy() * (2**bits - 1) + 0.5 + return np.clip(arr, 0, (2**bits - 1)) + + +def tensor_to_shorts(tensor: torch.Tensor) -> np.ndarray: + return tensor_to_int(tensor, 16).astype(np.uint16) + + +def tensor_to_bytes(tensor: torch.Tensor) -> np.ndarray: + return tensor_to_int(tensor, 8).astype(np.uint8) + + +# ----------------------------------------------------------------------------- +# ffmpeg / gifski pipeline processes (generators) +# ----------------------------------------------------------------------------- + + +def ffmpeg_process( + args: List[str], video_format: Dict[str, Any], video_metadata: Dict[str, Any], file_path: str, env: Dict[str, str] +): + res = None + frame_data = yield + total_frames_output = 0 + + if video_format.get("save_metadata", "False") != "False": + os.makedirs(folder_paths.get_temp_directory(), exist_ok=True) + metadata_path = os.path.join(folder_paths.get_temp_directory(), "metadata.txt") + + def escape_ffmpeg_metadata(key: str, value: Any) -> str: + value = str(value) + value = value.replace("\\", "\\\\") + value = value.replace(";", "\\;") + value = value.replace("#", "\\#") + value = value.replace("=", "\\=") + value = value.replace("\n", "\\\n") + return f"{key}={value}" + + with open(metadata_path, "w", encoding="utf-8") as f: + f.write(";FFMETADATA1\n") + if "prompt" in video_metadata: + f.write(escape_ffmpeg_metadata("prompt", json.dumps(video_metadata["prompt"])) + "\n") + if "workflow" in video_metadata: + f.write(escape_ffmpeg_metadata("workflow", json.dumps(video_metadata["workflow"])) + "\n") + for k, v in video_metadata.items(): + if k not in ["prompt", "workflow"]: + f.write(escape_ffmpeg_metadata(k, json.dumps(v)) + "\n") + + m_args = ( + args[:1] + + ["-i", metadata_path] + + args[1:] + + ["-metadata", "creation_time=now", "-movflags", "use_metadata_tags"] + ) + + with 
subprocess.Popen(m_args + [file_path], stderr=subprocess.PIPE, stdin=subprocess.PIPE, env=env) as proc: + try: + while frame_data is not None: + proc.stdin.write(frame_data) + frame_data = yield + total_frames_output += 1 + proc.stdin.flush() + proc.stdin.close() + res = proc.stderr.read() + except BrokenPipeError: + err = proc.stderr.read() + if os.path.exists(file_path): + raise Exception("An error occurred in the ffmpeg subprocess:\n" + err.decode(*ENCODE_ARGS)) + print(err.decode(*ENCODE_ARGS), end="", file=sys.stderr) + logger.warning("An error occurred when saving with metadata") + + if res != b"": + with subprocess.Popen(args + [file_path], stderr=subprocess.PIPE, stdin=subprocess.PIPE, env=env) as proc: + try: + while frame_data is not None: + proc.stdin.write(frame_data) + frame_data = yield + total_frames_output += 1 + proc.stdin.flush() + proc.stdin.close() + res = proc.stderr.read() + except BrokenPipeError: + res = proc.stderr.read() + raise Exception("An error occurred in the ffmpeg subprocess:\n" + res.decode(*ENCODE_ARGS)) + + yield total_frames_output + if res and len(res) > 0: + print(res.decode(*ENCODE_ARGS), end="", file=sys.stderr) + + +def gifski_process( + args: List[str], + dimensions: Tuple[int, int], + frame_rate: float, + video_format: Dict[str, Any], + file_path: str, + env: Dict[str, str], +): + if gifski_path is None: + raise ProcessLookupError("gifski is required for this output format but was not found") + + frame_data = yield + with subprocess.Popen( + args + video_format["main_pass"] + ["-f", "yuv4mpegpipe", "-"], + stderr=subprocess.PIPE, + stdin=subprocess.PIPE, + stdout=subprocess.PIPE, + env=env, + ) as procff: + with subprocess.Popen( + [gifski_path] + + video_format["gifski_pass"] + + ["-W", f"{dimensions[0]}", "-H", f"{dimensions[1]}"] + + ["-r", f"{frame_rate}"] + + ["-q", "-o", file_path, "-"], + stderr=subprocess.PIPE, + stdin=procff.stdout, + stdout=subprocess.PIPE, + env=env, + ) as procgs: + try: + while frame_data is not None: + procff.stdin.write(frame_data) + frame_data = yield + procff.stdin.flush() + procff.stdin.close() + resff = procff.stderr.read() + resgs = procgs.stderr.read() + outgs = procgs.stdout.read() + except BrokenPipeError: + procff.stdin.close() + resff = procff.stderr.read() + resgs = procgs.stderr.read() + raise Exception( + "An error occurred while creating gifski output\n" + "Make sure you are using gifski --version >=1.32.0\n" + + "ffmpeg: " + + resff.decode(*ENCODE_ARGS) + + "\n" + + "gifski: " + + resgs.decode(*ENCODE_ARGS) + ) + + if resff and len(resff) > 0: + print(resff.decode(*ENCODE_ARGS), end="", file=sys.stderr) + if resgs and len(resgs) > 0: + print(resgs.decode(*ENCODE_ARGS), end="", file=sys.stderr) + if outgs and len(outgs) > 0: + print(outgs.decode(*ENCODE_ARGS)) + + +def to_pingpong(inp: Any): + if not hasattr(inp, "__getitem__"): + inp = list(inp) + yield from inp + for i in range(len(inp) - 2, 0, -1): + yield inp[i] + + +# ----------------------------------------------------------------------------- +# VideoCombine node +# ----------------------------------------------------------------------------- + + +class TSVideoCombineNoMetadata: + @classmethod + def INPUT_TYPES(cls): + ffmpeg_formats, format_widgets = get_video_formats() + format_widgets["image/webp"] = [["lossless", "BOOLEAN", {"default": True}]] + return { + "required": { + "images": (imageOrLatent,), + "frame_rate": (floatOrInt, {"default": 8, "min": 1, "step": 1}), + "loop_count": ("INT", {"default": 0, "min": 0, "max": 100, "step": 1}), + 
"filename_prefix": ("STRING", {"default": "AnimateDiff"}), + "format": ( + ["image/gif", "image/webp"] + ffmpeg_formats, + {"formats": format_widgets}, + ), + "pingpong": ("BOOLEAN", {"default": False}), + "save_output": ("BOOLEAN", {"default": True}), + }, + "optional": { + "audio": ("AUDIO",), + "meta_batch": ("VHS_BatchManager",), + "vae": ("VAE",), + }, + "hidden": ContainsAll( + { + "prompt": "PROMPT", + "extra_pnginfo": "EXTRA_PNGINFO", + "unique_id": "UNIQUE_ID", + } + ), + } + + RETURN_TYPES = ("VHS_FILENAMES",) + RETURN_NAMES = ("Filenames",) + OUTPUT_NODE = True + CATEGORY = "Video Combine" + FUNCTION = "combine_video" + + def combine_video( + self, + frame_rate: int, + loop_count: int, + images=None, + latents=None, + filename_prefix: str = "AnimateDiff", + format: str = "image/gif", + pingpong: bool = False, + save_output: bool = True, + prompt=None, + extra_pnginfo=None, + audio=None, + unique_id=None, + manual_format_widgets=None, + meta_batch=None, + vae=None, + **kwargs, + ): + if latents is not None: + images = latents + if images is None: + return ((save_output, []),) + + if vae is not None: + if isinstance(images, dict): + images = images["samples"] + else: + vae = None + + if isinstance(images, torch.Tensor) and images.size(0) == 0: + return ((save_output, []),) + + num_frames = len(images) + pbar = ProgressBar(num_frames) + + if vae is not None: + downscale_ratio = getattr(vae, "downscale_ratio", 8) + width = images.size(-1) * downscale_ratio + height = images.size(-2) * downscale_ratio + frames_per_batch = (1920 * 1080 * 16) // (width * height) or 1 + + def batched(it, n): + while batch := tuple(itertools.islice(it, n)): + yield batch + + def batched_decode(latents_iter, vae_obj, fpb): + for batch in batched(iter(latents_iter), fpb): + latent_batch = torch.from_numpy(np.array(batch)) + yield from vae_obj.decode(latent_batch) + + images = batched_decode(images, vae, frames_per_batch) + first_image = next(images) + images = itertools.chain([first_image], images) + while len(first_image.shape) > 3: + first_image = first_image[0] + else: + first_image = images[0] + images = iter(images) + + output_dir = folder_paths.get_output_directory() if save_output else folder_paths.get_temp_directory() + (full_output_folder, filename, _, subfolder, _) = folder_paths.get_save_image_path(filename_prefix, output_dir) + output_files: List[str] = [] + + metadata = PngInfo() + video_metadata: Dict[str, Any] = {} + if prompt is not None: + metadata.add_text("prompt", json.dumps(prompt)) + video_metadata["prompt"] = json.dumps(prompt) + + if extra_pnginfo is not None: + for x in extra_pnginfo: + metadata.add_text(x, json.dumps(extra_pnginfo[x])) + video_metadata[x] = extra_pnginfo[x] + extra_options = extra_pnginfo.get("workflow", {}).get("extra", {}) + else: + extra_options = {} + + metadata.add_text("CreationTime", datetime.datetime.now().isoformat(" ")[:19]) + + if meta_batch is not None and unique_id in getattr(meta_batch, "outputs", {}): + (counter, output_process) = meta_batch.outputs[unique_id] + else: + max_counter = 0 + matcher = re.compile(f"{re.escape(filename)}_(\\d+)\\D*\\..+", re.IGNORECASE) + for existing_file in os.listdir(full_output_folder): + match = matcher.fullmatch(existing_file) + if match: + file_counter = int(match.group(1)) + max_counter = max(max_counter, file_counter) + counter = max_counter + 1 + output_process = None + + first_image_file = f"{filename}_{counter:05}.png" + png_path = os.path.join(full_output_folder, first_image_file) + if 
extra_options.get("VHS_MetadataImage", True) is not False: + Image.fromarray(tensor_to_bytes(first_image)).save(png_path, pnginfo=metadata, compress_level=4) + output_files.append(png_path) + + format_type, format_ext = format.split("/") + + if format_type == "image": + if meta_batch is not None: + raise Exception("Pillow('image/') formats are not compatible with batched output") + + image_kwargs: Dict[str, Any] = {} + if format_ext == "gif": + image_kwargs["disposal"] = 2 + if format_ext == "webp": + exif = Image.Exif() + exif[ExifTags.IFD.Exif] = {36867: datetime.datetime.now().isoformat(" ")[:19]} + image_kwargs["exif"] = exif + image_kwargs["lossless"] = kwargs.get("lossless", True) + + out_file = f"{filename}_{counter:05}.{format_ext}" + out_path = os.path.join(full_output_folder, out_file) + + if pingpong: + images = to_pingpong(images) + + def frames_gen(images_iter): + for i in images_iter: + pbar.update(1) + yield Image.fromarray(tensor_to_bytes(i)) + + frames = frames_gen(images) + next(frames).save( + out_path, + format=format_ext.upper(), + save_all=True, + append_images=frames, + duration=round(1000 / frame_rate), + loop=loop_count, + compress_level=4, + **image_kwargs, + ) + output_files.append(out_path) + file_for_preview = out_file + + else: + if ffmpeg_path is None: + raise ProcessLookupError( + "ffmpeg is required for video outputs and could not be found.\n" + "In order to use video outputs, you must either:\n" + "- Install imageio-ffmpeg with pip,\n" + "- Place a ffmpeg executable in the ComfyUI working directory, or\n" + "- Install ffmpeg and add it to the system path." + ) + + if manual_format_widgets is not None: + logger.warning( + "Format args can now be passed directly. The manual_format_widgets argument is deprecated." + ) + kwargs.update(manual_format_widgets) + + has_alpha = first_image.shape[-1] == 4 + kwargs["has_alpha"] = has_alpha + + video_format = apply_format_widgets(format_ext, kwargs) + dim_alignment = video_format.get("dim_alignment", 2) + + if (first_image.shape[1] % dim_alignment) or (first_image.shape[0] % dim_alignment): + to_pad = (-first_image.shape[1] % dim_alignment, -first_image.shape[0] % dim_alignment) + padding = ( + to_pad[0] // 2, + to_pad[0] - to_pad[0] // 2, + to_pad[1] // 2, + to_pad[1] - to_pad[1] // 2, + ) + padfunc = torch.nn.ReplicationPad2d(padding) + + def pad(image): + image = image.permute((2, 0, 1)) + padded = padfunc(image.to(dtype=torch.float32)) + return padded.permute((1, 2, 0)) + + images = map(pad, images) + dimensions = ( + -first_image.shape[1] % dim_alignment + first_image.shape[1], + -first_image.shape[0] % dim_alignment + first_image.shape[0], + ) + logger.warning("Output images were not of valid resolution; padding was applied") + else: + dimensions = (first_image.shape[1], first_image.shape[0]) + + if pingpong: + if meta_batch is not None: + logger.error("pingpong is incompatible with batched output") + images = to_pingpong(images) + if num_frames > 2: + num_frames = num_frames + (num_frames - 2) + pbar.total = num_frames + + loop_args = ["-vf", f"loop=loop={loop_count}:size={num_frames}"] if loop_count > 0 else [] + + if video_format.get("input_color_depth", "8bit") == "16bit": + images = map(tensor_to_shorts, images) + i_pix_fmt = "rgba64" if has_alpha else "rgb48" + else: + images = map(tensor_to_bytes, images) + i_pix_fmt = "rgba" if has_alpha else "rgb24" + + out_file = f"{filename}_{counter:05}.{video_format['extension']}" + out_path = os.path.join(full_output_folder, out_file) + + bitrate_arg: List[str] = 
[] + bitrate = video_format.get("bitrate") + if bitrate is not None: + if video_format.get("megabit") == "True": + bitrate_arg = ["-b:v", str(bitrate) + "M"] + else: + bitrate_arg = ["-b:v", str(bitrate) + "K"] + + args = [ + ffmpeg_path, + "-v", + "error", + "-f", + "rawvideo", + "-pix_fmt", + i_pix_fmt, + "-color_range", + "pc", + "-colorspace", + "rgb", + "-color_primaries", + "bt709", + "-color_trc", + video_format.get("fake_trc", "iec61966-2-1"), + "-s", + f"{dimensions[0]}x{dimensions[1]}", + "-r", + str(frame_rate), + "-i", + "-", + ] + loop_args + + images = map(lambda x: x.tobytes(), images) + + env = os.environ.copy() + if "environment" in video_format: + env.update(video_format["environment"]) + + if "pre_pass" in video_format: + if meta_batch is not None: + raise Exception("Formats requiring pre_pass are incompatible with Batch Manager") + images = [b"".join(images)] + os.makedirs(folder_paths.get_temp_directory(), exist_ok=True) + in_args_len = args.index("-i") + 2 + pre_pass_args = args[:in_args_len] + video_format["pre_pass"] + merge_filter_args(pre_pass_args) + try: + subprocess.run(pre_pass_args, input=images[0], env=env, capture_output=True, check=True) + except subprocess.CalledProcessError as e: + raise Exception("An error occurred in the ffmpeg prepass:\n" + e.stderr.decode(*ENCODE_ARGS)) + + if "inputs_main_pass" in video_format: + in_args_len = args.index("-i") + 2 + args = args[:in_args_len] + video_format["inputs_main_pass"] + args[in_args_len:] + + if output_process is None: + if "gifski_pass" in video_format: + format = "image/gif" + output_process = gifski_process(args, dimensions, frame_rate, video_format, out_path, env) + audio = None + else: + args += video_format["main_pass"] + bitrate_arg + merge_filter_args(args) + output_process = ffmpeg_process(args, video_format, video_metadata, out_path, env) + + output_process.send(None) + if meta_batch is not None: + meta_batch.outputs[unique_id] = (counter, output_process) + + for image_bytes in images: + pbar.update(1) + output_process.send(image_bytes) + + if meta_batch is not None: + requeue_workflow((meta_batch.unique_id, not meta_batch.has_closed_inputs)) + + if meta_batch is None or meta_batch.has_closed_inputs: + try: + total_frames_output = output_process.send(None) + output_process.send(None) + except StopIteration: + total_frames_output = num_frames + + if meta_batch is not None: + meta_batch.outputs.pop(unique_id, None) + if len(meta_batch.outputs) == 0: + meta_batch.reset() + else: + return {"ui": {"unfinished_batch": [True]}, "result": ((save_output, []),)} + + output_files.append(out_path) + + a_waveform = None + if audio is not None: + try: + a_waveform = audio["waveform"] + except Exception: + a_waveform = None + + if a_waveform is not None: + output_with_audio = f"{filename}_{counter:05}-audio.{video_format['extension']}" + output_with_audio_path = os.path.join(full_output_folder, output_with_audio) + + if "audio_pass" not in video_format: + logger.warning("Selected video format does not have explicit audio support") + video_format["audio_pass"] = ["-c:a", "libopus"] + + channels = audio["waveform"].size(1) + min_audio_dur = total_frames_output / frame_rate + 1 + apad = ( + [] + if video_format.get("trim_to_audio", "True") != "False" + else ["-af", f"apad=whole_dur={min_audio_dur}"] + ) + + mux_args = ( + [ + ffmpeg_path, + "-v", + "error", + "-n", + "-i", + out_path, + "-ar", + str(audio["sample_rate"]), + "-ac", + str(channels), + "-f", + "f32le", + "-i", + "-", + "-c:v", + "copy", + ] + + 
video_format["audio_pass"] + + apad + + ["-shortest", output_with_audio_path] + ) + + audio_data = audio["waveform"].squeeze(0).transpose(0, 1).numpy().tobytes() + merge_filter_args(mux_args, "-af") + try: + res = subprocess.run(mux_args, input=audio_data, env=env, capture_output=True, check=True) + except subprocess.CalledProcessError as e: + raise Exception("An error occurred in the ffmpeg subprocess:\n" + e.stderr.decode(*ENCODE_ARGS)) + + if res.stderr: + print(res.stderr.decode(*ENCODE_ARGS), end="", file=sys.stderr) + + output_files.append(output_with_audio_path) + file_for_preview = output_with_audio + else: + file_for_preview = out_file + + if extra_options.get("VHS_KeepIntermediate", True) is False: + for intermediate in output_files[1:-1]: + if os.path.exists(intermediate): + os.remove(intermediate) + + preview = { + "filename": file_for_preview, + "subfolder": subfolder, + "type": "output" if save_output else "temp", + "format": format, + "frame_rate": frame_rate, + "workflow": first_image_file, + "fullpath": output_files[-1], + } + if num_frames == 1 and "png" in format and "%03d" in file_for_preview: + preview["format"] = "image/png" + preview["filename"] = file_for_preview.replace("%03d", "001") + + return {"ui": {"gifs": [preview]}, "result": ((save_output, output_files),)} diff --git a/zavodik/nodes/comfyui-teskors-utils/requirements.txt b/zavodik/nodes/comfyui-teskors-utils/requirements.txt new file mode 100644 index 0000000000000000000000000000000000000000..2c5f7e9ed2900f39016241890b97d7f01dc04cac --- /dev/null +++ b/zavodik/nodes/comfyui-teskors-utils/requirements.txt @@ -0,0 +1,6 @@ +numpy +torch +opencv-python +pillow +tqdm +scipy diff --git a/zavodik/nodes/comfyui-teskors-utils/web/js/ts_video_preview.js b/zavodik/nodes/comfyui-teskors-utils/web/js/ts_video_preview.js new file mode 100644 index 0000000000000000000000000000000000000000..88705a40fd66ffa13295f82a681e2b6ed8d1dca6 --- /dev/null +++ b/zavodik/nodes/comfyui-teskors-utils/web/js/ts_video_preview.js @@ -0,0 +1,372 @@ +import { app } from "../../../scripts/app.js"; +import { api } from "../../../scripts/api.js"; + +/** + * TS Video Preview extension (always-audio) + * - VHS-like preview (supports /vhs/viewvideo advanced previews when available) + * - Audio ON by default (no mute feature, no hover-mute) + * - Robust refresh when tab becomes visible again (fixes stale preview when generated in background) + */ + +function chainCallback(object, property, callback) { + if (!object) return; + if (property in object && object[property]) { + const orig = object[property]; + object[property] = function () { + const r = orig.apply(this, arguments); + return callback.apply(this, arguments) ?? r; + }; + } else { + object[property] = callback; + } +} + +function getSetting(id, fallback) { + try { + return app?.ui?.settings?.getSettingValue?.(id) ?? fallback; + } catch { + return fallback; + } +} + +function fitHeight(node) { + try { + node.setSize?.([node.size[0], node.computeSize?.([node.size[0], node.size[1]])?.[1] ?? 
node.size[1]]); + } catch {} + node?.graph?.setDirtyCanvas?.(true); + app.graph?.setDirtyCanvas?.(true, true); +} + +function startDraggingItems(node, pointer) { + app.canvas.emitBeforeChange?.(); + app.canvas.graph?.beforeChange?.(); + pointer.finally = () => { + app.canvas.isDragging = false; + app.canvas.graph?.afterChange?.(); + app.canvas.emitAfterChange?.(); + }; + app.canvas.processSelect?.(node, pointer.eDown, true); + app.canvas.isDragging = true; +} + +function processDraggedItems(e) { + if (e.shiftKey || window.LiteGraph?.alwaysSnapToGrid) { + app.canvas?.graph?.snapToGrid?.(app.canvas.selectedItems); + } + app.canvas.dirty_canvas = true; + app.canvas.dirty_bgcanvas = true; + app.canvas.onNodeMoved?.(app.canvas.selectedItems?.[0]); +} + +function allowDragFromWidget(widget) { + widget.onPointerDown = function (pointer, node) { + pointer.onDragStart = () => startDraggingItems(node, pointer); + pointer.onDragEnd = processDraggedItems; + app.canvas.dirty_canvas = true; + return true; + }; +} + +function wirePreviewEventsToCanvas(el) { + const forward = (name, cbName) => { + el.addEventListener( + name, + (e) => { + e.preventDefault(); + return app.canvas?.[cbName]?.(e); + }, + true + ); + }; + forward("contextmenu", "_mousedown_callback"); + forward("pointerdown", "_mousedown_callback"); + forward("mousewheel", "_mousewheel_callback"); + forward("pointermove", "_mousemove_callback"); + forward("pointerup", "_mouseup_callback"); + + el.addEventListener("dragover", (e) => { + e.preventDefault(); + e.dataTransfer.dropEffect = "copy"; + app.dragOverNode = el; + }); +} + +function isTSNode(nodeData) { + return nodeData?.name === "TSVideoCombine" || nodeData?.name === "TSVideoCombineNoMetadata"; +} + +function getVideoPreviewWidget(node) { + return node?.widgets?.find?.((x) => x?.name === "videopreview") ?? 
null; +} + +function safeUpdateAllTSPreviews() { + const nodes = app.graph?._nodes || []; + for (const n of nodes) { + const w = getVideoPreviewWidget(n); + if (!w) continue; + + if (n.__pendingPreviewParams) { + n.updateParameters?.(n.__pendingPreviewParams, true); + n.__pendingPreviewParams = null; + continue; + } + + try { + w.updateSource?.(); + } catch {} + } + app.graph?.setDirtyCanvas?.(true, true); +} + +function installVisibilityHooksOnce() { + if (window.__tsPreviewVisibilityHookInstalled) return; + window.__tsPreviewVisibilityHookInstalled = true; + + document.addEventListener("visibilitychange", () => { + if (!document.hidden) safeUpdateAllTSPreviews(); + }); + + window.addEventListener("focus", () => { + if (!document.hidden) safeUpdateAllTSPreviews(); + }); +} + +function addVideoPreview(nodeType, isInput = true) { + chainCallback(nodeType.prototype, "onNodeCreated", function () { + const node = this; + + const element = document.createElement("div"); + const w = node.addDOMWidget("videopreview", "preview", element, { + serialize: false, + hideOnZoom: false, + getValue() { + return element.value; + }, + setValue(v) { + element.value = v; + }, + }); + + allowDragFromWidget(w); + wirePreviewEventsToCanvas(element); + + w.value = { + hidden: false, + paused: false, + params: {}, + }; + + w.parentEl = document.createElement("div"); + w.parentEl.className = "vhs_preview"; + w.parentEl.style.width = "100%"; + element.appendChild(w.parentEl); + + w.videoEl = document.createElement("video"); + w.videoEl.controls = true; + w.videoEl.loop = true; + w.videoEl.muted = true; + w.videoEl.volume = 1.0; + w.videoEl.style.width = "100%"; + w.videoEl.playsInline = true; + + w.imgEl = document.createElement("img"); + w.imgEl.style.width = "100%"; + w.imgEl.hidden = true; + + w.videoEl.addEventListener("loadedmetadata", () => { + w.aspectRatio = w.videoEl.videoWidth / w.videoEl.videoHeight; + fitHeight(node); + }); + + w.imgEl.onload = () => { + w.aspectRatio = w.imgEl.naturalWidth / w.imgEl.naturalHeight; + fitHeight(node); + }; + + w.videoEl.addEventListener("error", () => { + w.parentEl.hidden = true; + fitHeight(node); + }); + + w.parentEl.appendChild(w.videoEl); + w.parentEl.appendChild(w.imgEl); + + w.computeSize = function (width) { + if (this.aspectRatio && !this.parentEl.hidden) { + const h = (node.size[0] - 20) / this.aspectRatio + 10; + this.computedHeight = h + 10; + return [width, h]; + } + return [width, -4]; + }; + + let timeout = null; + + node.updateParameters = (params, forceUpdate) => { + if (typeof w.value !== "object") w.value = { hidden: false, paused: false, params: {} }; + if (!w.value.params) w.value.params = {}; + + const changed = Object.entries(params).some(([k, v]) => w.value.params[k] !== v); + if (!changed && !forceUpdate) return; + + Object.assign(w.value.params, params); + + if (timeout) clearTimeout(timeout); + if (forceUpdate) w.updateSource(); + else timeout = setTimeout(() => w.updateSource(), 120); + }; + + w.updateSource = function () { + if (!this.value?.params) return; + + const params = { ...this.value.params, timestamp: Date.now() }; + this.parentEl.hidden = !!this.value.hidden; + + const fmt = params.format || ""; + const major = fmt.split("/")[0]; + + let advp = getSetting("VHS.AdvancedPreviews", "Input Only"); + if (advp === "Never") advp = false; + else if (advp === "Input Only") advp = !!isInput; + else advp = true; + + if (major === "video" || fmt === "folder" || (advp && fmt.split("/")[1] === "gif")) { + this.videoEl.autoplay = !this.value.paused 
&& !this.value.hidden; + + this.videoEl.muted = true; + + if (!advp) { + this.videoEl.src = api.apiURL("/view?" + new URLSearchParams(params)); + } else { + let targetWidth = (node.size[0] - 20) * 2 || 256; + const minW = getSetting("VHS.AdvancedPreviewsMinWidth", 0); + if (targetWidth < minW) targetWidth = minW; + + if (!params.custom_width || !params.custom_height) { + params.force_size = targetWidth + "x?"; + } else { + const ar = params.custom_width / params.custom_height; + params.force_size = targetWidth + "x" + targetWidth / ar; + } + + params.deadline = getSetting("VHS.AdvancedPreviewsDeadline", 0); + + this.videoEl.src = api.apiURL("/vhs/viewvideo?" + new URLSearchParams(params)); + } + + this.videoEl.hidden = false; + this.imgEl.hidden = true; + return; + } + + if (major === "image") { + this.imgEl.src = api.apiURL("/view?" + new URLSearchParams(params)); + this.videoEl.hidden = true; + this.imgEl.hidden = false; + } + }; + + w.callback = w.updateSource; + }); +} + +function addPreviewOptions(nodeType) { + chainCallback(nodeType.prototype, "getExtraMenuOptions", function (_, options) { + const w = getVideoPreviewWidget(this); + if (!w) return; + + let url = null; + + if (w.videoEl?.hidden === false && w.videoEl?.src) { + url = api.apiURL("/view?" + new URLSearchParams(w.value.params)); + url = url.replace("%2503d", "001"); + } else if (w.imgEl?.hidden === false && w.imgEl?.src) { + url = w.imgEl.src; + } + + const optNew = []; + + if (url) { + optNew.push( + { + content: "Open preview", + callback: () => window.open(url, "_blank"), + }, + { + content: "Save preview", + callback: () => { + const a = document.createElement("a"); + a.href = url; + a.setAttribute("download", w.value.params.filename || "preview"); + document.body.append(a); + a.click(); + requestAnimationFrame(() => a.remove()); + }, + } + ); + } + + if (w.videoEl?.hidden === false) { + optNew.push({ + content: (w.value.paused ? "Resume" : "Pause") + " preview", + callback: () => { + if (w.value.paused) w.videoEl?.play(); + else w.videoEl?.pause(); + w.value.paused = !w.value.paused; + }, + }); + } + + optNew.push({ + content: (w.value.hidden ? 
"Show" : "Hide") + " preview", + callback: () => { + if (!w.videoEl.hidden && !w.value.hidden) w.videoEl.pause(); + else if (w.value.hidden && !w.videoEl.hidden && !w.value.paused) w.videoEl.play(); + + w.value.hidden = !w.value.hidden; + w.parentEl.hidden = w.value.hidden; + fitHeight(this); + }, + }); + + optNew.push({ + content: "Sync preview", + callback: () => { + for (let p of document.getElementsByClassName("vhs_preview")) { + for (let child of p.children) { + if (child.tagName === "VIDEO") child.currentTime = 0; + else if (child.tagName === "IMG") child.src = child.src; + } + } + }, + }); + + + options.unshift(...optNew); + }); +} + +app.registerExtension({ + name: "teskors.utils.ts_video_preview", + async beforeRegisterNodeDef(nodeType, nodeData) { + if (!isTSNode(nodeData)) return; + + installVisibilityHooksOnce(); + + chainCallback(nodeType.prototype, "onExecuted", function (message) { + if (message?.gifs?.length) { + const p = message.gifs[0]; + + if (document.hidden) { + this.__pendingPreviewParams = p; + return; + } + + this.updateParameters?.(p, true); + } + }); + + addVideoPreview(nodeType, false); + addPreviewOptions(nodeType); + }, +}); diff --git "a/zavodik/\320\237\321\200\320\276\320\274\321\202\321\213.txt" "b/zavodik/\320\237\321\200\320\276\320\274\321\202\321\213.txt" new file mode 100644 index 0000000000000000000000000000000000000000..25ac52ddd8a9fb08ea7cb76715fd723a56afe6b3 --- /dev/null +++ "b/zavodik/\320\237\321\200\320\276\320\274\321\202\321\213.txt" @@ -0,0 +1 @@ +1) Чат, опиши это фото на английском языке для nano banana pro Только ОПИШИ НЕ НАДО ГЕНЕРИРОВАТЬ, только без уточнения черт лица и внешности, также подробно уточни описание расположения ее рук, в какой позе конкретно находится, губ, рта, фигуры, направления взгляда, цвета одежды, стиля и материала, место нахождения, атмосферы, каждый атрибут описывай конкретно, стиль фото должен быть как будто снятый на обычный телефон без профессионализма, и начинай описание со слов "A girl" diff --git "a/zavodik/\320\242\320\265\320\274\320\277\320\273\320\265\320\271\321\202.txt" "b/zavodik/\320\242\320\265\320\274\320\277\320\273\320\265\320\271\321\202.txt" new file mode 100644 index 0000000000000000000000000000000000000000..4beb587ed688e56f478dfa8a9cd6780eebe268cb --- /dev/null +++ "b/zavodik/\320\242\320\265\320\274\320\277\320\273\320\265\320\271\321\202.txt" @@ -0,0 +1 @@ +https://cloud.vast.ai/?ref_id=413074&creator_id=413074&name=Zavod \ No newline at end of file