repo
stringlengths
7
90
file_url
stringlengths
81
315
file_path
stringlengths
4
228
content
stringlengths
0
32.8k
language
stringclasses
1 value
license
stringclasses
7 values
commit_sha
stringlengths
40
40
retrieved_at
stringdate
2026-01-04 14:38:15
2026-01-05 02:33:18
truncated
bool
2 classes
cqparts/cqparts
https://github.com/cqparts/cqparts/blob/018e87e14c2c4d1d40b4bfe6a7e22bcf9baf0a53/src/cqparts_template/__init__.py
src/cqparts_template/__init__.py
"""
Copyright 2018 Peter Boin

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""

__release_ready__ = False  # TODO: remove to stop blocking build

# --- Library metadata (template placeholders; fill these in)
__title__ = "cqparts_<lib_name>"  # TODO
__description__ = "<brief description>"  # TODO
__url__ = "<library url>"  # TODO
__version__ = "0.1.0"
__author__ = "<your name>"  # TODO


# ------ Registration
from cqparts.search import (
    find as _find,
    search as _search,
    register as _register,
)
from cqparts.search import common_criteria

# Everything registered or queried through this module is implicitly
# tagged with this library's name.
module_criteria = {'library': __name__}

# Wrap the cqparts.search entry-points so the library criteria above are
# always applied on this module's behalf.
_with_lib_criteria = common_criteria(**module_criteria)
register = _with_lib_criteria(_register)
search = _with_lib_criteria(_search)
find = _with_lib_criteria(_find)
python
Apache-2.0
018e87e14c2c4d1d40b4bfe6a7e22bcf9baf0a53
2026-01-05T07:14:41.025281Z
false
cqparts/cqparts
https://github.com/cqparts/cqparts/blob/018e87e14c2c4d1d40b4bfe6a7e22bcf9baf0a53/src/cqparts_template/catalogue/scripts/build.py
src/cqparts_template/catalogue/scripts/build.py
#!/usr/bin/env python import argparse import os import inspect import csv import cqparts from cqparts.catalogue import JSONCatalogue # TODO: import your library # TODO: if you don't offer a catalogue for your library, then # remove this `scripts` folder. import cqparts_template from cqparts_template.clamp.peg import ClothesPeg # ---------- Parameter Defaults ---------- _this_path = os.path.dirname(os.path.abspath(inspect.getfile(inspect.currentframe()))) def _relative_path_to(path_list, filename): """Get a neat relative path to files relative to the CWD""" return os.path.join( os.path.relpath(os.path.join(*path_list), os.getcwd()), filename ) DEFAULT_OUTPUT = _relative_path_to([_this_path, '..'], 'pegs.json') DEFAULT_INPUT = _relative_path_to([_this_path], 'pegs.csv') parser = argparse.ArgumentParser( prog='Build catalogue', # TODO: change program name ) parser.add_argument( '-o', '--output', dest='output', default=DEFAULT_OUTPUT, metavar="FILE", help='catalogue file to write', ) parser.add_argument( '-i', '--input', dest='input', default=DEFAULT_INPUT, metavar="FILE", help='component parameters file', ) args = parser.parse_args() # ---------- Component Builders ---------- # TODO: build your own objects in whatever way suits your application. def build_obj(cls, **kwargs): # Take any parameters relevant to the given class from kwargs # (all other parameters ignored) class_params = cls.class_params() obj_kwargs = { key: kwargs.pop(key, param.default) for (key, param) in class_params.items() } return cls(**obj_kwargs) # ---------- Create Catalogue ---------- catalogue = JSONCatalogue(args.output, clean=True) with open(args.input, "rb" ) as csv_fileio: csv_reader = csv.DictReader(csv_fileio) for line in csv_reader: obj = build_obj(ClothesPeg, **line) criteria = { key: line[key] for key in line.keys() if (not hasattr(obj, key)) and (key not in ('ID',)) } catalogue.add(id=line['ID'], obj=obj, criteria=criteria)
python
Apache-2.0
018e87e14c2c4d1d40b4bfe6a7e22bcf9baf0a53
2026-01-05T07:14:41.025281Z
false
cqparts/cqparts
https://github.com/cqparts/cqparts/blob/018e87e14c2c4d1d40b4bfe6a7e22bcf9baf0a53/src/cqparts_template/clamp/peg.py
src/cqparts_template/clamp/peg.py
# TODO: illustrative model only; remove this file
import cadquery
import cqparts
from cqparts.params import *
from cqparts.display import render_props
from cqparts import constraint
from cqparts.utils import CoordSystem

from .. import register


class _PegSide(cqparts.Part):
    """
    One side of a wooden clothes peg.

    Note that this docstring does not get rendered in the sphinx automated
    documentation, this is because the class is prefixed with a ``_``.

    Also: idiomatic Python dictates that components with a ``_`` prefix are
    not intended for an end-user, which is why they're not documented.
    """
    # overall side dimensions (units presumably mm — TODO confirm)
    length = PositiveFloat()
    width = PositiveFloat()
    depth = PositiveFloat()
    tip_chamfer = PositiveFloat()
    handle_tip_depth = PositiveFloat()
    handle_length = PositiveFloat()
    # spring
    spring_diam = PositiveFloat()
    spring_arm_length = PositiveFloat()  # NOTE(review): not referenced in make() below
    spring_wire_diam = PositiveFloat()
    # major indent
    major_radius = PositiveFloat()
    major_depth = PositiveFloat()
    major_offset = PositiveFloat()
    # minor indent
    minor_radius = PositiveFloat()
    minor_depth = PositiveFloat()
    minor_offset = PositiveFloat()

    # Default material to render
    _render = render_props(template='wood')

    def make(self):
        # Main profile shape of peg: a closed polygon in the XY plane,
        # extruded by `width`.
        points = [
            (0, 0),
            (self.length, 0),
            (self.length, self.handle_tip_depth),
            (self.length - self.handle_length, self.depth),
            (self.tip_chamfer, self.depth),
            (0, self.depth - self.tip_chamfer),
        ]
        side = cadquery.Workplane('XY') \
            .moveTo(*points[0]).polyline(points[1:]).close() \
            .extrude(self.width)

        # cut spring: a cylindrical pocket at the handle/jaw junction
        side = side.cut(
            cadquery.Workplane('XY') \
                .moveTo(self.length - self.handle_length, self.depth) \
                .circle(self.spring_diam / 2).extrude(self.width)
        )

        # cut indents: circular reliefs along the top edge; circle center is
        # raised so only `depth` of the radius bites into the part
        def cut_indent(obj, radius, depth, offset):
            return obj.cut(
                cadquery.Workplane('XY') \
                    .moveTo(offset, self.depth + (radius - depth)) \
                    .circle(radius).extrude(self.width)
            )

        side = cut_indent(
            obj=side,
            radius=self.major_radius,
            depth=self.major_depth,
            offset=self.major_offset,
        )
        side = cut_indent(
            obj=side,
            radius=self.minor_radius,
            depth=self.minor_depth,
            offset=self.minor_offset,
        )
        return side

    @property
    def mate_spring_center(self):
        # mate in the center of the spring, z-axis along spring center
        return constraint.Mate(self, CoordSystem(
            origin=(self.length - self.handle_length, self.depth, self.width / 2),
        ))

    @property
    def mate_side(self):
        # mate in middle of outside edge, z-axis into peg
        return constraint.Mate(self, CoordSystem(
            origin=(self.length / 2, 0, self.width / 2),
            xDir=(0, 0, -1),
            normal=(0, 1, 0),
        ))


class _Spring(cqparts.Part):
    # torsion-spring stand-in: a simple tube (annular extrusion)
    diam = PositiveFloat()
    arm_length = PositiveFloat()  # NOTE(review): not referenced in make() below
    wire_diam = PositiveFloat()
    width = PositiveFloat()

    def make(self):
        # Tube centered on the origin, slightly longer than `width` so it
        # protrudes by one wire diameter on each side.
        spring = cadquery.Workplane(
                'XY', origin=(0, 0, -(self.width / 2 + self.wire_diam))
            ) \
            .circle(self.diam / 2).circle((self.diam / 2) - self.wire_diam) \
            .extrude(self.width + (2 * self.wire_diam))
        return spring


@register(module=__name__, name='clothespeg', type='peg_clamp')
class ClothesPeg(cqparts.Assembly):
    """
    A common household clothes-peg

    .. image:: /_static/img/template/peg.png
    """
    length = PositiveFloat(75, doc="length of peg side")
    width = PositiveFloat(10, doc="peg width")
    depth = PositiveFloat(7, doc="depth of peg side, half peg's full depth")
    tip_chamfer = PositiveFloat(5, doc="chamfer at tip")
    handle_tip_depth = PositiveFloat(2, doc="depth at handle's tip")
    handle_length = PositiveFloat(30, doc="length of tapered handle")

    # spring
    spring_diam = PositiveFloat(5, doc="diameter of spring's core")
    spring_arm_length = PositiveFloat(17.5, doc="length of spring's arm converting torque to closing force")
    spring_wire_diam = PositiveFloat(1.3, doc="diamter of spring's wire")

    # major indent
    major_radius = PositiveFloat(10, doc="large indentation's radius")
    major_depth = PositiveFloat(2, doc="large indentation's depth")
    major_offset = PositiveFloat(17, doc="large indentation center's distance from tip")

    # minor indent
    minor_radius = PositiveFloat(1, doc="small indentation's radius")
    minor_depth = PositiveFloat(1, doc="small indentation's depth")
    minor_offset = PositiveFloat(31, doc="small indentation center's distance from tip")

    def make_components(self):
        # Both peg sides share this assembly's (non-hidden) parameters.
        params = self.params(hidden=False)  # common to _PegSide
        return {
            'bottom': _PegSide(**params),
            'top': _PegSide(**params),
            'spring': _Spring(
                diam=self.spring_diam,
                arm_length=self.spring_arm_length,
                wire_diam=self.spring_wire_diam,
                width=self.width,
            ),
        }

    def make_constraints(self):
        bottom = self.components['bottom']
        top = self.components['top']
        spring = self.components['spring']
        return [
            # bottom side anchors the assembly
            constraint.Fixed(bottom.mate_side),
            # top side mirrored onto the bottom at the spring center
            constraint.Coincident(
                top.mate_spring_center,
                bottom.mate_spring_center + CoordSystem(normal=(0, 0, -1))
            ),
            # spring sits at the shared spring center
            constraint.Coincident(
                spring.mate_origin,
                bottom.mate_spring_center
            ),
        ]
python
Apache-2.0
018e87e14c2c4d1d40b4bfe6a7e22bcf9baf0a53
2026-01-05T07:14:41.025281Z
false
cqparts/cqparts
https://github.com/cqparts/cqparts/blob/018e87e14c2c4d1d40b4bfe6a7e22bcf9baf0a53/src/cqparts_template/clamp/__init__.py
src/cqparts_template/clamp/__init__.py
# Public API of the ``clamp`` sub-package.
from .peg import ClothesPeg

__all__ = [
    'ClothesPeg',
]
python
Apache-2.0
018e87e14c2c4d1d40b4bfe6a7e22bcf9baf0a53
2026-01-05T07:14:41.025281Z
false
cqparts/cqparts
https://github.com/cqparts/cqparts/blob/018e87e14c2c4d1d40b4bfe6a7e22bcf9baf0a53/tests/runtests.py
tests/runtests.py
#!/usr/bin/env python
"""Test runner for cqparts: discovers unittest cases and applies label-based
skip/ignore filtering on top of the standard TextTestRunner."""
import sys
import unittest
import re
import functools
import logging
import mock
import os
import inspect
from contextlib import contextmanager

# Directory containing this script (the tests folder).
_this_path = os.path.dirname(os.path.abspath(inspect.getfile(inspect.currentframe())))


class MyTestRunner(unittest.runner.TextTestRunner):
    """TextTestRunner that filters tests by label.

    Labels are read from each test's ``_labels`` attribute (set by the
    ``testlabel`` decorator in tests/base.py).

    - *ignored* tests are dropped from the suite entirely;
    - *skipped* tests remain in the suite but are reported as skipped.
    """

    def __init__(self, *args, **kwargs):
        """ Append blacklist & whitelist attributes to TestRunner instance """
        # skip
        self.skip_whitelist = set(kwargs.pop('skip_whitelist', []))
        self.skip_blacklist = set(kwargs.pop('skip_blacklist', []))
        # ignore
        self.ignore_whitelist = set(kwargs.pop('ignore_whitelist', []))
        self.ignore_blacklist = set(kwargs.pop('ignore_blacklist', []))
        super(MyTestRunner, self).__init__(*args, **kwargs)

    @classmethod
    def test_iter(cls, suite):
        """ Iterate through test suites, and yield individual tests """
        for test in suite:
            if isinstance(test, unittest.TestSuite):
                # recurse into nested suites
                for t in cls.test_iter(test):
                    yield t
            else:
                yield test

    def run(self, testlist):
        # Change given testlist into a TestSuite
        suite = unittest.TestSuite()

        # Add each test in testlist, apply skip mechanism if necessary
        for test in self.test_iter(testlist):
            # Determine if test should be IGNORED
            # (a non-empty whitelist means "ignore everything not listed")
            ignore = bool(self.ignore_whitelist)
            test_labels = getattr(test, '_labels', set())
            if test_labels & self.ignore_whitelist:
                ignore = False
            if test_labels & self.ignore_blacklist:
                ignore = True

            if not ignore:
                # Determine if test should be SKIPPED
                # (same whitelist/blacklist logic as 'ignore' above)
                skip = bool(self.skip_whitelist)
                test_labels = getattr(test, '_labels', set())
                if test_labels & self.skip_whitelist:
                    skip = False
                if test_labels & self.skip_blacklist:
                    skip = True

                if skip:
                    # Test should be skipped.
                    #   replace original method with function "skip"
                    test_method = getattr(test, test._testMethodName)

                    # Create a "skip test" wrapper for the actual test method
                    @functools.wraps(test_method)
                    def skip_wrapper(*args, **kwargs):
                        raise unittest.SkipTest('label exclusion')
                    skip_wrapper.__unittest_skip__ = True
                    skip_wrapper.__unittest_skip_why__ = 'label exclusion'
                    setattr(test, test._testMethodName, skip_wrapper)

                suite.addTest(test)

        # Resume normal TextTestRunner function with the new test suite
        return super(MyTestRunner, self).run(suite)


@contextmanager
def readonly_tinydb(path=None):
    """Context manager that stops tinydb from writing to catalogue files
    inside this repository (opens them read-only, and suppresses utime).

    :param path: directory under which files are forced read-only
                 (default: repository root)
    """
    if path is None:
        # set path default, should be repository root
        path = os.path.realpath(os.path.join(_this_path, '..'))
    else:
        path = os.path.realpath(path)

    # __builtin__.open replacement method
    from codecs import open as codecs_open

    def _codecs_open_readonly(name, mode='r', **kwargs):
        if os.path.realpath(name).startswith(path):
            # file being used is in this repository
            return codecs_open(name, mode='r', **kwargs)  # ignore given mode; force read-only
        # otherwise, the file is probably in a temporary, read/writeable location
        return codecs_open(name, mode=mode, **kwargs)

    # patch tinydb's storage layer: file open, and timestamp updates
    with mock.patch('tinydb.storages.codecs.open', _codecs_open_readonly):
        with mock.patch('tinydb.storages.os.utime'):
            yield


# ------------------- mainline -------------------
if __name__ == "__main__":
    import argparse

    # ---- create commandline parser
    parser = argparse.ArgumentParser(description='Find and run cqparts tests.')

    # split a comma/space/etc-separated label list into individual labels
    label_list_type = lambda v: re.split(r'\W+', v)

    # test selection
    group = parser.add_argument_group("Test Selection")
    group.add_argument(
        '-p', '--pattern', dest='pattern', default='test_*',
        help="filename pattern",
    )
    group.add_argument(
        '-m', '--module', dest='module', default=None,
        help="only run tests from the 't_<module>' folder",
    )

    # label filtering
    group = parser.add_argument_group(
        "Label filters (skip & ignore)",
        description="tests can be skipped, or ignored based on their label",
    )
    group.add_argument(
        '-s', '--skip', dest='skip_blacklist',
        type=label_list_type, default=[],
        help="list of labels to skip",
    )
    group.add_argument(
        '-ds', '--dontskip', dest='skip_whitelist',
        type=label_list_type, default=[],
        help="list of labels to test (skip all others)",
    )
    group.add_argument(
        '-i', '--ignore', dest='ignore_blacklist',
        type=label_list_type, default=[],
        help="list of labels to ignore",
    )
    group.add_argument(
        '-di', '--dontignore', dest='ignore_whitelist',
        type=label_list_type, default=[],
        help="list of labels to test (ignore all others)",
    )

    # logging
    group = parser.add_argument_group("Logging")

    def logging_level_type(value):
        # accept either a named logging level, or a plain integer
        if hasattr(logging, value):
            # named type: DEBUG, INFO, WARN, ERROR, CRITICAL
            ret = getattr(logging, value)
            if isinstance(ret, int):
                return ret
        else:
            try:
                return int(value)
            except ValueError:
                pass
        raise argparse.ArgumentTypeError("bad logging level: %r" % value)

    group.add_argument(
        '-l', '--logginglevel', dest='logginglevel',
        type=logging_level_type, default=None,
        help="if specified, logging is enabled",
    )

    args = parser.parse_args()

    # enable logging
    # NOTE(review): a level of 0 (NOTSET) is falsy and will not enable
    # logging here — presumably intentional, confirm if 0 is ever passed.
    if args.logginglevel:
        import cadquery
        cadquery.freecad_impl.console_logging.enable(
            level=args.logginglevel,
        )

    # ---- Discover and run tests
    # Load tests
    class MyLoader(unittest.TestLoader):
        # modules whose ImportError is tolerated (helper library, not tests)
        ignore_modules = ['partslib']

        def _get_module_from_name(self, name):
            # ignore modules outside specific module test folder (if given)
            if args.module is not None:
                if not name.startswith('t_%s.' % args.module):
                    return None
            # ignore modules that are in the ignore list
            try:
                __import__(name)
                return sys.modules[name]
            except ImportError:
                if name in self.ignore_modules:
                    return None
                raise

    # discovery may open catalogue files; keep repository files read-only
    with readonly_tinydb():
        loader = MyLoader()
        tests = loader.discover(
            start_dir='.',
            pattern=args.pattern,
        )

    # Run tests
    testRunner = MyTestRunner(
        skip_blacklist=args.skip_blacklist,
        skip_whitelist=args.skip_whitelist,
        ignore_blacklist=args.ignore_blacklist,
        ignore_whitelist=args.ignore_whitelist,
        verbosity=2,
    )
    test_run = testRunner.run(tests)

    # Exit with 0 if tests were successful (else 1)
    return_code = not test_run.wasSuccessful()
    sys.exit(return_code)
python
Apache-2.0
018e87e14c2c4d1d40b4bfe6a7e22bcf9baf0a53
2026-01-05T07:14:41.025281Z
false
cqparts/cqparts
https://github.com/cqparts/cqparts/blob/018e87e14c2c4d1d40b4bfe6a7e22bcf9baf0a53/tests/base.py
tests/base.py
import unittest
import sys
import os
import inspect
from collections import defaultdict
from copy import copy

from cqparts import codec


# ------------------- TestCase labels -------------------

def testlabel(*labels):
    """Class decorator: attach string labels to a TestCase.

    Labels accumulate with any labels already set on the class, and are
    read by the test runner's skip/ignore filtering.

    Usage::

        @testlabel('quick')
        class MyTest(unittest.TestCase):
            def test_foo(self):
                pass
    """
    def inner(cls):
        # add labels to class (union with any inherited/previous labels)
        cls._labels = set(labels) | getattr(cls, '_labels', set())
        return cls
    return inner


# ------------------- Skip Logic -------------------

def skip_if_no_freecad():
    """Return ``(should_skip, reason)`` based on FreeCAD availability."""
    import cadquery
    reason = "freecad 'Helpers.show' could not be imported"
    try:
        from Helpers import show  # import is the test; 'show' itself unused
    except ImportError:
        # freecad import problem, skip the test
        return (True, reason)
    return (False, reason)


class suppress_stdout_stderr(object):
    """
    Suppress stdout & stderr from any process::

        >>> from base import suppress_stdout_stderr
        >>> with suppress_stdout_stderr():
        ...     print("can't see me")

    a copy of: cadquery.freecad_impl.suppress_stdout_stderr
    """
    def __init__(self):
        # Open null files
        self.null_stdout = os.open(os.devnull, os.O_RDWR)
        self.null_stderr = os.open(os.devnull, os.O_RDWR)
        # Save the actual stdout (1) and stderr (2) file descriptors.
        self.saved_stdout = os.dup(1)
        self.saved_stderr = os.dup(2)

    def __enter__(self):
        # Assign the null pointers to stdout and stderr.
        os.dup2(self.null_stdout, 1)
        os.dup2(self.null_stderr, 2)

    def __exit__(self, *_):
        # Re-assign the real stdout/stderr back to (1) and (2)
        os.dup2(self.saved_stdout, 1)
        os.dup2(self.saved_stderr, 2)
        # Close all file descriptors
        os.close(self.null_stdout)
        os.close(self.null_stderr)
        os.close(self.saved_stdout)
        os.close(self.saved_stderr)


# ------------------- Debugging -------------------

def debug_on_exception(func):
    """
    Opens an ``ipdb`` debugging prompt at the point of failure when an
    uncaught exception is raised.

    .. warning::

        must not be in production code... only to be used for
        debugging purposes.

    Usage::

        from base import debug_on_exception

        @debug_on_exception
        def foo(a=100):
            return 1 / a

        foo(0)

    results in an ``ipdb`` prompt::

        Traceback (most recent call last):
          File "./test.py", line 8, in wrapper
            func(*args, **kwargs)
          File "./test.py", line 19, in foo
            return 1 / a
        ZeroDivisionError: integer division or modulo by zero
        > /home/me/temp/test.py(19)foo()
             18 def foo(a=100):
        ---> 19     return 1 / a
             20

        ipdb> !a
        0
        ipdb>
    """
    def ipdb_wrapper(*args, **kwargs):
        try:
            # FIX: the original discarded the wrapped function's return
            # value, silently breaking any caller that used it.
            return func(*args, **kwargs)
        except:  # deliberately broad: this is a debugging aid only
            import ipdb, traceback, sys
            # renamed from `type`/`value` to avoid shadowing builtins
            (exc_type, exc_value, exc_tb) = sys.exc_info()
            traceback.print_exc()
            ipdb.post_mortem(exc_tb)
    return ipdb_wrapper


# ------------------- Core TestCase -------------------

class CQPartsTest(unittest.TestCase):
    """Base TestCase with a few extra assertion helpers."""

    def assertHasAttr(self, obj, attr):
        # True if `obj` has an attribute named `attr`
        self.assertTrue(hasattr(obj, attr))

    def assertEqualAndType(self, obj, exp, t):
        """Assert ``obj == exp`` and that ``obj`` is exactly of type ``t``."""
        self.assertEqual(obj, exp)
        # FIX: was `type(exp)` — that checked the *expected* literal's type,
        # so e.g. obj=1.0, exp=1, t=int passed despite obj being a float.
        # The object under test is what must be type-checked.
        self.assertEqual(type(obj), t)  # explicit test; intentionally not isinstance()

    def assertAlmostEqual(self, first, second, places=None, msg=None, delta=None):
        """
        Wrap existing method to accept lists of values, and to give an
        informative error.
        """
        if all(isinstance(x, (tuple, list)) for x in (first, second)):
            if len(first) != len(second):
                raise ValueError("list sizes must be the same")
            # element-wise comparison with the same tolerance settings
            for (a, b) in zip(first, second):
                super(CQPartsTest, self).assertAlmostEqual(
                    a, b, places=places, msg=msg, delta=delta,
                )
        else:
            super(CQPartsTest, self).assertAlmostEqual(
                first, second, places=places, msg=msg, delta=delta,
            )


class CodecRegisterTests(CQPartsTest):
    """TestCase that isolates the codec exporter/importer registries,
    restoring them after each test."""

    def setUp(self):
        super(CodecRegisterTests, self).setUp()
        # Retain mapping
        self.orig_exporter_index = codec.exporter_index
        self.orig_importer_index = codec.importer_index
        # Set Mapping (fresh, empty registries for the test)
        codec.exporter_index = defaultdict(dict)
        codec.importer_index = defaultdict(dict)

    def tearDown(self):
        super(CodecRegisterTests, self).tearDown()
        # Restore mapping
        codec.exporter_index = self.orig_exporter_index
        codec.importer_index = self.orig_importer_index
python
Apache-2.0
018e87e14c2c4d1d40b4bfe6a7e22bcf9baf0a53
2026-01-05T07:14:41.025281Z
false
cqparts/cqparts
https://github.com/cqparts/cqparts/blob/018e87e14c2c4d1d40b4bfe6a7e22bcf9baf0a53/tests/test-files/thread_catalogue_build.py
tests/test-files/thread_catalogue_build.py
#!/usr/bin/env python
"""Generate a test catalogue of threads: every combination of thread type,
length, and diameter listed below."""
import argparse
import sys
import itertools

import cqparts
from cqparts.catalogue import JSONCatalogue

# Threads
from cqparts_fasteners.solidtypes import threads

# Commandline arguments
parser = argparse.ArgumentParser(description="Generate a test catalogue")
parser.add_argument(
    'out', type=str, nargs='?', default='thread_catalogue.json',
    help='filename to output'
)
args = parser.parse_args()

catalogue = JSONCatalogue(args.out, clean=True)

# -------- Add catalogue items
names = ['ball_screw', 'iso68', 'triangular']
lengths = [1, 2, 4, 10]
diameters = [1, 3, 4, 5]

# one catalogue entry per (type, length, diameter) combination
combinations = itertools.product(names, lengths, diameters)
for (index, (name, length, diam)) in enumerate(combinations):
    thread_class = threads.find(name=name)
    thread = thread_class(
        _simple=False,
        length=length,
        diameter=diam,
    )
    print("adding: %r" % thread)
    catalogue.add(
        "%s_%gx%g_%03i" % (name, length, diam, index),
        thread,
        criteria={
            'type': name,
            'length': length,
            'diam': diam,
        },
    )

# close catalogue after additions
catalogue.close()
python
Apache-2.0
018e87e14c2c4d1d40b4bfe6a7e22bcf9baf0a53
2026-01-05T07:14:41.025281Z
false
cqparts/cqparts
https://github.com/cqparts/cqparts/blob/018e87e14c2c4d1d40b4bfe6a7e22bcf9baf0a53/tests/test-files/catalogue_build.py
tests/test-files/catalogue_build.py
#!/usr/bin/env python
"""Generate a test catalogue populated with simple partslib components."""
import argparse
import sys

import cqparts
from cqparts.catalogue import JSONCatalogue

sys.path.append('..')
import partslib

# Commandline arguments
parser = argparse.ArgumentParser(description="Generate a test catalogue")
parser.add_argument(
    'out', type=str, nargs='?', default='catalogue.json',
    help='filename to output'
)
args = parser.parse_args()

catalogue = JSONCatalogue(args.out, clean=True)

# -------- Add catalogue items
# (id, constructor-parameters) pairs, per component class
box_data = [
    ('box_a', {'length': 10, 'width': 10, 'height': 10}),
    ('box_b', {'length': 20, 'width': 20, 'height': 20}),
    ('box_c', {'length': 10, 'width': 20, 'height': 30}),
]
cylinder_data = [
    ('cyl_a', {'length': 5, 'radius': 3}),
    ('cyl_b', {'length': 10, 'radius': 0.5}),
    ('cyl_c', {'length': 4, 'radius': 10}),
]
cubestack_data = [
    ('cs_01', {'size_a': 2, 'size_b': 1}),
    ('cs_02', {'size_a': 10, 'size_b': 4}),
    ('cs_03', {'size_a': 1, 'size_b': 10}),
]
simplecar_data = [
    ('ford', {'width': 50, 'length': 100, 'wheelbase': 50, 'wheel_radius': 10}),
    ('holden', {'width': 40, 'length': 90, 'wheelbase': 40, 'wheel_radius': 20}),
    ('merc', {'width': 60, 'length': 120, 'wheelbase': 90, 'wheel_radius': 25}),
]

# Single data-driven loop; insertion order matches the per-class lists above.
for (component_class, dataset) in [
    (partslib.Box, box_data),
    (partslib.Cylinder, cylinder_data),
    (partslib.CubeStack, cubestack_data),
    (partslib.SimpleCar, simplecar_data),
]:
    for (idstr, params) in dataset:
        obj = component_class(**params)
        print("adding: %r" % obj)
        catalogue.add(id=idstr, obj=obj, criteria={})

# close catalogue after additions
catalogue.close()
python
Apache-2.0
018e87e14c2c4d1d40b4bfe6a7e22bcf9baf0a53
2026-01-05T07:14:41.025281Z
false
cqparts/cqparts
https://github.com/cqparts/cqparts/blob/018e87e14c2c4d1d40b4bfe6a7e22bcf9baf0a53/tests/t_cqparts_fasteners/test_fasteners_fasteners_screw.py
tests/t_cqparts_fasteners/test_fasteners_fasteners_screw.py
from base import CQPartsTest
from base import testlabel

# units under test
from cqparts_fasteners.fasteners.screw import ScrewFastener

# ---------- Test Assembly ----------
import cadquery
import cqparts
from partslib.basic import Box
from cqparts import constraint
from cqparts.utils import CoordSystem


class FastenedAssembly(cqparts.Assembly):
    """Two stacked boxes held together by a ScrewFastener, offset (1, 2)
    from the top face's center."""

    def make_components(self):
        base = Box(length=20, width=20, height=12)
        top = Box(length=18, width=18, height=18)
        return {
            'base': base,
            'top': top,
            'fastener': ScrewFastener(parts=[base, top]),
        }

    def make_constraints(self):
        base = self.components['base']
        top = self.components['top']
        fastener = self.components['fastener']
        return [
            constraint.Fixed(base.mate_bottom),
            constraint.Coincident(top.mate_bottom, base.mate_top),
            constraint.Coincident(fastener.mate_origin, top.mate_top + CoordSystem((1, 2, 0))),
        ]


# ---------- Unit Tests ----------

class ScrewFastenerTest(CQPartsTest):
    def test_fastener(self):
        obj = FastenedAssembly()
        screw = obj.find('fastener.screw')
        # FIX: assertEquals is a deprecated alias (removed in Python 3.12);
        # use assertEqual.
        # screw head sits on the top face: z = 12 + 18 = 30, at (1, 2) offset
        self.assertEqual(screw.world_coords.origin, cadquery.Vector((1, 2, 30)))
        # screw must reach past 'top' into 'base'...
        self.assertGreater(screw.bounding_box.zlen, obj.find('base').height)
        # ...but not all the way through both parts
        self.assertLess(
            screw.bounding_box.zlen,
            obj.find('top').height + obj.find('base').height
        )
python
Apache-2.0
018e87e14c2c4d1d40b4bfe6a7e22bcf9baf0a53
2026-01-05T07:14:41.025281Z
false
cqparts/cqparts
https://github.com/cqparts/cqparts/blob/018e87e14c2c4d1d40b4bfe6a7e22bcf9baf0a53/tests/t_cqparts_fasteners/test_fasteners_catalogue.py
tests/t_cqparts_fasteners/test_fasteners_catalogue.py
from base import testlabel

from cqparts.utils.test import CatalogueTest
from cqparts.catalogue import JSONCatalogue


def add_catalogue(filename):
    """Create a TestCase for the given JSON catalogue, label it
    'catalogue', and publish it at module level for test discovery."""
    generated = CatalogueTest.create_from(JSONCatalogue(filename))
    labelled = testlabel('catalogue')(generated)
    globals()[labelled.__name__] = labelled


# BoltDepot catalogues
add_catalogue('../src/cqparts_fasteners/catalogue/boltdepot-bolts.json')
add_catalogue('../src/cqparts_fasteners/catalogue/boltdepot-nuts.json')
add_catalogue('../src/cqparts_fasteners/catalogue/boltdepot-woodscrews.json')
python
Apache-2.0
018e87e14c2c4d1d40b4bfe6a7e22bcf9baf0a53
2026-01-05T07:14:41.025281Z
false
cqparts/cqparts
https://github.com/cqparts/cqparts/blob/018e87e14c2c4d1d40b4bfe6a7e22bcf9baf0a53/tests/t_cqparts_fasteners/__init__.py
tests/t_cqparts_fasteners/__init__.py
python
Apache-2.0
018e87e14c2c4d1d40b4bfe6a7e22bcf9baf0a53
2026-01-05T07:14:41.025281Z
false
cqparts/cqparts
https://github.com/cqparts/cqparts/blob/018e87e14c2c4d1d40b4bfe6a7e22bcf9baf0a53/tests/t_cqparts_fasteners/test_fasteners_fasteners_nutbolt.py
tests/t_cqparts_fasteners/test_fasteners_fasteners_nutbolt.py
from base import CQPartsTest
from base import testlabel

# units under test
from cqparts_fasteners.fasteners.nutbolt import NutAndBoltFastener

# ---------- Test Assembly ----------
import cadquery
import cqparts
from partslib.basic import Box
from cqparts import constraint
from cqparts.utils import CoordSystem


class FastenedAssembly(cqparts.Assembly):
    """Two stacked boxes held together by a NutAndBoltFastener, offset
    (1, 2) from the top face's center."""

    def make_components(self):
        base = Box(length=20, width=20, height=12)
        top = Box(length=18, width=18, height=18)
        return {
            'base': base,
            'top': top,
            'fastener': NutAndBoltFastener(parts=[base, top]),
        }

    def make_constraints(self):
        base = self.components['base']
        top = self.components['top']
        fastener = self.components['fastener']
        return [
            constraint.Fixed(base.mate_bottom),
            constraint.Coincident(top.mate_bottom, base.mate_top),
            constraint.Coincident(fastener.mate_origin, top.mate_top + CoordSystem((1, 2, 0))),
        ]


# ---------- Unit Tests ----------

# FIX: was named ScrewFastenerTest (copy-paste from the screw test module);
# renamed to match the fastener actually under test.
class NutAndBoltFastenerTest(CQPartsTest):
    def test_fastener(self):
        obj = FastenedAssembly()
        bolt = obj.find('fastener.bolt')
        nut = obj.find('fastener.nut')
        # FIX: assertEquals is a deprecated alias (removed in Python 3.12);
        # use assertEqual.
        # bolt head sits on the top face: z = 12 + 18 = 30, at (1, 2) offset
        self.assertEqual(bolt.world_coords.origin, cadquery.Vector((1, 2, 30)))
        # bolt must pass entirely through both parts (to reach the nut)
        self.assertGreater(
            bolt.bounding_box.zlen,
            obj.find('top').height + obj.find('base').height
        )
        # nut sits at the bottom of the stack
        self.assertEqual(nut.world_coords.origin, cadquery.Vector((1, 2, 0)))
python
Apache-2.0
018e87e14c2c4d1d40b4bfe6a7e22bcf9baf0a53
2026-01-05T07:14:41.025281Z
false
cqparts/cqparts
https://github.com/cqparts/cqparts/blob/018e87e14c2c4d1d40b4bfe6a7e22bcf9baf0a53/tests/t_cqparts_fasteners/test_fasteners_thread.py
tests/t_cqparts_fasteners/test_fasteners_thread.py
import unittest

from base import testlabel

from cqparts.utils.test import CatalogueTest
from cqparts.catalogue import JSONCatalogue

# Build a TestCase covering every entry in the pre-built thread catalogue.
_catalogue = JSONCatalogue('test-files/thread_catalogue.json')
_test_cls = testlabel('complex_thread')(CatalogueTest.create_from(_catalogue))

# FIXME: when #1 is fixed, remove this so tests are not permanently skipped
_test_cls = unittest.skip('skipped until #1 is fixed')(_test_cls)

# publish at module level so unittest discovery picks it up
globals()[_test_cls.__name__] = _test_cls
python
Apache-2.0
018e87e14c2c4d1d40b4bfe6a7e22bcf9baf0a53
2026-01-05T07:14:41.025281Z
false
cqparts/cqparts
https://github.com/cqparts/cqparts/blob/018e87e14c2c4d1d40b4bfe6a7e22bcf9baf0a53/tests/t_cqparts_template/test_template_catalogue.py
tests/t_cqparts_template/test_template_catalogue.py
from base import testlabel

from cqparts.utils.test import CatalogueTest
from cqparts.catalogue import JSONCatalogue


def add_catalogue(filename):
    # Generate a TestCase from the catalogue, label it 'catalogue',
    # and publish it at module level so unittest discovery finds it.
    catalogue = JSONCatalogue(filename)
    cls = testlabel('catalogue')(CatalogueTest.create_from(catalogue))
    globals()[cls.__name__] = cls


# cqparts_template pegs catalogue
# (previous comment said "BoltDepot catalogues" — copy-paste from the
# fasteners test module; this file tests the template library's catalogue)
add_catalogue('../src/cqparts_template/catalogue/pegs.json')
python
Apache-2.0
018e87e14c2c4d1d40b4bfe6a7e22bcf9baf0a53
2026-01-05T07:14:41.025281Z
false
cqparts/cqparts
https://github.com/cqparts/cqparts/blob/018e87e14c2c4d1d40b4bfe6a7e22bcf9baf0a53/tests/t_cqparts_template/__init__.py
tests/t_cqparts_template/__init__.py
python
Apache-2.0
018e87e14c2c4d1d40b4bfe6a7e22bcf9baf0a53
2026-01-05T07:14:41.025281Z
false
cqparts/cqparts
https://github.com/cqparts/cqparts/blob/018e87e14c2c4d1d40b4bfe6a7e22bcf9baf0a53/tests/t_cqparts/test_part.py
tests/t_cqparts/test_part.py
from copy import copy from math import sqrt from base import CQPartsTest from base import testlabel import cadquery from partslib import Box, Cylinder # Unit under test import cqparts from cqparts.errors import MakeError from cqparts.utils import CoordSystem from cqparts.params import Int class PreMaturePartTests(CQPartsTest): def test_no_make(self): class P(cqparts.Part): pass # no content with self.assertRaises(NotImplementedError): P().local_obj def test_bad_make(self): class P(cqparts.Part): def make(self): return 1 with self.assertRaises(MakeError): P().local_obj class MakeSimpleTests(CQPartsTest): def test_auto_simplify(self): class P(cqparts.Part): def make(self): return cadquery.Workplane('XY').circle(1).extrude(1) # complex part p_complex = P() cbb = p_complex.local_obj.val().BoundingBox() # complex geometry # simplified part p_simple = P(_simple=True) sbb = p_simple.local_obj.val().BoundingBox() # simplified geometry self.assertAlmostEqual(cbb.xmin, sbb.xmin) self.assertAlmostEqual(cbb.xmax, sbb.xmax) self.assertAlmostEqual(cbb.ymin, sbb.ymin) self.assertAlmostEqual(cbb.ymax, sbb.ymax) self.assertAlmostEqual(cbb.zmin, sbb.zmin) self.assertAlmostEqual(cbb.zmax, sbb.zmax) def test_simplify(self): class P(cqparts.Part): def make(self): return cadquery.Workplane('XY').box(1,1,1) # small box def make_simple(self): return cadquery.Workplane('XY').box(10,10,10) # big box # complex geometry yields unit cube cbb = P().local_obj.val().BoundingBox() self.assertAlmostEqual((cbb.xmin, cbb.xmax), (-0.5, 0.5)) # complex geometry yields cube with 10xunit sides sbb = P(_simple=True).local_obj.val().BoundingBox() self.assertAlmostEqual((sbb.xmin, sbb.xmax), (-5, 5)) class BuildCycleTests(CQPartsTest): def test_build(self): class P(cqparts.Part): def __init__(self, *args, **kwargs): self._flag = False super(P, self).__init__(*args, **kwargs) def make(self): self._flag = True return cadquery.Workplane('XY').box(1,1,1) p = P() self.assertFalse(p._flag) p.build() 
self.assertTrue(p._flag) def test_set_world_coords(self): class P(cqparts.Part): def make(self): return cadquery.Workplane('XY').box(1,1,1) p = P() self.assertIsNone(p.world_obj) p.world_coords = CoordSystem() self.assertIsNotNone(p.world_obj) p.world_coords = None self.assertIsNone(p.world_obj) def test_set_world_obj(self): class P(cqparts.Part): def make(self): return cadquery.Workplane('XY').box(1,1,1) p = P() p.world_coords = CoordSystem() self.assertIsNotNone(p.world_obj) with self.assertRaises(ValueError): p.world_obj = 'value is irrelevant' def test_set_local_obj(self): class P(cqparts.Part): def make(self): return cadquery.Workplane('XY').box(1, 1, 1) p = P() p.world_coords = CoordSystem(origin=(0,0,10)) bb = p.world_obj.val().BoundingBox() self.assertAlmostEqual(bb.DiagonalLength, sqrt(3)) self.assertAlmostEqual((bb.zmin, bb.zmax), (-0.5 + 10, 0.5 + 10)) # change local p.local_obj = cadquery.Workplane('XY').box(10, 10, 10) bb = p.world_obj.val().BoundingBox() self.assertAlmostEqual(bb.DiagonalLength, sqrt(3) * 10) self.assertAlmostEqual((bb.zmin, bb.zmax), (-5 + 10, 5 + 10)) class CopyTests(CQPartsTest): def test_copy(self): class P(cqparts.Part): a = Int(10) def make(self): return cadquery.Workplane('XY').box(1, 1, 1) p1 = P() p2 = copy(p1) class BoundingBoxTests(CQPartsTest): def test_box(self): obj = Box(length=1, width=2, height=3) bb = obj.bounding_box self.assertAlmostEqual( (bb.xmin, bb.ymin, bb.zmin, bb.xmax, bb.ymax, bb.zmax), (-0.5, -1, -1.5, 0.5, 1, 1.5), places=1 ) def test_cylinder(self): obj = Cylinder(length=1, radius=2) bb = obj.bounding_box self.assertAlmostEqual( (bb.xmin, bb.ymin, bb.zmin, bb.xmax, bb.ymax, bb.zmax), (-2, -2, -0.5, 2, 2, 0.5), places=1 )
python
Apache-2.0
018e87e14c2c4d1d40b4bfe6a7e22bcf9baf0a53
2026-01-05T07:14:41.025281Z
false
cqparts/cqparts
https://github.com/cqparts/cqparts/blob/018e87e14c2c4d1d40b4bfe6a7e22bcf9baf0a53/tests/t_cqparts/test_params.py
tests/t_cqparts/test_params.py
from copy import copy from base import CQPartsTest from base import testlabel import cadquery from partslib import Box from cqparts.constraint import Fixed # Unit under test from cqparts.params import * from cqparts.errors import ParameterError from cqparts import Part, Assembly class ParametricObjectTests(CQPartsTest): def test_inherited_params(self): class T1(ParametricObject): a = Float(1.2) b = Int(3) class T2(T1): c = IntRange(0, 10, 5) self.assertHasAttr(T2(), 'a') self.assertHasAttr(T2(), 'c') self.assertEqual(T2().a, 1.2) self.assertEqual(T2(a=5.2).a, 5.2) def test_copy(self): class T(ParametricObject): a = Float(1.2) b = Int(5) t1 = T(a=2.1) t2 = copy(t1) # copy is a copy self.assertEqual(t1.a, t2.a) self.assertEqual(t1.b, t2.b) # changing t1 values doesn't change t2 values t1.a = 4.9 t1.b = 8 self.assertNotEqual(t1.a, t2.a) self.assertNotEqual(t1.b, t2.b) def test_bad_params(self): class P(ParametricObject): a = Float(1.2) b = Int(3) p1 = P(a=1, b=2) with self.assertRaises(ParameterError): p2 = P(a=1, b=2, c=3) # no 'c' parameter class DeserializeTestClass(ParametricObject): # class for: # ParametricObjectSerializeTests.test_deserialize # ParametricObjectSerializeTests.test_deserialize_baddata a = Float(1.2) b = Int(3) class ParametricObjectSerializeTests(CQPartsTest): def test_serialize(self): class P(ParametricObject): a = Float(1.2) b = Int(3) data = P().serialize() # verify serialized structure self.assertEqual(set(data.keys()), set(['lib', 'class', 'params'])) self.assertEqual(set(data['lib'].keys()), set(['name', 'version'])) self.assertEqual(set(data['class'].keys()), set(['name', 'module'])) self.assertEqual(data['params'], {'a': 1.2, 'b': 3}) def test_deserialize(self): p1 = DeserializeTestClass() data = p1.serialize() p2 = ParametricObject.deserialize(data) # verify deserialized object is equal self.assertEqual(type(p1), type(p2)) self.assertEqual((p1.a, p1.b), (p2.a, p2.b)) def test_deserialize_bad_module(self): data = 
DeserializeTestClass().serialize() # assumption: test_deserialize passes data['class']['module'] += '__noexist__' with self.assertRaises(ImportError): ParametricObject.deserialize(data) def test_deserialize_bad_name(self): data = DeserializeTestClass().serialize() # assumption: test_deserialize passes data['class']['name'] += '__noexist__' with self.assertRaises(ImportError): ParametricObject.deserialize(data) class ParameterTests(CQPartsTest): def test_sphinx_param(self): class MyParam(Parameter): pass # default p1 = MyParam() self.assertIsInstance(p1._param(), str) # custom p2 = MyParam(doc="custom doc") self.assertEqual(p2._param(), "custom doc") def test_sphinx_type(self): # default class MyParam1(Parameter): pass p1 = MyParam1() self.assertIsInstance(p1._type(), str) # custom class MyParam2(Parameter): _doc_type = "custom type" p2 = MyParam2() self.assertEqual(p2._type(), "custom type") def test_new(self): class MyParam(Parameter): pass p1 = MyParam(10) p2 = p1.new(20) self.assertIsInstance(p2, MyParam) self.assertEqual(p2.default, 20) class ObjectWrapperTests(CQPartsTest): def test_as_parameter_nullable(self): class _ContainerParam(object): def __init__(self, a=1, b=2, c=3): self.a = a self.b = b self.c = c ContainerParam = as_parameter(nullable=True)(_ContainerParam) self.assertTrue(issubclass(ContainerParam, Parameter)) class Thing(ParametricObject): foo = ContainerParam({'a': 10, 'b': 100}, doc="controls stuff") thing = Thing(foo={'a': 20}) self.assertIsInstance(thing.foo, _ContainerParam) self.assertEqual(thing.foo.a, 20) self.assertEqual(thing.foo.b, 2) self.assertEqual(thing.foo.c, 3) thing = Thing(foo=None) self.assertIsNone(thing.foo) def test_as_parameter_not_nullable(self): @as_parameter(nullable=False) class ContainerParam(object): def __init__(self, a=1, b=2, c=3): self.a = a self.b = b self.c = c self.assertTrue(issubclass(ContainerParam, Parameter)) class Thing(ParametricObject): foo = ContainerParam({'a': 10, 'b': 100}, doc="controls stuff") 
with self.assertRaises(ParameterError): Thing(foo=None) def test_as_parameter_default(self): class Coords(object): def __init__(self, x=1, y=2): self.x = x self.y = y class CoordsParam(Parameter): def type(self, value): return Coords(*value) class Thing(ParametricObject): c1 = CoordsParam() # no default (default=None) c2 = CoordsParam(default=(10, 20)) obj1 = Thing() obj2 = Thing() # No default self.assertIsNone(obj1.c1) self.assertIsNone(obj2.c1) # Given Default self.assertEqual((obj1.c2.x, obj1.c2.y), (10, 20)) self.assertEqual((obj2.c2.x, obj2.c2.y), (10, 20)) self.assertNotEqual(id(obj1.c2), id(obj2.c2)) class ParameterTypeTests(CQPartsTest): pass class FloatTests(ParameterTypeTests): def test_float(self): p = Float(1.5) # default self.assertEqual(p.default, 1.5) self.assertIsInstance(p.default, float) # casting self.assertEqual(p.cast(1), 1) self.assertIsInstance(p.cast(1), float) self.assertRaises(ParameterError, p.cast, 'abc') # nullable self.assertIsNone(p.cast(None)) def test_positive_float(self): p = PositiveFloat(1.5) self.assertEqual(p.default, 1.5) self.assertIsInstance(p.cast(1), float) self.assertIsInstance(p.cast(0), float) self.assertRaises(ParameterError, p.cast, 'abc') self.assertRaises(ParameterError, p.cast, -1) self.assertIsNone(p.cast(None)) def test_float_range(self): p = FloatRange(-10, 10, 5) # default self.assertEqual(p.default, 5) self.assertIsInstance(p.default, float) # casting self.assertEqual(p.cast(0), 0) self.assertIsInstance(p.cast(0), float) self.assertEqual(p.cast(-10), -10) self.assertEqual(p.cast(10), 10) # outside range self.assertRaises(ParameterError, p.cast, -11) self.assertRaises(ParameterError, p.cast, 11) # nullable self.assertIsNone(p.cast(None)) class IntTests(ParameterTypeTests): def test_int(self): p = Int(1) # default self.assertEqual(p.default, 1) self.assertIsInstance(p.default, int) # casting self.assertEqual(p.cast(15), 15) self.assertIsInstance(p.cast(10), int) self.assertRaises(ParameterError, p.cast, 'abc') 
# nullable self.assertIsNone(p.cast(None)) def test_positive_int(self): p = PositiveInt(1) # default self.assertEqual(p.default, 1) self.assertIsInstance(p.default, int) # casting self.assertEqual(p.cast(15), 15) self.assertIsInstance(p.cast(10), int) self.assertRaises(ParameterError, p.cast, 'abc') # nullable self.assertIsNone(p.cast(None)) def test_int_range(self): p = IntRange(-10, 10, 5) # default self.assertEqual(p.default, 5) self.assertIsInstance(p.default, int) # casting self.assertEqual(p.cast(0), 0) self.assertIsInstance(p.cast(0), int) self.assertEqual(p.cast(-10), -10) self.assertEqual(p.cast(10), 10) # outside range self.assertRaises(ParameterError, p.cast, -11) self.assertRaises(ParameterError, p.cast, 11) # nullable self.assertIsNone(p.cast(None)) class BoolTests(ParameterTypeTests): def test_bool(self): p = Boolean(True) # default self.assertEqual(p.default, True) self.assertIsInstance(p.default, bool) # casting self.assertEqualAndType(p.cast(0), False, bool) self.assertEqualAndType(p.cast(1), True, bool) self.assertEqualAndType(p.cast(''), False, bool) self.assertEqualAndType(p.cast('abc'), True, bool) # nullable self.assertIsNone(p.cast(None)) class StringTests(ParameterTypeTests): def test_string(self): p = String('abc') # default self.assertEqual(p.default, 'abc') self.assertIsInstance(p.default, str) # casting self.assertEqual(p.cast('xyz'), 'xyz') self.assertEqual(p.cast(''), '') self.assertEqual(p.cast(1), '1') self.assertEqual(p.cast(1.2), '1.2') # nullable self.assertIsNone(p.cast(None)) def test_lc_string(self): p = LowerCaseString('AbC') # default self.assertEqual(p.default, 'abc') self.assertIsInstance(p.default, str) # casting self.assertEqual(p.cast('XYZ'), 'xyz') self.assertEqual(p.cast(''), '') self.assertEqual(p.cast(1), '1') self.assertEqual(p.cast(1.2), '1.2') # nullable self.assertIsNone(p.cast(None)) def test_uc_string(self): p = UpperCaseString('AbC') # default self.assertEqual(p.default, 'ABC') self.assertIsInstance(p.default, 
str) # casting self.assertEqual(p.cast('xyz'), 'XYZ') self.assertEqual(p.cast(''), '') self.assertEqual(p.cast(1), '1') self.assertEqual(p.cast(1.2), '1.2') # nullable self.assertIsNone(p.cast(None)) class NonNullTests(ParameterTypeTests): def test_non_null(self): p1 = NonNullParameter(1) # not nullable self.assertRaises(ParameterError, p1.cast, None) # Inherited class P(Int, NonNullParameter): pass p2 = P(1) self.assertRaises(ParameterError, p2.cast, None) class PartsListTests(ParameterTypeTests): def test_partslist(self): p = PartsList() (b1, b2) = (Box(), Box()) v = p.cast([b1, b2]) # list self.assertIsInstance(v, (list, tuple)) self.assertEqual([id(x) for x in v], [id(b1), id(b2)]) def test_partstuple(self): p = PartsList() (b1, b2) = (Box(), Box()) v = p.cast((b1, b2)) # tuple self.assertIsInstance(v, (list, tuple)) self.assertEqual([id(x) for x in v], [id(b1), id(b2)]) def test_bad_value(self): p = PartsList() with self.assertRaises(ParameterError): p.cast(1) def test_bad_part(self): p = PartsList() with self.assertRaises(ParameterError): p.cast([Box(), 1]) class ComponentRefTests(ParameterTypeTests): def test_part(self): p = ComponentRef() v = p.cast(Part()) self.assertIsInstance(v, Part) def test_assembly(self): p = ComponentRef() v = p.cast(Assembly()) self.assertIsInstance(v, Assembly) def test_nullable(self): p = ComponentRef() v = p.cast(None) self.assertIsNone(v) def test_parent(self): # Define test classes class A(Part): parent = ComponentRef() def make(self): s = self.parent.size return cadquery.Workplane('XY').box(s, s, s) class B(Assembly): size = Float(1) def make_components(self): return {'inner': A(parent=self)} def make_constraints(self): return [Fixed(self.components['inner'].mate_origin)] # Instantiate & Test obj = B(size=2) obj.build() # shouldn't get into a recursive loop self.assertEqual(id(obj.find('inner').parent), id(obj)) # Test inner box size == parent size (because that's how it was designed) bb = obj.find('inner').bounding_box 
self.assertAlmostEqual(bb.xmin, -1) self.assertAlmostEqual(bb.xmax, 1)
python
Apache-2.0
018e87e14c2c4d1d40b4bfe6a7e22bcf9baf0a53
2026-01-05T07:14:41.025281Z
false
cqparts/cqparts
https://github.com/cqparts/cqparts/blob/018e87e14c2c4d1d40b4bfe6a7e22bcf9baf0a53/tests/t_cqparts/test_codecs.py
tests/t_cqparts/test_codecs.py
import unittest import mock import os import tempfile import shutil from collections import defaultdict from contextlib import contextmanager from base import CQPartsTest, CodecRegisterTests from base import testlabel from base import suppress_stdout_stderr # Unit(s) under test from cqparts import codec from cqparts import Part, Assembly, Component from partslib import Box, Cylinder, CubeStack class CodecTest(CQPartsTest): def assertFilesizeZero(self, filename): self.assertTrue(os.path.exists(filename)) self.assertEqual(os.stat(filename).st_size, 0) def assertFilesizeNonZero(self, filename): self.assertTrue(os.path.exists(filename), "file does not exist: {!r}".format(filename)) self.assertGreater(os.stat(filename).st_size, 0, "file is empty: {!r}".format(filename)) @contextmanager def assertCreatesFile(self, filename, nonzero=True): """ Assert that the code in context creates the given file :param filename: name of file to be created :type filename: :class:`str` """ self.assertFalse(os.path.exists(filename), "file already exists: {}".format(filename)) yield self.assertTrue(os.path.exists(filename), "file was not created: {}".format(filename)) if nonzero: self.assertFilesizeNonZero(filename) class CodecFileTest(CodecTest): def setUp(self): # Create a named temporary file to write to handle, self.filename = tempfile.mkstemp() os.close(handle) def tearDown(self): # Remove temporary file os.unlink(self.filename) class CodecFolderTest(CodecTest): def setUp(self): # Create a temporary folder to populate, then delete self.foldername = tempfile.mkdtemp() def tearDown(self): # Remove temporary folder, and all content recursively shutil.rmtree(self.foldername) # ------- Register Tests ------- class ExporterRegisterTests(CodecRegisterTests): def test_register(self): @codec.register_exporter('abc', Part) class Abc(codec.Exporter): pass self.assertEqual(codec.exporter_index, {'abc': {Part: Abc}}) def test_get_registered(self): @codec.register_exporter('abc', Part) class 
Abc(codec.Exporter): pass self.assertIsInstance(codec.get_exporter(Box(), 'abc'), Abc) def test_get_registered_subtype(self): @codec.register_exporter('abc', Component) class Abc(codec.Exporter): pass self.assertIsInstance(codec.get_exporter(Box(), 'abc'), Abc) # Part def test_bad_name(self): with self.assertRaises(TypeError): @codec.register_exporter(123, Part) # bad name type class Abc(codec.Exporter): pass def test_bad_base_class(self): with self.assertRaises(TypeError): @codec.register_exporter('abc', int) # base_class is not a Component class Abc(codec.Exporter): pass def test_re_register(self): @codec.register_exporter('abc', Part) class Abc(codec.Exporter): pass with self.assertRaises(TypeError): @codec.register_exporter('abc', Part) # duplicate class Def(codec.Exporter): pass def test_base_class_conflict(self): @codec.register_exporter('abc', Component) class Abc(codec.Exporter): pass with self.assertRaises(TypeError): @codec.register_exporter('abc', Part) # Part is a Component class Def(codec.Exporter): pass def test_no_exporter(self): with self.assertRaises(TypeError): codec.get_exporter(Box(), 'abc') def test_no_exporter_for_type(self): @codec.register_exporter('abc', Part) class Abc(codec.Exporter): pass with self.assertRaises(TypeError): codec.get_exporter(CubeStack(), 'abc') # assembly class ImporterRegisterTests(CodecRegisterTests): def test_register(self): @codec.register_importer('abc', Part) class Abc(codec.Importer): pass self.assertEqual(codec.importer_index, {'abc': {Part: Abc}}) def test_get_registered(self): @codec.register_importer('abc', Part) class Abc(codec.Importer): pass self.assertIsInstance(codec.get_importer(Box, 'abc'), Abc) def test_get_registered_subtype(self): @codec.register_importer('abc', Component) class Abc(codec.Importer): pass self.assertIsInstance(codec.get_importer(Box, 'abc'), Abc) def test_bad_name(self): with self.assertRaises(TypeError): @codec.register_importer(123, Part) # bad name type class Abc(codec.Importer): 
pass def test_bad_base_class(self): with self.assertRaises(TypeError): @codec.register_importer('abc', str) # base_class is not a Component class Abc(codec.Importer): pass def test_re_register(self): @codec.register_importer('abc', Part) class Abc(codec.Importer): pass with self.assertRaises(TypeError): @codec.register_importer('abc', Part) # duplicate register class Def(codec.Importer): pass def test_base_class_conflict(self): @codec.register_importer('abc', Component) class Abc(codec.Importer): pass with self.assertRaises(TypeError): @codec.register_importer('abc', Part) # Part is a Component class Def(codec.Importer): pass def test_no_importer(self): with self.assertRaises(TypeError): codec.get_importer(Box, 'abc') def test_no_importer_for_type(self): @codec.register_importer('abc', Part) class Abc(codec.Importer): pass with self.assertRaises(TypeError): codec.get_importer(CubeStack, 'abc') # ------- Specific Codecs ------- # --- Codec: step @testlabel('codec', 'codc_step') class TestStep(CodecFileTest): def test_export(self): cube = Box() self.assertFilesizeZero(self.filename) cube.exporter('step')(self.filename) self.assertFilesizeNonZero(self.filename) def test_import(self): filename = 'test-files/cube.step' with suppress_stdout_stderr(): cube = Part.importer('step')(filename) self.assertEqual(type(cube).__name__, 'cube_step') self.assertAlmostEqual(cube.bounding_box.xmin, -0.5) self.assertAlmostEqual(cube.bounding_box.xmax, 0.5) def test_import_unicode(self): filename = u'test-files/cube.step' with suppress_stdout_stderr(): cube = Part.importer('step')(filename) self.assertEqual(type(cube).__name__, 'cube_step') self.assertAlmostEqual(cube.bounding_box.xmin, -0.5) self.assertAlmostEqual(cube.bounding_box.xmax, 0.5) @mock.patch('os.path.exists', mock.MagicMock(return_value=True)) def test_mangle_numberstart(self): filename = 'test-files/0123_noexist.step' part = Part.importer('step')(filename) self.assertEqual(type(part).__name__, '_0123_noexist_step') def 
test_import_nofile(self): filename = 'test-files/noexist.step' with self.assertRaises(ValueError): # exception raised before Part.importer('step')(filename) def test_import_badformat(self): filename = 'test-files/bad_format.step' # file exists, but is not a valid STEP file thing = Part.importer('step')(filename) # exception not raised before object is formed with self.assertRaises(ValueError): with suppress_stdout_stderr(): thing.local_obj def test_multipart_part(self): # When imported as a Part, geometry is unioned together filename = 'test-files/red_cube_blue_cylinder.step' with suppress_stdout_stderr(): thing = Part.importer('step')(filename) # cylinder {5 < x < 15}, box {-10 < x < 0} # combined they should be {-10 < x < 15} self.assertAlmostEqual(thing.bounding_box.xmin, -10) self.assertAlmostEqual(thing.bounding_box.xmax, 15) def test_multipart_assembly(self): # When imported as an Assembly, each individual mesh # is imported as a component Part of the resulting Assembly. filename = 'test-files/red_cube_blue_cylinder.step' with suppress_stdout_stderr(): thing = Assembly.importer('step')(filename) self.assertEqual(len(thing.components), 2) self.assertEqual(len(thing.constraints), 2) # --- Codec: json @testlabel('codec', 'codec_json') class TestJsonPart(CodecFileTest): def test_export(self): cube = Box() self.assertFilesizeZero(self.filename) cube.exporter('json')(self.filename) self.assertFilesizeNonZero(self.filename) @testlabel('codec', 'codec_json') class TestJsonAssembly(CodecFolderTest): def test_export(self): obj = CubeStack() f = lambda n: os.path.join(self.foldername, n) obj.exporter('json')(f('out.json')) self.assertFalse(os.path.exists(f('out.json'))) self.assertFilesizeNonZero(f('out.cube_a.json')) self.assertFilesizeNonZero(f('out.cube_b.json')) # --- Codec: stl @testlabel('codec', 'codec_stl') class TestStl(CodecFileTest): def test_export(self): cube = Box() self.assertFilesizeZero(self.filename) cube.exporter('stl')(self.filename) 
self.assertFilesizeNonZero(self.filename) # TODO: temporarily removed # getting error on my virtual environment # LookupError: unknown encoding: unicode # cause unknown # --- Codec: amf @testlabel('codec', 'codec_amf') class TestAmf(CodecFileTest): def test_export(self): cube = Box() self.assertEqual(os.stat(self.filename).st_size, 0) cube.exporter('amf')(self.filename) self.assertGreater(os.stat(self.filename).st_size, 0) # --- Codec: svg @testlabel('codec', 'codec_svg') class TestSvg(CodecFileTest): def test_export(self): cube = Box() self.assertFilesizeZero(self.filename) cube.exporter('svg')(self.filename) self.assertGreater(os.stat(self.filename).st_size, 0) # --- Codec: gltf @testlabel('codec', 'codec_gltf') class TestGltf(CodecFolderTest): def test_part_not_embedded(self): cube = Box() cube.exporter('gltf')( os.path.join(self.foldername, 'cube.gltf'), embed=False, ) self.assertFilesizeNonZero(os.path.join(self.foldername, 'cube.gltf')) self.assertFilesizeNonZero(os.path.join(self.foldername, 'cube.bin')) def test_part_embedded(self): cube = Box() cube.exporter('gltf')( os.path.join(self.foldername, 'cube.gltf'), embed=True, ) self.assertFilesizeNonZero(os.path.join(self.foldername, 'cube.gltf')) self.assertFalse(os.path.exists(os.path.join(self.foldername, 'cube.bin'))) def test_assembly(self): asm = CubeStack() asm.exporter('gltf')( os.path.join(self.foldername, 'asm.gltf') ) self.assertFilesizeNonZero(os.path.join(self.foldername, 'asm.gltf')) for name in asm.components.keys(): # only works because it's a single layer assembly self.assertFilesizeNonZero( os.path.join(self.foldername, 'asm-%s.bin' % name) ) def test_tolerance(self): def get_polycount(tolerance): obj = Cylinder(radius=10, length=10) # new object per export exporter = obj.exporter('gltf') buffer = exporter.part_buffer(obj, tolerance=tolerance) return int(buffer.idx_size / 3) # 3 vertex indices per polygon self.assertGreater( get_polycount(tolerance=0.01), get_polycount(tolerance=0.2) ) # 
TODO: re-introduce test with OCC integration @unittest.skip("FreeCAD tessellate appears to cache") def test_tolerance_nocache(self): # note: same test as ``test_tolerance`` but without a creating a new object def get_polycount(obj, tolerance): exporter = obj.exporter('gltf') buffer = exporter.part_buffer(obj, tolerance=tolerance) return int(buffer.idx_size / 3) # 3 vertex indices per polygon # tolerance: high -> low c1 = Cylinder(radius=10, length=10) self.assertLess( get_polycount(c1, tolerance=0.2), get_polycount(c1, tolerance=0.01) ) # tolerance: low -> high c2 = Cylinder(radius=10, length=10) self.assertGreater( get_polycount(c2, tolerance=0.01), get_polycount(c2, tolerance=0.2) ) @testlabel('codec', 'codec_gltf') class TestGltfBuffer(CQPartsTest): def test_indices_sizes(self): # 1 byte sb = codec.gltf.ShapeBuffer(max_index=0xff) self.assertEqual(sb.idx_bytelen, 1) sb.add_poly_index(1, 2, 3) self.assertEqual(sb.idx_size, 3) self.assertEqual(sb.idx_len, 3) # 2 bytes sb = codec.gltf.ShapeBuffer(max_index=0xff + 1) self.assertEqual(sb.idx_bytelen, 2) sb.add_poly_index(1, 2, 3) self.assertEqual(sb.idx_size, 3) self.assertEqual(sb.idx_len, 6) # 4 bytes sb = codec.gltf.ShapeBuffer(max_index=0xffff + 1) self.assertEqual(sb.idx_bytelen, 4) sb.add_poly_index(1, 2, 3) self.assertEqual(sb.idx_size, 3) self.assertEqual(sb.idx_len, 12)
python
Apache-2.0
018e87e14c2c4d1d40b4bfe6a7e22bcf9baf0a53
2026-01-05T07:14:41.025281Z
false
cqparts/cqparts
https://github.com/cqparts/cqparts/blob/018e87e14c2c4d1d40b4bfe6a7e22bcf9baf0a53/tests/t_cqparts/test_wrappers.py
tests/t_cqparts/test_wrappers.py
from base import CQPartsTest from base import testlabel import cadquery # Units under test import cqparts from cqparts.utils import as_part class TestAsPart(CQPartsTest): def test_basic(self): @as_part def Box(x=1, y=2, z=3): return cadquery.Workplane('XY').box(x, y, z) box = Box() self.assertIsInstance(box, cqparts.Part) self.assertIsInstance(box.local_obj, cadquery.Workplane)
python
Apache-2.0
018e87e14c2c4d1d40b4bfe6a7e22bcf9baf0a53
2026-01-05T07:14:41.025281Z
false
cqparts/cqparts
https://github.com/cqparts/cqparts/blob/018e87e14c2c4d1d40b4bfe6a7e22bcf9baf0a53/tests/t_cqparts/test_catalogue_test.py
tests/t_cqparts/test_catalogue_test.py
from cqparts.utils.test import CatalogueTest from cqparts.catalogue import JSONCatalogue catalogue = JSONCatalogue('test-files/catalogue.json') CatalogueTest.create_from(catalogue, add_to=globals())
python
Apache-2.0
018e87e14c2c4d1d40b4bfe6a7e22bcf9baf0a53
2026-01-05T07:14:41.025281Z
false
cqparts/cqparts
https://github.com/cqparts/cqparts/blob/018e87e14c2c4d1d40b4bfe6a7e22bcf9baf0a53/tests/t_cqparts/test_constraint.py
tests/t_cqparts/test_constraint.py
from base import CQPartsTest from base import testlabel # Unit under test from cqparts.constraint import Mate from cqparts.constraint import Fixed from cqparts.constraint import Coincident from cqparts.utils import CoordSystem from cqparts.constraint.solver import solver from partslib.basic import Box class MateTests(CQPartsTest): def test_init(self): obj = Box() cs = CoordSystem() # normal m = Mate(obj, cs) self.assertEqual(id(m.component), id(obj)) self.assertEqual(id(m.local_coords), id(cs)) # no component m = Mate(None, cs) self.assertIsNone(m.component) self.assertEqual(id(m.local_coords), id(cs)) # no coords m = Mate(obj) self.assertEqual(id(m.component), id(obj)) self.assertEqual(m.local_coords, CoordSystem()) def test_bad_component(self): self.assertRaises(TypeError, Mate, 'nope') def test_bad_coords(self): self.assertRaises(TypeError, Mate, Box(), 123) def test_world_coords(self): cs1 = CoordSystem(origin=(1,2,3)) cs2 = CoordSystem(origin=(1,1,1)) box = Box() box.world_coords = cs2 m = Mate(box, cs1) self.assertEqual(m.world_coords, cs1 + cs2) def test_world_coords_badcmp(self): cs = CoordSystem(origin=(1,2,3)) box = Box() m = Mate(box, cs) with self.assertRaises(ValueError): m.world_coords def test_world_coords_nocmp(self): cs = CoordSystem(origin=(1,2,3)) m = Mate(None, cs) self.assertEqual(m.world_coords, cs) def test_add_coordsys(self): cs1 = CoordSystem(origin=(1,2,3)) cs2 = CoordSystem(origin=(0,2,4)) m1 = Mate(Box(), cs1) m2 = m1 + cs2 self.assertEqual(m2.local_coords, cs1 + cs2) def test_add_badtype(self): m = Mate(Box()) with self.assertRaises(TypeError): m + 100 def test_repr(self): "%r" % Mate(Box(), CoordSystem()) class FixedConstraintTests(CQPartsTest): def test_basic(self): mate = Mate(Box()) cs = CoordSystem() c = Fixed(mate, cs) # assert composition self.assertEqual(id(mate), id(c.mate)) self.assertEqual(cs, c.world_coords) def test_bad_mate(self): self.assertRaises(TypeError, Fixed, 1) def test_bad_coords(self): 
self.assertRaises(TypeError, Fixed, Mate(Box()), 1) def test_mate(self): mate = Mate(Box()) cs1 = CoordSystem(origin=(1,1,1)) box = Box() box.world_coords = cs1 cs2 = CoordSystem(origin=(2,3,4)) coords = Mate(box, cs2) c = Fixed(mate, coords) # given world_coords is from a mate self.assertEqual(cs1 + cs2, c.world_coords) def test_default(self): c = Fixed(Mate(Box())) self.assertEqual(c.world_coords, CoordSystem()) class CoincidentConstraintTests(CQPartsTest): def test_basic(self): mate1 = Mate(Box()) mate2 = Mate(Box()) c = Coincident(mate1, mate2) # assert composition self.assertEqual(id(mate1), id(c.mate)) self.assertEqual(id(mate2), id(c.to_mate)) def test_bad_mate(self): self.assertRaises(TypeError, Coincident, 1, Mate(Box())) def test_bad_to_mate(self): self.assertRaises(TypeError, Coincident, Mate(Box()), 1)
python
Apache-2.0
018e87e14c2c4d1d40b4bfe6a7e22bcf9baf0a53
2026-01-05T07:14:41.025281Z
false
cqparts/cqparts
https://github.com/cqparts/cqparts/blob/018e87e14c2c4d1d40b4bfe6a7e22bcf9baf0a53/tests/t_cqparts/test_solver.py
tests/t_cqparts/test_solver.py
from base import CQPartsTest from base import testlabel import cadquery # Unit under test from cqparts.constraint import Fixed, Coincident from cqparts.constraint import Mate from cqparts.utils import CoordSystem from cqparts.constraint.solver import solver from partslib.basic import Box class FixedSolverTests(CQPartsTest): def test_random(self): box = Box() for (s1, s2) in [(1, 2), (3, 4), (5, 6), (7, 8)]: cs1 = CoordSystem.random(seed=s1) cs2 = CoordSystem.random(seed=s2) # create constraint c = Fixed(Mate(box, cs1), cs2) # solve solution = list(solver([c])) # assert results self.assertEqual(len(solution), 1) (part, coords) = solution[0] self.assertEqual(id(part), id(box)) self.assertEqual(coords, cs2 + (CoordSystem() - cs1)) # note: the above test is effectively reverse engineering the solution. # only (partially) effective as a regression test def test_rotation(self): box = Box() # +'ve rotation c = Fixed(Mate(box, CoordSystem()), CoordSystem(xDir=(1, 0.1, 0))) (part, coords) = list(solver([c]))[0] self.assertEqual(coords.origin, cadquery.Vector()) self.assertEqual(coords.xDir, cadquery.Vector(1, 0.1, 0).normalized()) self.assertEqual(coords.zDir, cadquery.Vector(0, 0, 1)) # -'ve rotation c = Fixed(Mate(box, CoordSystem(xDir=(1, 0.1, 0))), CoordSystem()) (part, coords) = list(solver([c]))[0] self.assertEqual(coords.origin, cadquery.Vector()) self.assertEqual(coords.xDir, cadquery.Vector(1, -0.1, 0).normalized()) self.assertEqual(coords.zDir, cadquery.Vector(0, 0, 1)) def test_translation(self): box = Box() # +'ve translation c = Fixed(Mate(box, CoordSystem()), CoordSystem(origin=(1, 2, 3))) (part, coords) = list(solver([c]))[0] self.assertEqual(coords.origin, cadquery.Vector(1, 2, 3)) self.assertEqual(coords.xDir, cadquery.Vector(1, 0, 0)) self.assertEqual(coords.zDir, cadquery.Vector(0, 0, 1)) # -'ve translation c = Fixed(Mate(box, CoordSystem(origin=(1, 2, 3))), CoordSystem()) (part, coords) = list(solver([c]))[0] self.assertEqual(coords.origin, 
cadquery.Vector(-1, -2, -3)) self.assertEqual(coords.xDir, cadquery.Vector(1, 0, 0)) self.assertEqual(coords.zDir, cadquery.Vector(0, 0, 1)) def test_origin(self): box = Box() c = Fixed(Mate(box), CoordSystem()) # default origin (0, 0, 0) (part, coords) = list(solver([c]))[0] self.assertEqual(coords, CoordSystem((0, 0, 0))) # origin displaced (part, coords) = list(solver([c], CoordSystem(origin=(1, 2, 3))))[0] self.assertEqual(coords, CoordSystem((1, 2, 3))) class CoincidentSolverTests(CQPartsTest): def test_no_solution(self): (box1, box2) = (Box(), Box()) # neither have world_coords c = Coincident(box2.mate_origin, box1.mate_top) self.assertIsNone(box1.world_coords) # test criteria with self.assertRaises(ValueError): list(solver([c])) def test_solution(self): (box1, box2) = (Box(), Box()) # set box1 world location: sit it on top of xy plane box1.world_coords = CoordSystem((0, 0, box1.height / 2)) # +'ve rotation c = Coincident(box2.mate_origin, box1.mate_top) solution = list(solver([c])) self.assertEqual(len(solution), 1) (part, coords) = solution[0] self.assertEqual(id(part), id(box2)) self.assertEqual(coords, CoordSystem((0, 0, box1.height))) def test_rotation(self): (box1, box2) = (Box(), Box()) # set box1 world location: sit it on top of xy plane box1.world_coords = CoordSystem((0, 0, box1.height / 2)) # +'ve rotation c = Coincident( box2.mate_origin, box1.mate_top + CoordSystem(xDir=(1, 0.1, 0)) ) (part, coords) = list(solver([c]))[0] self.assertEqual(coords.origin, cadquery.Vector(0, 0, box1.height)) self.assertEqual(coords.xDir, cadquery.Vector(1, 0.1, 0).normalized()) self.assertEqual(coords.zDir, cadquery.Vector(0, 0, 1)) # -'ve rotation c = Coincident( box2.mate_origin + CoordSystem(xDir=(1, 0.1, 0)), box1.mate_top ) (part, coords) = list(solver([c]))[0] self.assertEqual(coords.origin, cadquery.Vector(0, 0, box1.height)) self.assertEqual(coords.xDir, cadquery.Vector(1, -0.1, 0).normalized()) self.assertEqual(coords.zDir, cadquery.Vector(0, 0, 1)) def 
test_translation(self): (box1, box2) = (Box(), Box()) # set box1 world location: sit it on top of xy plane box1.world_coords = CoordSystem((0, 0, box1.height / 2)) # +'ve translation c = Coincident( box2.mate_origin, box1.mate_top + CoordSystem(origin=(1, 2, 3)) ) (part, coords) = list(solver([c]))[0] self.assertEqual(coords.origin, cadquery.Vector(1, 2, 3 + box1.height)) self.assertEqual(coords.xDir, cadquery.Vector(1, 0, 0)) self.assertEqual(coords.zDir, cadquery.Vector(0, 0, 1)) # -'ve translation c = Coincident( box2.mate_origin + CoordSystem(origin=(1, 2, 3)), box1.mate_top ) (part, coords) = list(solver([c]))[0] self.assertEqual(coords.origin, cadquery.Vector(-1, -2, -3 + box1.height)) self.assertEqual(coords.xDir, cadquery.Vector(1, 0, 0)) self.assertEqual(coords.zDir, cadquery.Vector(0, 0, 1)) class SolverOrderTests(CQPartsTest): # ------ Fixed & Coincident ------ # Fixed, Coincident def test_coincident_forward(self): (box1, box2) = (Box(), Box()) constraints = [ # stack box2 on box1 Fixed(box1.mate_bottom, CoordSystem()), Coincident(box2.mate_bottom, box1.mate_top), ] solution = solver(constraints) # 1st solution : box1 (part, coords) = next(solution) self.assertEqual(id(part), id(box1)) part.world_coords = coords # 2nd solution : box2 (part, coords) = next(solution) self.assertEqual(id(part), id(box2)) with self.assertRaises(StopIteration): next(solution) # Coincident, Fixed def test_coincident_backward(self): (box1, box2) = (Box(), Box()) constraints = [ # stack box2 on box1 (in reversed logical order) Coincident(box2.mate_bottom, box1.mate_top), Fixed(box1.mate_bottom, CoordSystem()), ] solution = solver(constraints) # 1st solution : box1 (part, coords) = next(solution) self.assertEqual(id(part), id(box1)) part.world_coords = coords # 2nd solution : box2 (part, coords) = next(solution) self.assertEqual(id(part), id(box2)) with self.assertRaises(StopIteration): next(solution) class BadSolverTests(CQPartsTest): def test_non_constraint(self): with 
self.assertRaises(ValueError): list(solver(['not_a_constraint']))
python
Apache-2.0
018e87e14c2c4d1d40b4bfe6a7e22bcf9baf0a53
2026-01-05T07:14:41.025281Z
false
cqparts/cqparts
https://github.com/cqparts/cqparts/blob/018e87e14c2c4d1d40b4bfe6a7e22bcf9baf0a53/tests/t_cqparts/test_component.py
tests/t_cqparts/test_component.py
from base import CQPartsTest, CodecRegisterTests from base import testlabel # Unit(s) under test import cqparts from cqparts.utils import CoordSystem from cqparts.constraint import Mate from cqparts import codec class ComponentTests(CQPartsTest): def test_premature(self): c = cqparts.Component() with self.assertRaises(NotImplementedError): c.build() def test_world_coords(self): class C(cqparts.Component): def __init__(self, *args, **kwargs): self._flag_placement_changed = False super(C, self).__init__(*args, **kwargs) def _placement_changed(self): self._flag_placement_changed = True super(C, self)._placement_changed() c = C() self.assertIsNone(c.world_coords) cs = CoordSystem.random() self.assertFalse(c._flag_placement_changed) c.world_coords = cs self.assertTrue(c._flag_placement_changed) self.assertEquals(c.world_coords, cs) c.world_coords = None self.assertIsNone(c.world_coords) def test_mate_origin(self): c = cqparts.Component() mate = c.mate_origin self.assertEquals(id(mate.component), id(c)) self.assertEquals(mate.local_coords, CoordSystem()) class ImportExportTests(CodecRegisterTests): def test_exporter(self): @codec.register_exporter('abc', cqparts.Component) class Abc(codec.Exporter): pass c = cqparts.Component() exporter = c.exporter('abc') self.assertIsInstance(exporter, Abc) self.assertEquals(id(exporter.obj), id(c)) def test_importer(self): @codec.register_importer('abc', cqparts.Component) class Abc(codec.Importer): pass c = cqparts.Component() importer = c.importer('abc') self.assertIsInstance(importer, Abc) self.assertEquals(importer.cls, cqparts.Component)
python
Apache-2.0
018e87e14c2c4d1d40b4bfe6a7e22bcf9baf0a53
2026-01-05T07:14:41.025281Z
false
cqparts/cqparts
https://github.com/cqparts/cqparts/blob/018e87e14c2c4d1d40b4bfe6a7e22bcf9baf0a53/tests/t_cqparts/__init__.py
tests/t_cqparts/__init__.py
python
Apache-2.0
018e87e14c2c4d1d40b4bfe6a7e22bcf9baf0a53
2026-01-05T07:14:41.025281Z
false
cqparts/cqparts
https://github.com/cqparts/cqparts/blob/018e87e14c2c4d1d40b4bfe6a7e22bcf9baf0a53/tests/t_cqparts/test_coordsystem.py
tests/t_cqparts/test_coordsystem.py
import mock from base import CQPartsTest from base import testlabel from cadquery import Plane, Vector import cadquery # Unit under test from cqparts.utils import CoordSystem class CoordSystemTests(CQPartsTest): @staticmethod def mat2list(m, digits=7): """Converts FreeCAD.Base.Matrix to a list""" return [ round(v, digits) for v in [ m.A11, m.A12, m.A13, m.A14, m.A21, m.A22, m.A23, m.A24, m.A31, m.A32, m.A33, m.A34, m.A41, m.A42, m.A43, m.A44 ] ] def assertMatrixAlmostEquals(self, first, second, places=6): """ :param first: matrix :type first: :class:`FreeCAD.Base.Matrix` :param second: list of 16 numbers (of a 4x4 matrix) :type second: :class:`list` """ for (a, b) in zip(self.mat2list(first), second): self.assertAlmostEqual(a, b, places=places) @staticmethod def boundbox2list(bb): return [ bb.xmin, bb.xmax, bb.ymin, bb.ymax, bb.zmin, bb.zmax, ] def assertBoundingBoxEquals(self, first, second, places=6): """ :param first: bounding box :type first: :class:`cadquery.BoundBox` :param second: list of ranges ``[xmin, xmax, ymin, ymax, zmin, zmax]`` :type second: :class:`list` """ for (a, b) in zip(self.boundbox2list(first), second): self.assertAlmostEqual(a, b, places=places) def test_from_plane(self): plane = Plane(origin=(1,2,3), xDir=(0,1,0), normal=(1,0,0)) cs = CoordSystem.from_plane(plane) self.assertEqual(cs.origin, Vector(1,2,3)) self.assertEqual(cs.xDir, Vector(0,1,0)) self.assertEqual(cs.yDir, Vector(0,0,1)) self.assertEqual(cs.zDir, Vector(1,0,0)) def test_from_matrix(self): from FreeCAD import Matrix # identity self.assertEqual( CoordSystem.from_transform(Matrix()), CoordSystem() ) # random #1 m = Matrix( -0.146655,-0.271161,-0.951296,0.0376659, -0.676234,0.729359,-0.103649,0.615421, 0.721942,0.628098,-0.290333,-0.451955, 0,0,0,1 ) cs = CoordSystem.from_transform(m) self.assertEqual(cs, CoordSystem( origin=(0.0376659, 0.615421, -0.451955), xDir=(-0.14665525299526946, -0.6762339076811328, 0.7219417835748246), normal=(-0.9512957880009034, -0.10364897690151711, 
-0.2903329352984416), )) # random #2 m = Matrix( 0.423408,-0.892837,-0.153517,-0.163654, -0.617391,-0.408388,0.672345,0.835824, -0.662989,-0.189896,-0.724144,0.632804, 0,0,0,1 ) cs = CoordSystem.from_transform(m) self.assertEqual(cs, CoordSystem( origin=(-0.163654, 0.835824, 0.632804), xDir=(0.4234078285564432, -0.6173904937335437, -0.6629892826920875), normal=(-0.15351701527110584, 0.672345066881529, -0.7241440720342351), )) def test_random(self): cs = CoordSystem.random() self.assertIsInstance(cs, CoordSystem) self.assertNotEqual(cs, CoordSystem()) # not an identity matrix # (false negative result is possible, but extremely unlikely) def test_random_seed(self): for i in range(1, 5): cs1 = CoordSystem.random(seed=i) cs2 = CoordSystem.random(seed=i) # same seed self.assertEqual(cs1, cs2) # result should be the same @mock.patch('random.uniform') def test_random_failsafe(self, mock_uniform): random_numbers = [ # 1st try (xDir & normal are parallel; error) 0, 0, 0, # origin 1, 0, 0, # xDir 1, 0, 0, # normal # 2nd try (valid data) 1, 2, 3, # origin 0, 1, 0, # xDir 1, 0, 0, # normal ] mock_uniform.side_effect = random_numbers cs = CoordSystem.random() self.assertEqual(len(mock_uniform.call_args_list), len(random_numbers)) self.assertEqual(cs, CoordSystem( origin=random_numbers[9:12], xDir=random_numbers[12:15], normal=random_numbers[15:18], )) def test_world2local(self): # random 1 cs = CoordSystem( origin=(-0.029, -0.222, 0.432), xDir=(0.556, -0.719, 0.417), normal=(0.779, 0.275, -0.564), ) self.assertMatrixAlmostEquals( cs.world_to_local_transform, [ 0.55584,-0.719063,0.417122,-0.323709, -0.290761,-0.638252,-0.712806,0.157808, 0.778781,0.274923,-0.563842,0.327197, 0,0,0,1 ] ) # random 2 cs = CoordSystem( origin=(-0.654, -0.75, 0.46), xDir=(-0.412, 0.906, -0.099), normal=(0.474, 0.306, 0.825), ) self.assertMatrixAlmostEquals( cs.world_to_local_transform, [ -0.412051,0.905744,-0.0992066,0.455462, -0.77801,-0.293074,0.555706,-0.984248, 
0.474252,0.306163,0.825439,0.160081, 0,0,0,1 ] ) def test_local2world(self): # random 1 cs = CoordSystem( origin=(-0.03, 0.256, -0.246), xDir=(-0.018, -0.857, 0.514), normal=(-0.868, 0.268, 0.417), ) self.assertMatrixAlmostEquals( cs.local_to_world_transform, [ -0.0177607,0.49559,-0.868375,-0.03, -0.857519,0.439062,0.268116,0.256, 0.514146,0.74941,0.41718,-0.246, -0,0,0,1, ] ) # random 2 cs = CoordSystem( origin=(-0.539, -0.071, -0.17), xDir=(0.866, -0.189, -0.463), normal=(-0.468, -0.632, -0.618), ) self.assertMatrixAlmostEquals( cs.local_to_world_transform, [ 0.866118,0.175777,-0.467913,-0.539, -0.18881,-0.751715,-0.631882,-0.071, -0.462808,0.635631,-0.617885,-0.17, 0,-0,0,1, ] ) def test_arithmetic_add_coordsys(self): # random 1 cs = CoordSystem( origin=(0.319872,-0.424248,-0.813118), xDir=(0.301597,0.844131,-0.443263), normal=(0.518197,-0.535377,-0.666966), ) cs_add = CoordSystem( origin=(-0.965988,0.438111,0.447495), xDir=(-0.903357,0.322463,-0.282777), normal=(0.0176109,-0.630881,-0.77568), ) self.assertEqual( cs + cs_add, CoordSystem( origin=(0.6110520112439473, -1.4667419254474168, -0.42101284070314754), xDir=(-0.16091098866905712, -0.6019554630954405, 0.7821491380645386), normal=(-0.901549677957146, 0.41213999099584614, 0.13171486627298448), ) ) # random 2 cs = CoordSystem( origin=(0.997758,-0.429350,0.469693), xDir=(-0.949669,0.304061,-0.0753356), normal=(-0.265922,-0.655403,0.706917), ) cs_add = CoordSystem( origin=(0.604043,-0.918366,0.765700), xDir=(-0.208045,0.778042,0.592762), normal=(0.591826,-0.382369,0.709603), ) self.assertEqual( cs + cs_add, CoordSystem( origin=(0.3725549528751452, -0.1125950462339067, 1.6113356739288598), xDir=(-0.08887557530345871, -0.9896725888680419, -0.11246910223571256), normal=(-0.6874287881318142, -0.020766321770180177, 0.7259548340825087), ) ) def test_arithmetic_add_vector(self): # random 1 cs = CoordSystem( origin=(0.776696,-0.155044,0.464622), xDir=(-0.792263,-0.141302,0.593594), 
normal=(-0.586401,-0.0926133,-0.804709), ) v = Vector(0.894579,-0.282728,-0.428593) self.assertEqual( cs + v, Vector(0.3669727263895199, -0.5204201245493684, 1.3378492301899407) ) # random 2 cs = CoordSystem( origin=(-0.370354,-0.146263,-0.007179), xDir=(-0.96932,0.182199,-0.16499), normal=(0.244193,0.790464,-0.561726), ) v = Vector(-0.111398,-0.465007,-0.221905) self.assertEqual( cs + v, Vector(-0.3035072972382829, -0.613895258440105, -0.2411329328032198) ) def test_arithmetic_add_workplane(self): # random 1 cs = CoordSystem( origin=(-0.572012,0.137190,-0.927598), xDir=(0.877633,0.381758,0.289866), normal=(-0.465231,0.824021,0.32334), ) obj = cadquery.Workplane('XY').box(1,1,1) # unit cube self.assertBoundingBoxEquals( (cs + obj).val().BoundingBox(), [ -1.3011527973859502, 0.15712879738595031, -0.6750138361946009, 0.9493938361946012, -1.6845977586686152, -0.1705982413313848, ] ) # random 2 cs = CoordSystem( origin=(0.092874,0.472599,0.277811), xDir=(0.559151,0.828735,-0.0234319), normal=(0.399938,-0.244868,0.883227), ) obj = cadquery.Workplane('XY').box(1,1,1) # unit cube self.assertBoundingBoxEquals( (cs + obj).val().BoundingBox(), [ -0.7497819487047321, 0.9355299487047323, -0.31581638155097225, 1.261014381550972, -0.4096982706849824, 0.9653202706849824, ] ) def test_arithmetic_add_bad_type(self): with self.assertRaises(TypeError): CoordSystem.random() + 1 with self.assertRaises(TypeError): CoordSystem.random() + 'bad_type' with self.assertRaises(TypeError): CoordSystem.random() + None def test_arithmetic_sub_coordsys(self): # random 1 cs = CoordSystem( origin=(0.995014,0.597397,0.251518), xDir=(-0.701536,-0.665758,0.254191), normal=(0.135645,0.225422,0.964772), ) cs_sub = CoordSystem( origin=(-0.320574,0.951257,0.176344), xDir=(-0.744255,-0.650638,-0.150844), normal=(0.419232,-0.279276,-0.863858), ) self.assertEqual( cs - cs_sub, CoordSystem( origin=(-0.7602379451931977, -0.9700309903527986, 0.5854211817688126), xDir=(0.9169464676048765, -0.22755650176590822, 
-0.32776090988859785), normal=(-0.39315299213245875, -0.37502955527701604, -0.8395138816279446), ) ) # random 2 cs = CoordSystem( origin=(-0.980361,0.591789,-0.073316), xDir=(-0.27988,-0.085973,-0.956178), normal=(0.755724,0.59451,-0.27466), ) cs_sub = CoordSystem( origin=(0.480657,0.627596,0.409464), xDir=(-0.0929824,-0.728202,0.679026), normal=(0.549731,0.531063,0.644801), ) self.assertEqual( cs - cs_sub, CoordSystem( origin=(-0.1658962438630106, -1.02792754287956, -1.133479452321362), xDir=(-0.5606397275073202, 0.1404609281661355, -0.8160599387295184), normal=(-0.6896940658874535, 0.466189492307297, 0.5540662891224277), ) ) def test_arithmetic_sub_bad_type(self): with self.assertRaises(TypeError): CoordSystem.random() - 1 with self.assertRaises(TypeError): CoordSystem.random() - 'bad_type' with self.assertRaises(TypeError): CoordSystem.random() - None obj = cadquery.Workplane('XY').box(1, 1, 1) with self.assertRaises(TypeError): CoordSystem.random() - obj v = Vector(1, 2, 3) with self.assertRaises(TypeError): CoordSystem.random() - v def test_repr(self): repr_str = repr(CoordSystem.random()) self.assertIsInstance(repr_str, str) self.assertTrue(bool(repr_str))
python
Apache-2.0
018e87e14c2c4d1d40b4bfe6a7e22bcf9baf0a53
2026-01-05T07:14:41.025281Z
false
cqparts/cqparts
https://github.com/cqparts/cqparts/blob/018e87e14c2c4d1d40b4bfe6a7e22bcf9baf0a53/tests/t_cqparts/test_search.py
tests/t_cqparts/test_search.py
from collections import defaultdict from base import CQPartsTest from base import testlabel from partslib import Box, Cylinder # Unit under test import cqparts from cqparts.search import register from cqparts.search import search, find from cqparts.search import common_criteria from cqparts.errors import SearchNoneFoundError, SearchMultipleFoundError class ClearSearchIndexTests(CQPartsTest): def setUp(self): super(ClearSearchIndexTests, self).setUp() # retain original values self.orig_index = cqparts.search.index self.orig_class_list = cqparts.search.class_list # clear values cqparts.search.index = defaultdict(lambda: defaultdict(set)) cqparts.search.class_list = set() def tearDown(self): super(ClearSearchIndexTests, self).tearDown() # restore original values cqparts.search.index = self.orig_index cqparts.search.class_list = self.orig_class_list class RegisterTests(ClearSearchIndexTests): def test_register(self): _Box = register(a=1, b=2)(Box) self.assertEqual(cqparts.search.index, { 'a': {1: set([Box])}, 'b': {2: set([Box])}, }) self.assertEqual(cqparts.search.class_list, set([Box])) def test_register_duplicate_criteria(self): _Box = register(a=1, b=2)(Box) _Cyl = register(b=2, c=3)(Cylinder) self.assertEqual(cqparts.search.index, { 'a': {1: set([Box])}, 'b': {2: set([Box, Cylinder])}, 'c': {3: set([Cylinder])}, }) self.assertEqual(cqparts.search.class_list, set([Box, Cylinder])) class SearchFindTests(ClearSearchIndexTests): def setUp(self): super(SearchFindTests, self).setUp() self.Box = register(a=1, b=2)(Box) self.Cyl = register(b=2, c=3)(Cylinder) def test_search_single(self): self.assertEqual(search(a=1), set([self.Box])) self.assertEqual(search(c=3), set([self.Cyl])) def test_search_multiple(self): self.assertEqual(search(b=2), set([self.Box, self.Cyl])) def test_search_noresults(self): self.assertEqual(search(a=10), set()) def test_find(self): self.assertEqual(find(a=1), self.Box) self.assertEqual(find(c=3), self.Cyl) def test_find_multiple(self): with 
self.assertRaises(SearchMultipleFoundError): find(b=2) def test_find_noresults(self): with self.assertRaises(SearchNoneFoundError): find(a=10) class CommonCriteriaTests(ClearSearchIndexTests): def test_common_criteria(self): def foo(**kwargs): return kwargs self.assertEqual(foo(a=1), {'a': 1}) bar = common_criteria(b=2)(foo) self.assertEqual(bar(a=1), {'a': 1, 'b': 2})
python
Apache-2.0
018e87e14c2c4d1d40b4bfe6a7e22bcf9baf0a53
2026-01-05T07:14:41.025281Z
false
cqparts/cqparts
https://github.com/cqparts/cqparts/blob/018e87e14c2c4d1d40b4bfe6a7e22bcf9baf0a53/tests/t_cqparts/test_display.py
tests/t_cqparts/test_display.py
import unittest import mock import sys import cadquery from base import CQPartsTest from base import testlabel from base import skip_if_no_freecad from base import suppress_stdout_stderr from partslib import Box from partslib import CubeStack # Unit under test import cqparts from cqparts import display # Forgiving mock.patch wrapper: # Wrap mock.patch to not fall over on an import error. # negative side: This promotes test-failures to be runtime instead of # compile-time, however... # positive side: These tests should be skipped anyway... see reasoning # in more detail below def patch_forgiving(*args, **kwargs): def decorator(func): try: return mock.patch(*args, **kwargs)(func) except (ImportError, AttributeError): # Patch module or attribute could not be patched. # return something nonetheless. def inner(*args, **kwargs): #args.append(mock.MagicMock()) args.append(None) # should fail in any testcase return func(*args, **kwargs) return inner return decorator # Skip if no FreeCAD: # Skip Logic: # Test is skipped if necessary FreeCAD Python module(s) cannot be imported. # Justification: # cqparts should not require FreeCAD to be installed to function. # This IS currently a problem with cadquery, which is being worked on # to resolve... but that limitation should not follow through to cqparts. # So if the OS of the host test machine does not have FreeCAD, then these # tests are skipped in favour of failing. 
@unittest.skipIf(*skip_if_no_freecad()) class FreeCADTests(CQPartsTest): @patch_forgiving('Helpers.show') def test_part(self, mock_show): part = Box() disp_env = display.freecad.FreeCADDisplayEnv() disp_env.display(part) mock_show.assert_called_once_with(part.local_obj, (192, 192, 192, 0)) @patch_forgiving('Helpers.show') def test_assembly(self, mock_show): assembly = CubeStack() disp_env = display.freecad.FreeCADDisplayEnv() disp_env.display(assembly) self.assertEqual(len(mock_show.call_args_list), 2) # 2 objects for (args, kwargs) in mock_show.call_args_list: self.assertIn(args[0], map(lambda x: x.world_obj, assembly.components.values())) @patch_forgiving('Helpers.show') def test_cadquery_obj(self, mock_show): obj = cadquery.Workplane('XY').box(1,1,1) disp_env = display.freecad.FreeCADDisplayEnv() disp_env.display(obj) mock_show.assert_called_once_with(obj) @testlabel('web') class WebTests(CQPartsTest): @mock.patch('time.sleep', mock.Mock(side_effect=KeyboardInterrupt())) @mock.patch('webbrowser.open') @mock.patch(('socketserver' if sys.version_info[0] >= 3 else 'SocketServer') + '.ThreadingTCPServer') def test_basic(self, mock_serverobj, mock_webbrowser_open): # setup mocks mock_server = mock.MagicMock(server_address=('abc', 123)) mock_serverobj.return_value = mock_server part = Box() disp_env = display.web.WebDisplayEnv() with suppress_stdout_stderr(): disp_env.display(part) # webbrowser.open called self.assertEquals(len(mock_webbrowser_open.call_args_list), 1) mock_webbrowser_open.assert_called_once_with('http://abc:123/') # SocketServer.ThreadingTCPServer.serve_forever called mock_server.serve_forever.assert_called_once_with() def test_bad_component(self): disp_env = display.web.WebDisplayEnv() with self.assertRaises(TypeError): disp_env.display(123) @testlabel('web') class CQPartsServerTests(CQPartsTest): @mock.patch('requests.get') @mock.patch('requests.post') @mock.patch('os.environ', {'CQPARTS_SERVER': 'http://abc:123'}) def test_basic(self, 
mock_requests_post, mock_requests_get): part = Box() disp_env = display.cqparts_server.CQPartsServerDisplayEnv() disp_env.display(part) # status mock_requests_get.assert_called_once_with('http://abc:123/status') self.assertEqual(len(mock_requests_post.call_args_list), 2) # upload (args, kwargs) = mock_requests_post.call_args_list[0] self.assertEqual(args, ('http://abc:123/upload',)) self.assertEqual(len(kwargs['files']), 2) # notify (args, kwargs) = mock_requests_post.call_args_list[1] self.assertEqual(args, ('http://abc:123/notify',)) def _raise_connectionerror(*args, **kwargs): import requests raise requests.exceptions.ConnectionError @mock.patch('requests.get', _raise_connectionerror) @mock.patch('requests.post') @mock.patch('os.environ', {'CQPARTS_SERVER': 'http://abc:123'}) def test_no_connection(self, mock_requests_post): part = Box() disp_env = display.cqparts_server.CQPartsServerDisplayEnv() disp_env.display(part) # no error, and no further contact attempted mock_requests_post.assert_not_called() @mock.patch('os.environ', {}) # empty def test_bad_env(self): disp_env = display.cqparts_server.CQPartsServerDisplayEnv() with self.assertRaises(KeyError): disp_env.display(Box()) @mock.patch('os.environ', {'CQPARTS_SERVER': 'http://abc:123'}) def test_bad_component(self): disp_env = display.cqparts_server.CQPartsServerDisplayEnv() with self.assertRaises(TypeError): disp_env.display(123)
python
Apache-2.0
018e87e14c2c4d1d40b4bfe6a7e22bcf9baf0a53
2026-01-05T07:14:41.025281Z
false
cqparts/cqparts
https://github.com/cqparts/cqparts/blob/018e87e14c2c4d1d40b4bfe6a7e22bcf9baf0a53/tests/t_cqparts/test_assembly.py
tests/t_cqparts/test_assembly.py
import re from base import CQPartsTest from base import testlabel from partslib import Box, Cylinder, CubeStack, SimpleCar from partslib import simplecar # Unit under test import cqparts from cqparts.constraint import Fixed, Coincident from cqparts.utils import CoordSystem from cqparts.errors import AssemblyFindError class BadAssemblyTests(CQPartsTest): # pre-mature assembiles def test_no_make_components(self): class A(cqparts.Assembly): pass with self.assertRaises(NotImplementedError): A().components def test_no_make_constraints(self): class A(cqparts.Assembly): def make_components(self): return {'p': Box()} with self.assertRaises(NotImplementedError): A().components # bad returns def test_bad_component_return(self): class A(cqparts.Assembly): def make_components(self): return 123 with self.assertRaises(TypeError): A().components def test_bad_component_yield(self): class A(cqparts.Assembly): def make_components(self): yield 123 with self.assertRaises(TypeError): A().components def test_bad_component_value(self): class A(cqparts.Assembly): def make_components(self): yield { 'p': Box(), # good component 'x': 123, # bad component } with self.assertRaises(ValueError): A().components def test_bad_component_key(self): class A(cqparts.Assembly): def make_components(self): yield { 'p': Box(), # good key 1: Box(), # bad key (must be a string) } with self.assertRaises(ValueError): A().components def test_bad_component_keychar_period(self): class A(cqparts.Assembly): def make_components(self): yield { 'p': Box(), # good key 'a.b': Box(), # key can't contain a '.' 
#147 } with self.assertRaises(ValueError): A().components def test_bad_component_keychar_dash(self): class A(cqparts.Assembly): def make_components(self): yield { 'p': Box(), # good key 'a-b': Box(), # key can't contain a '-' #147 } with self.assertRaises(ValueError): A().components def test_bad_constraint_return(self): class A(cqparts.Assembly): def make_components(self): return {'p': Box()} def make_constraints(self): return 123 with self.assertRaises(TypeError): A().components def test_bad_constraint_yield(self): class A(cqparts.Assembly): def make_components(self): return {'p': Box()} def make_constraints(self): yield 123 with self.assertRaises(TypeError): A().components def test_bad_constraint_value(self): class A(cqparts.Assembly): def make_components(self): return {'p': Box()} def make_constraints(self): return [ Fixed(self.components['p'].mate_origin), # good value 123, # bad value ] with self.assertRaises(ValueError): A().components class BuildCycleTests(CQPartsTest): def test_standard(self): # Define test components class P(Box): def __init__(self, *args, **kwargs): self._built = False super(P, self).__init__(*args, **kwargs) def make(self): self._built = True return super(P, self).make() class A(cqparts.Assembly): def __init__(self, *args, **kwargs): self._flags = [] super(A, self).__init__(*args, **kwargs) def build(self, *args, **kwargs): self._flags.append('build') return super(A, self).build(*args, **kwargs) def make_components(self): self._flags.append('cmp') return {'p1': P(), 'p2': P()} def make_constraints(self): self._flags.append('con') (p1, p2) = (self.components['p1'], self.components['p2']) return [ Fixed(p1.mate_origin), Coincident(p2.mate_bottom, p1.mate_top), ] # .components asm = A() self.assertEqual(asm._flags, []) asm.components # key stimulus self.assertEqual(asm._flags, ['build', 'cmp', 'con']) for name in ['p1', 'p2']: self.assertIsNotNone(asm.components[name].world_coords) self.assertFalse(asm.components[name]._built) # Build asm = 
A() asm.build(recursive=False) # key stimulus self.assertEqual(asm._flags, ['build', 'cmp', 'con']) for name in ['p1', 'p2']: self.assertIsNotNone(asm.components[name].world_coords) self.assertFalse(asm.components[name]._built) # Build Recursively asm = A() asm.build(recursive=True) # key stimulus self.assertEqual(asm._flags, ['build', 'cmp', 'con']) for name in ['p1', 'p2']: self.assertIsNotNone(asm.components[name].world_coords) self.assertTrue(asm.components[name]._built) # True def test_multi_pass(self): class P(Box): def __init__(self, *args, **kwargs): self._built = False super(P, self).__init__(*args, **kwargs) def make(self): self._built = True return super(P, self).make() class A(cqparts.Assembly): def __init__(self, *args, **kwargs): self._flags = [] super(A, self).__init__(*args, **kwargs) def build(self, *args, **kwargs): self._flags.append('build') return super(A, self).build(*args, **kwargs) def make_components(self): self._flags.append('cmp1') yield {'p1': P()} self._flags.append('cmp2') yield {'p2': P()} self._flags.append('cmp3') def make_constraints(self): self._flags.append('con1') p1 = self.components['p1'] yield [Fixed(p1.mate_origin)] self._flags.append('con2') p2 = self.components['p2'] yield [Coincident(p2.mate_bottom, p1.mate_top)] self._flags.append('con3') def solve(self, *args, **kwargs): self._flags.append('solve') return super(A, self).solve(*args, **kwargs) expected_flag_order = [ 'build', 'cmp1', 'con1', 'solve', # 1st pass 'cmp2', 'con2', 'solve', # 2nd pass 'cmp3', 'con3', # final pass, nothing yielded ] # .components asm = A() self.assertEqual(asm._flags, []) asm.components self.assertEqual(asm._flags, expected_flag_order) for name in ['p1', 'p2']: self.assertIsNotNone(asm.components[name].world_coords) self.assertFalse(asm.components[name]._built) # Build asm = A() asm.build(recursive=False) # key stimulus self.assertEqual(asm._flags, expected_flag_order) for name in ['p1', 'p2']: 
self.assertIsNotNone(asm.components[name].world_coords) self.assertFalse(asm.components[name]._built) # Build Recursively asm = A() asm.build(recursive=True) # key stimulus self.assertEqual(asm._flags, expected_flag_order) for name in ['p1', 'p2']: self.assertIsNotNone(asm.components[name].world_coords) self.assertTrue(asm.components[name]._built) # True class TreeStringTests(CQPartsTest): def test_one_layer(self): obj = CubeStack() tree_str = obj.tree_str(name='cubestack') self.assertEqual(tree_str, '\n'.join([ u"cubestack", u" \u251c\u25cb cube_a", u" \u2514\u25cb cube_b\n", ])) def test_two_layer(self): obj = SimpleCar() tree_str = obj.tree_str(name='car') self.assertEqual(tree_str, '\n'.join([ u"car", u" \u251c\u2500 back_wheels", u" \u2502 \u251c\u25cb axle", u" \u2502 \u251c\u25cb wheel_left", u" \u2502 \u2514\u25cb wheel_right", u" \u251c\u25cb chassis", u" \u2514\u2500 front_wheels", u" \u251c\u25cb axle", u" \u251c\u25cb wheel_left", u" \u2514\u25cb wheel_right\n", ])) def test_prefix(self): obj = CubeStack() tree_str = obj.tree_str(name='cubestack', prefix="--") self.assertEqual(tree_str, '\n'.join([ u"--cubestack", u"-- \u251c\u25cb cube_a", u"-- \u2514\u25cb cube_b\n", ])) def test_repr(self): obj = CubeStack() # no repr tree_str = obj.tree_str(name='cubestack', add_repr=False) for line in tree_str.rstrip('\n').split('\n'): self.assertIsNone(re.search(r'<[^>]+>', line)) # repr on each line tree_str = obj.tree_str(name='cubestack', add_repr=True) for line in tree_str.rstrip('\n').split('\n'): self.assertIsNotNone(re.search(r'<[^>]+>', line)) # repeat: nested obj obj = SimpleCar() tree_str = obj.tree_str(name='car', add_repr=True) for line in tree_str.rstrip('\n').split('\n'): self.assertIsNotNone(re.search(r'<[^>]+>', line)) class SearchTests(CQPartsTest): def test_1st_layer(self): car = SimpleCar() self.assertIsInstance(car.find('chassis'), simplecar.Chassis) # part self.assertIsInstance(car.find('front_wheels'), simplecar.AxleAsm) # assembly def 
test_2nd_layer_period(self): car = SimpleCar() self.assertIsInstance(car.find('front_wheels.axle'), simplecar.Axle) self.assertIsInstance(car.find('front_wheels.wheel_left'), simplecar.Wheel) def test_2nd_layer_dash(self): car = SimpleCar() self.assertIsInstance(car.find('front_wheels-axle'), simplecar.Axle) self.assertIsInstance(car.find('front_wheels-wheel_left'), simplecar.Wheel) def test_bad_paths(self): car = SimpleCar() bad_search_keys = [ 'nope', # 1st layer part doesn't exist 'chassis.foo', # part doesn't have components 'front_wheels.no_exist', # asm component doesn't exist 'front_wheels.axle.no_exist', # part has no components ] for search_key in bad_search_keys: with self.assertRaises(AssemblyFindError): car.find(search_key) class BoundingBoxTests(CQPartsTest): def test_single_layer(self): obj = CubeStack() bb = obj.bounding_box self.assertAlmostEqual( (bb.xmin, bb.ymin, bb.zmin, bb.xmax, bb.ymax, bb.zmax), (-1, -1, 0, 1, 1, 3), places=1 ) def test_nested(self): obj = SimpleCar() bb = obj.bounding_box self.assertAlmostEqual( (bb.xmin, bb.ymin, bb.zmin, bb.xmax, bb.ymax, bb.zmax), (-55, -35, -45, 55, 35, 25), places=1 )
python
Apache-2.0
018e87e14c2c4d1d40b4bfe6a7e22bcf9baf0a53
2026-01-05T07:14:41.025281Z
false
cqparts/cqparts
https://github.com/cqparts/cqparts/blob/018e87e14c2c4d1d40b4bfe6a7e22bcf9baf0a53/tests/t_cqparts/test_catalogue.py
tests/t_cqparts/test_catalogue.py
import os import tempfile import mock from base import CQPartsTest from base import testlabel from partslib import Box # Unit under test from cqparts.catalogue import JSONCatalogue from cqparts.errors import SearchNoneFoundError, SearchMultipleFoundError class JSONCatalogueTests(CQPartsTest): def setUp(self): handle, self.filename = tempfile.mkstemp() os.close(handle) def tearDown(self): os.unlink(self.filename) def get_catalogue(self): return JSONCatalogue(self.filename) def populate_catalogue(self, catalogue): catalogue.add('id1', Box(length=20), criteria={'a': 0, 'b': 1}) catalogue.add('id2', Box(width=20), criteria={'a': 0, 'b': 2}) catalogue.add('id3', Box(height=20), criteria={'a': 1, 'b': 2}) def test_init_status(self): c = self.get_catalogue() self.assertEqual(c.db.tables(), set([ 'items', JSONCatalogue._dbinfo_name, ])) self.assertEqual(len(c.db.table('items').all()), 0) self.assertEqual(len(c.db.table('_default').all()), 0) def test_clean(self): c = JSONCatalogue(self.filename) self.populate_catalogue(c) self.assertGreater(len(c.items.all()), 0) c.close() c = JSONCatalogue(self.filename, clean=True) # should clear catalogue self.assertEquals(len(c.items.all()), 0) @mock.patch('tinydb.TinyDB') def test_name(self, mock_tinydb): c = self.get_catalogue() self.assertTrue(bool(c.name)) def test_dbinfo_table(self): # open new catalogue c = self.get_catalogue() self.assertIn(c._dbinfo_name, c.db.tables()) self.assertEqual(len(c._dbinfo_table.all()), 1) self.assertIsInstance(c.dbinfo, dict) self.assertEquals(c.dbinfo['lib'], 'cqparts') # manually change content table = c._dbinfo_table table.update({'lib': 'TEST_CONTENT'}) # re-open and confirm dbinfo content isn't clobbered c.close() c = self.get_catalogue() self.assertEqual(len(c._dbinfo_table.all()), 1) # still just 1 entry self.assertEquals(c.dbinfo['lib'], 'TEST_CONTENT') def test_add_single_item(self): c = self.get_catalogue() self.assertEqual(len(c.items.all()), 0) for (id_str, count) in [('id1', 1), ('id2', 
2)]: # add box to db, make sure it was added c.add(id_str, Box()) self.assertEqual(len(c.items.all()), count) item = c.items.all()[count - 1] self.assertEqual(item['id'], id_str) self.assertEqual(item['criteria'], {}) # object serialization tests are out of scope self.assertEqual(set(item['obj'].keys()), set(['params', 'class', 'lib'])) def test_add_duplicate_id(self): c = self.get_catalogue() c.add('id1', Box()) self.assertRaises(ValueError, c.add, 'id1', Box()) def test_add_replace_id(self): c = self.get_catalogue() c.add('id1', Box(), criteria={'try': 1, 'old': 'foo'}) c.add('id1', Box(), criteria={'try': 2, 'new': 'bar'}, force=True) self.assertEqual(len(c.items.all()), 1) item = c.items.all()[0] self.assertNotIn('old', item['criteria']) self.assertIn('new', item['criteria']) def test_add_not_component(self): c = self.get_catalogue() self.assertRaises(TypeError, c.add, 'foo', 'not_a_component') def test_search(self): c = self.get_catalogue() self.populate_catalogue(c) id_set = lambda r: set(map(lambda i: i['id'], r)) item = c.get_query() results = c.search(item.criteria.a == 0) # 2 results self.assertEquals(id_set(results), set(['id1', 'id2'])) results = c.search(item.criteria.a == 100) # no results self.assertEquals(len(results), 0) def test_find(self): c = self.get_catalogue() self.populate_catalogue(c) item = c.get_query() result = c.find(item.criteria.a == 1) result['id'] == 'id1' def test_find_noresult(self): c = self.get_catalogue() self.populate_catalogue(c) item = c.get_query() self.assertRaises(SearchNoneFoundError, c.find, item.criteria.a == 100) def test_find_multipleresults(self): c = self.get_catalogue() self.populate_catalogue(c) item = c.get_query() self.assertRaises(SearchMultipleFoundError, c.find, item.criteria.a == 0) def test_deserialize(self): c = self.get_catalogue() c.add('id1', Box()) c.close() # re-open catalogue c = self.get_catalogue() i = c.get_query() item = c.find(i.id == 'id1') obj = c.deserialize_item(item) 
self.assertIsInstance(obj, Box) def test_get(self): c = self.get_catalogue() c.add('id1', Box()) i = c.get_query() obj = c.get(i.id == 'id1') self.assertIsInstance(obj, Box)
python
Apache-2.0
018e87e14c2c4d1d40b4bfe6a7e22bcf9baf0a53
2026-01-05T07:14:41.025281Z
false
cqparts/cqparts
https://github.com/cqparts/cqparts/blob/018e87e14c2c4d1d40b4bfe6a7e22bcf9baf0a53/tests/partslib/simplecar.py
tests/partslib/simplecar.py
import cadquery import cqparts from cqparts.params import * from cqparts.constraint import Mate from cqparts.constraint import Fixed, Coincident from cqparts.utils import CoordSystem # A greatly simplified version of the tutorial toy car class Wheel(cqparts.Part): width = PositiveFloat(10, doc="wheel width") radius = PositiveFloat(20, doc="wheel radius") def make(self): return cadquery.Workplane('XY').circle(self.radius).extrude(self.width) @property def mate_connect(self): return Mate(self, CoordSystem(normal=(0, 0, 1))) class Axle(cqparts.Part): length = PositiveFloat(50, doc="axle length") radius = PositiveFloat(5, doc="axle radius") def make(self): return cadquery.Workplane('XZ', origin=(0, self.length / 2, 0)) \ .circle(self.radius).extrude(self.length) @property def mate_left(self): return Mate(self, CoordSystem( origin=(0, self.length / 2, 0), normal=(0, 1, 0), )) @property def mate_right(self): return Mate(self, CoordSystem( origin=(0, -self.length / 2, 0), normal=(0, -1, 0), )) class AxleAsm(cqparts.Assembly): wheel_radius = PositiveFloat(20, doc="wheel radii") axle_length = PositiveFloat(50, doc="length of axles") def make_components(self): return { 'axle': Axle(length=self.axle_length), 'wheel_left': Wheel(radius=self.wheel_radius), 'wheel_right': Wheel(radius=self.wheel_radius), } def make_constraints(self): axle = self.components['axle'] wheel_left = self.components['wheel_left'] wheel_right = self.components['wheel_right'] return [ Fixed(axle.mate_origin), Coincident(wheel_left.mate_connect, axle.mate_left), Coincident(wheel_right.mate_connect, axle.mate_right), ] class Chassis(cqparts.Part): length = PositiveFloat(100, doc="chassis length") width = PositiveFloat(50, doc="chassis length") height = PositiveFloat(50, doc="chassis length") wheelbase = PositiveFloat(70, doc="distance between axles") def make(self): return cadquery.Workplane('XY') \ .box(self.length, self.width, self.height) @property def mate_front_axle(self): return Mate(self, 
CoordSystem(origin=(self.wheelbase / 2, 0, -self.height / 2))) @property def mate_back_axle(self): return Mate(self, CoordSystem(origin=(-self.wheelbase / 2, 0, -self.height / 2))) class SimpleCar(cqparts.Assembly): width = PositiveFloat(50, doc="chassis width") length = PositiveFloat(100, doc="chassis length") wheelbase = PositiveFloat(70, doc="distance between axles") wheel_radius = PositiveFloat(20, doc="wheel radii") def make_components(self): return { 'chassis': Chassis(length=self.length, width=self.width, wheelbase=self.wheelbase), 'front_wheels': AxleAsm(wheel_radius=self.wheel_radius, axle_length=self.width), 'back_wheels': AxleAsm(wheel_radius=self.wheel_radius, axle_length=self.width), } def make_constraints(self): chassis = self.components['chassis'] front = self.components['front_wheels'] back = self.components['back_wheels'] return [ Fixed(chassis.mate_origin), Coincident(front.mate_origin, chassis.mate_front_axle), Coincident(back.mate_origin, chassis.mate_back_axle), ]
python
Apache-2.0
018e87e14c2c4d1d40b4bfe6a7e22bcf9baf0a53
2026-01-05T07:14:41.025281Z
false
cqparts/cqparts
https://github.com/cqparts/cqparts/blob/018e87e14c2c4d1d40b4bfe6a7e22bcf9baf0a53/tests/partslib/basic.py
tests/partslib/basic.py
import cadquery import cqparts from cqparts.params import * from cqparts.constraint import Mate from cqparts.constraint import Fixed, Coincident from cqparts.utils import CoordSystem # --------------- Basic Parts ---------------- # Parts with the express intent of being concise, and quick to process class Box(cqparts.Part): """ Rectangular solid centered on the XY plane, centered on the z-axis """ length = PositiveFloat(1, doc="box length (along x-axis)") width = PositiveFloat(1, doc="box width (along y-axis)") height = PositiveFloat(1, doc="box height (along z-axis)") def make(self): return cadquery.Workplane('XY').box( self.length, self.width, self.height, ) @property def mate_top(self): return Mate(self, CoordSystem( origin=(0, 0, self.height / 2) )) @property def mate_bottom(self): return Mate(self, CoordSystem( origin=(0, 0, -self.height / 2), )) class Cylinder(cqparts.Part): """ Cylinder with circular faces parallel with XY plane, length centered on the XY plane. """ length = PositiveFloat(1, doc="cylinder length (along z-axis)") radius = PositiveFloat(1, doc="cylinder radius") def make(self): return cadquery.Workplane('XY', origin=(0, 0, -self.length / 2)) \ .circle(self.radius).extrude(self.length) @property def mate_top(self): return Mate(self, CoordSystem( origin=(0, 0, self.height / 2) )) @property def mate_bottom(self): return Mate(self, CoordSystem( origin=(0, 0, -self.height / 2), )) class CubeStack(cqparts.Assembly): """ 2 Cubes, one stacked on top of the other. 
""" size_a = PositiveFloat(2, doc="size of base cube") size_b = PositiveFloat(1, doc="size of top cube") def make_components(self): return { 'cube_a': Box(length=self.size_a, width=self.size_a, height=self.size_a), 'cube_b': Box(length=self.size_b, width=self.size_b, height=self.size_b), } def make_constraints(self): cube_a = self.components['cube_a'] cube_b = self.components['cube_b'] return [ Fixed(cube_a.mate_bottom), Coincident(cube_b.mate_bottom, cube_a.mate_top), ] @property def mate_top(self): return Mate(self, CoordSystem( origin=(0, 0, self.size_a + self.size_b), ))
python
Apache-2.0
018e87e14c2c4d1d40b4bfe6a7e22bcf9baf0a53
2026-01-05T07:14:41.025281Z
false
cqparts/cqparts
https://github.com/cqparts/cqparts/blob/018e87e14c2c4d1d40b4bfe6a7e22bcf9baf0a53/tests/partslib/__init__.py
tests/partslib/__init__.py
__all__ = [ 'Box', 'Cylinder', 'CubeStack', 'SimpleCar', ] from .basic import Box from .basic import Cylinder from .basic import CubeStack from .simplecar import SimpleCar
python
Apache-2.0
018e87e14c2c4d1d40b4bfe6a7e22bcf9baf0a53
2026-01-05T07:14:41.025281Z
false
cqparts/cqparts
https://github.com/cqparts/cqparts/blob/018e87e14c2c4d1d40b4bfe6a7e22bcf9baf0a53/tests/manual/screwdrives.py
tests/manual/screwdrives.py
#!/usr/bin/env python import os import sys import math import argparse import cadquery import cqparts from cqparts.params import * from cqparts.constraint import * from cqparts.utils import CoordSystem from cqparts.display import display, render_props import cqparts_fasteners from cqparts_fasteners.solidtypes.screw_drives import * from cqparts.utils.env import env_name import logging log = logging.getLogger(__name__) # ---- create commandline parser parser = argparse.ArgumentParser(description='Display screw drives.') parser.add_argument('-l', '--list', action='store_const', const=True, default=False, help="list possible screw drive names") parser.add_argument('name', nargs='?', help="name of screw drive") parser.add_argument('-a', '--alpha', type=float, default=0.5 if env_name == 'freecad' else 1.0, help="alpha of each part") args = parser.parse_args() # Get list of names name_sets = [ cqparts.search.class_criteria[cls].get('name', set()) for cls in cqparts_fasteners.solidtypes.screw_drives.search() ] names_list = set() for name_set in name_sets: names_list |= name_set names_list = sorted(names_list) if args.list: # List screw drives and exit for name in names_list: print(" - %s" % name) exit(0) class ScrewDriveParam(Parameter): """ Screw drive parameter, finds a screw drive type based its name. """ def type(self, value): if isinstance(value, str): return cqparts_fasteners.solidtypes.screw_drives.find(name=value)() raise ValueError() class ScrewDriveBox(cqparts.Part): """ A box with a screw drive indentation cut out of the top face. 
""" drive = ScrewDriveParam(None) size = PositiveFloat(5) height = PositiveFloat(5) _render = render_props(alpha=args.alpha) def make(self): box = cadquery.Workplane('XY').rect(self.size, self.size) \ .extrude(-self.height) box = self.drive.apply(box) return box class NamesParam(Parameter): """ List of screw-drive names """ def type(self, value): if isinstance(value, (list, tuple)): return value raise ValueError() class Showcase(cqparts.Assembly): """ Collection of screw drive boxes, laid out in a square pattern """ names = NamesParam() box_size = PositiveFloat(5) box_height = PositiveFloat(5) gap = PositiveFloat(2) def make_components(self): components = {} for name in self.names: components[name] = ScrewDriveBox( drive=name, size=self.box_size, height=self.box_height, ) return components def make_constraints(self): constraints = [] index_width = int(math.sqrt(len(self.names))) for (i, name) in enumerate(self.names): (row, col) = ((i % index_width), int(i / index_width)) constraints.append(Fixed( self.components[name].mate_origin, CoordSystem(origin=( row * (self.box_size + self.gap), -col * (self.box_size + self.gap), 0 )), )) return constraints names = names_list if args.name: # single names = [args.name] showcase = Showcase(names=names) display(showcase)
python
Apache-2.0
018e87e14c2c4d1d40b4bfe6a7e22bcf9baf0a53
2026-01-05T07:14:41.025281Z
false
cqparts/cqparts
https://github.com/cqparts/cqparts/blob/018e87e14c2c4d1d40b4bfe6a7e22bcf9baf0a53/tests/manual/block_tree.py
tests/manual/block_tree.py
#!/usr/bin/env python import os import sys import inspect if 'MYSCRIPT_DIR' in os.environ: _this_path = os.environ['MYSCRIPT_DIR'] else: _this_path = os.path.dirname(os.path.abspath(inspect.getfile(inspect.currentframe()))) sys.path.insert(0, os.path.join(_this_path, '..', '..', 'src')) #cadquery_path = os.path.join(_this_path, '..', '..', '..', 'cadquery') #if os.path.exists(cadquery_path): # sys.path.insert(0, cadquery_path) from math import ( sin, cos, tan, radians, ) import cadquery from Helpers import show import logging try: cadquery.freecad_impl.console_logging.enable(logging.INFO) except AttributeError: pass # outdated cadquery, no worries log = logging.getLogger() #log.info("----------------- Block Tree ----------------") import cqparts from cqparts.params import * from cqparts.constraint import Mate from cqparts.constraint import Fixed, Coincident from cqparts.utils.geometry import CoordSystem from cqparts.display import display, render_props # Block Tree? # # This is a simple concept intended to test mating parts. # There are 2 types of part: # - a branch; cylinder # - a splitter; "house" shaped block (a rectangle with a triangle on top) # like this: /\ # / \ # | | # |____| # # Mates are positioned in the Part instances, which are used by the Assembly. # These building blocks are used to create a sort of wooden "tree". 
# ------------------- Parts -------------------- class Branch(cqparts.Part): """ cylindrical branch to between """ diameter = PositiveFloat(5, doc="diameter of cylinder") height = PositiveFloat(10, doc="cylinder's height") twist = Float(0, doc="twist angle of mount (degrees)") _render = render_props(template='glass') def make(self): return cadquery.Workplane("XY") \ .circle(self.diameter / 2) \ .extrude(self.height) @property def mate_top(self): # Mate point at the top of the cylinder, twist applied return Mate(self, CoordSystem.from_plane( self.local_obj.faces(">Z").workplane().plane.rotated((0, 0, self.twist)) )) class Splitter(cqparts.Part): """ A house-shaped thingy to attach more branches to """ width = PositiveFloat(10, doc="base width") height = PositiveFloat(12, doc="total height") angle_left = PositiveFloat(30, doc="angle of roof left (degrees)") angle_right = PositiveFloat(30, doc="angle of roof right (degrees)") _render = render_props(template='glass') def __init__(self, *args, **kwargs): super(Splitter, self).__init__(*args, **kwargs) # Calculate wall heights, they're used for construction, and mates self.left_wall_height = self.height - ((self.width / 2) * tan(radians(self.angle_left))) self.right_wall_height = self.height - ((self.width / 2) * tan(radians(self.angle_right))) assert self.left_wall_height > 0, "bad left angle" assert self.right_wall_height > 0, "bad right angle" def make(self): points = [ # base (-self.width / 2, 0), (self.width / 2, 0), # roof (self.width / 2, self.right_wall_height), (0, self.height), (-self.width / 2, self.left_wall_height), ] obj = cadquery.Workplane("XZ", origin=(0, self.width / 2, 0)) \ .move(*points[0]).polyline(points[1:]).close() \ .extrude(self.width) return obj @property def mate_left(self): """Mate point in the center of the angled face on the left""" # TODO: query self.local_obj geometry to get center of face? 
return Mate(self, CoordSystem( origin=(-self.width / 4, 0, (self.height + self.left_wall_height) / 2), xDir=(0,1,0), normal=(-sin(radians(self.angle_left)), 0, cos(radians(self.angle_left))) )) @property def mate_right(self): """Mate point in the center of the angled face on the right""" # TODO: query self.local_obj geometry to get center of face? return Mate(self, CoordSystem( origin=(self.width / 4, 0, (self.height + self.right_wall_height) / 2), xDir=(0,1,0), normal=(sin(radians(self.angle_right)), 0, cos(radians(self.angle_right))) )) # ------------------- Tree -------------------- # note: each assembly has parts all the same colour to (show grouping). # # Tree's hierarchy: # [red] trunk + left branch are all in a single assembly # [green] right branch (Branch + Splitter + 2 branches) # [blue] little blue house (with a chimney) # [yellow] left right branch (Branch) # # This example is not demonstrating good design, it's just to illustrate # different ways of grouping, and to make sure everything aligns. 
# class BlueHouse(cqparts.Assembly): roof_angle = PositiveFloat(10, doc="chimney angle for chimney roof bit") house_size = PositiveFloat(7, doc="square size of little house") def make_components(self): blue = {'color': (0,0,255), 'alpha': 0.5} return { 'foo': Splitter( width=self.house_size, height=self.house_size, angle_right=self.roof_angle, _render=blue ), 'bar': Branch(diameter=3, height=2, _render=blue), } def make_constraints(self): return [ Fixed( self.components['foo'].mate_origin, # lock to origin ), Coincident( self.components['bar'].mate_origin, # lock this self.components['foo'].mate_right, # to this ), ] class GreenBranch(cqparts.Assembly): def make_components(self): green = {'color': (0,255,0), 'alpha': 0.5} return { 'branch': Branch(height=3, _render=green), 'split': Splitter(_render=green), 'L': Branch(_render=green), 'R': Branch(_render=green), 'house': BlueHouse(), } def make_constraints(self): return [ Fixed( self.components['branch'].mate_origin, # lock this CoordSystem((0,0,0), (1,0,0), (0,0,1)), # here ), Coincident( self.components['split'].mate_origin, # lock this self.components['branch'].mate_top, # here ), Coincident( self.components['L'].mate_origin, # lock this self.components['split'].mate_left, # here ), Coincident( self.components['R'].mate_origin, # lock this self.components['split'].mate_right, # here ), Coincident( self.components['house'].mate_origin, # lock this self.components['R'].mate_top, # here ), ] class BlockTree(cqparts.Assembly): trunk_diam = PositiveFloat(10, doc="trunk diameter") def make_components(self): red = {'color': (255,0,0), 'alpha': 0.9} components = { 'trunk': Branch(diameter=self.trunk_diam, twist=20, _render=red), 'trunk_split': Splitter(angle_left=60, _render=red), 'branch_lb': Branch(diameter=4, _render=red), 'branch_ls': Splitter(angle_right=30, _render=red), 'branch_r': GreenBranch(), } # branch R #cmp['branch_lb'] = Branch(diameter=2, height=) return components def make_constraints(self): return [ # 
trunk Fixed( self.components['trunk'].mate_origin, # lock this CoordSystem((0,0,0), (1,0,0), (0,0,1)), # here ), Coincident( self.components['trunk_split'].mate_origin, # lock this self.components['trunk'].mate_top, # here ), # branch L Coincident( self.components['branch_lb'].mate_origin, self.components['trunk_split'].mate_left, ), Coincident( self.components['branch_ls'].mate_origin, self.components['branch_lb'].mate_top, ), # branch RL Coincident( self.components['branch_r'].mate_origin, self.components['trunk_split'].mate_right, ), ] #house = Splitter() #display(house) #block_tree.world_coords = CoordSystem() # ------------------- Export / Display ------------------- from cqparts.utils.env import get_env_name env_name = get_env_name() # ------- Models block_tree = BlockTree(trunk_diam=7) #import ipdb #ipdb.set_trace() if env_name == 'cmdline': block_tree.exporter('gltf')('exports/block-tree.gltf', embed=True) print(block_tree.tree_str(name="block_tree")) elif env_name == 'freecad': pass # manually switchable for testing display(block_tree)
python
Apache-2.0
018e87e14c2c4d1d40b4bfe6a7e22bcf9baf0a53
2026-01-05T07:14:41.025281Z
false
cqparts/cqparts
https://github.com/cqparts/cqparts/blob/018e87e14c2c4d1d40b4bfe6a7e22bcf9baf0a53/tests/manual/fastenerheads.py
tests/manual/fastenerheads.py
#!/usr/bin/env python import os import sys import math import argparse import cadquery import cqparts from cqparts.params import * from cqparts.constraint import * from cqparts.utils import CoordSystem from cqparts.display import display, render_props import cqparts_fasteners from cqparts_fasteners.solidtypes.fastener_heads import * import logging log = logging.getLogger(__name__) # ---- create commandline parser parser = argparse.ArgumentParser(description='Display fastener heads.') parser.add_argument('-l', '--list', action='store_const', const=True, default=False, help="list possible screw drive names") parser.add_argument('name', nargs='?', help="name of screw drive") parser.add_argument('-a', '--alpha', type=float, default=0.5, help="alpha of each part") args = parser.parse_args() # Get list of names name_sets = [ cqparts.search.class_criteria[cls].get('name', set()) for cls in cqparts_fasteners.solidtypes.fastener_heads.search() ] names_list = set() for name_set in name_sets: names_list |= name_set names_list = sorted(names_list) if args.list: # List screw drives and exit for name in names_list: print(" - %s" % name) exit(0) class NamesParam(Parameter): """ List of screw-drive names """ def type(self, value): if isinstance(value, (list, tuple)): return value raise ValueError() class Showcase(cqparts.Assembly): """ Collection of screw drive boxes, laid out in a square pattern """ names = NamesParam() box_size = PositiveFloat(5) box_height = PositiveFloat(5) gap = PositiveFloat(2) def make_components(self): components = {} for name in self.names: cls = cqparts_fasteners.solidtypes.fastener_heads.find(name=name) components[name] = cls(_render={'alpha': 0.5}) return components def make_constraints(self): constraints = [] index_width = int(math.sqrt(len(self.names))) for (i, name) in enumerate(self.names): (row, col) = ((i % index_width), int(i / index_width)) constraints.append(Fixed( self.components[name].mate_origin, CoordSystem(origin=( row * (self.box_size + 
self.gap), -col * (self.box_size + self.gap), 0 )), )) return constraints names = names_list if args.name: # single names = [args.name] showcase = Showcase(names=names) display(showcase)
python
Apache-2.0
018e87e14c2c4d1d40b4bfe6a7e22bcf9baf0a53
2026-01-05T07:14:41.025281Z
false
cqparts/cqparts
https://github.com/cqparts/cqparts/blob/018e87e14c2c4d1d40b4bfe6a7e22bcf9baf0a53/tests/manual/thread_custom.py
tests/manual/thread_custom.py
import os import sys import inspect if 'MYSCRIPT_DIR' in os.environ: _this_path = os.environ['MYSCRIPT_DIR'] else: _this_path = os.path.dirname(os.path.abspath(inspect.getfile(inspect.currentframe()))) sys.path.insert(0, os.path.join(_this_path, '..', '..', 'src')) import cadquery import cqparts import Part as FreeCADPart from Helpers import show from cqparts.solidtypes.threads.base import Thread, profile_to_cross_section, helical_path cadquery.freecad_impl.console_logging.enable() import logging log = logging.getLogger(__name__) # Timing tools from time import time from contextlib import contextmanager @contextmanager def measure_time(name): start_time = time() yield taken = time() - start_time log.info(" %-25s (took: %gs)" % (name, taken)) with measure_time('triangular'): # FIXME: this method of creating a "custom thread" is out-dated. # should inherit from Thread and implement build_profile profile = cadquery.Workplane("XZ") \ .moveTo(2, 0) \ .polyline([(3, 1), (3, 1.5)]) \ .threePointArc((2.4, 1.5), (2, 2)) \ .lineTo(2, 2.5) \ .threePointArc((2.5, 3), (2, 3.5)) \ .lineTo(2, 4) \ .wire() cross_section = profile_to_cross_section( profile, min_vertices=100 # increase default resolution ) # make helix path = helical_path(4.0, 4.0, 1) thread = cross_section.sweep(path, isFrenet=True) # Making thread valid thread_shape = thread.objects[0].wrapped thread_shape.sewShape() thread.objects[0].wrapped = FreeCADPart.Solid(thread_shape) # cut through box = cadquery.Workplane("XZ", origin=(0, 2, 2)) \ .box(8, 6, 4) #thread = thread.cut(box) #box = box.cut(thread) show(profile) show(cross_section) show(thread, (200, 200, 200, 0.2)) show(box, (255, 200, 200, 0.7)) # Display #show(my_thread)
python
Apache-2.0
018e87e14c2c4d1d40b4bfe6a7e22bcf9baf0a53
2026-01-05T07:14:41.025281Z
false
cqparts/cqparts
https://github.com/cqparts/cqparts/blob/018e87e14c2c4d1d40b4bfe6a7e22bcf9baf0a53/tests/manual/thread_eval.py
tests/manual/thread_eval.py
import os import sys import inspect if 'MYSCRIPT_DIR' in os.environ: _this_path = os.environ['MYSCRIPT_DIR'] else: _this_path = os.path.dirname(os.path.abspath(inspect.getfile(inspect.currentframe()))) sys.path.insert(0, os.path.join(_this_path, '..', '..', 'src')) import cadquery import cqparts from Helpers import show cadquery.freecad_impl.console_logging.enable() import logging log = logging.getLogger(__name__) # Make cantilever parts class Anchor(cqparts.Part): def make(self): return cadquery.Workplane('XY', origin=(0, 0, -10)) \ .box(10, 30, 10, centered=(True, True, False)) class Cantilever(cqparts.Part): def make(self): return cadquery.Workplane('XY', origin=(0, 20, 0)) \ .box(10, 50, 2, centered=(True, True, False)) class Thing(cqparts.Part): def make(self): return cadquery.Workplane('XZ', origin=(0, 0, 4)) \ .box(3, 3, 3) \ .faces('<Y').hole(2) anchor = Anchor() cantilever = Cantilever() thing = Thing() def make_line(start, direction): """Single linear edge in a Wire, as an indicator""" start_v = cadquery.Vector(*start) finish_v = start_v.add(cadquery.Vector(*direction)) edge = cadquery.Edge.makeLine(start_v, finish_v) wire = cadquery.Wire.assembleEdges([edge]) return cadquery.Workplane('XY').newObject([wire]) (s1, d1) = ((0, 0, 20), (0, 0, -10)) (s2, d2) = ((0, 10, 5), d1) # Display stuff show(anchor.object, (204, 204, 204, 0.3)) show(cantilever.object, (204, 204, 204, 0.3)) show(thing.object, (204, 204, 204, 0.3)) show(make_line(s1, d1)) #show(make_line(s2, d2)) # ----- Fastener Evaluation from cqparts_fasteners.utils import VectorEvaluator evaluator = VectorEvaluator([thing, anchor, cantilever], s1, d1) for e in evaluator.eval: show(e._wire_wp) #show(e.result)
python
Apache-2.0
018e87e14c2c4d1d40b4bfe6a7e22bcf9baf0a53
2026-01-05T07:14:41.025281Z
false
cqparts/cqparts
https://github.com/cqparts/cqparts/blob/018e87e14c2c4d1d40b4bfe6a7e22bcf9baf0a53/tests/manual/threads.py
tests/manual/threads.py
#!/usr/bin/env python import os import sys import math import argparse import cadquery import cqparts from cqparts.params import * from cqparts.constraint import * from cqparts.utils import CoordSystem from cqparts.solidtypes.threads import * from cqparts.display import display, render_props import logging log = logging.getLogger(__name__) # ---- create commandline parser parser = argparse.ArgumentParser(description='Display fastener heads.') parser.add_argument('-l', '--list', action='store_const', const=True, default=False, help="list possible screw drive names") parser.add_argument('name', nargs='?', help="name of screw drive") parser.add_argument('-a', '--alpha', type=float, default=0.5, help="alpha of each part") args = parser.parse_args() # Get list of names name_sets = [ cqparts.search.class_criteria[cls].get('name', set()) for cls in cqparts.solidtypes.threads.search() ] names_list = set() for name_set in name_sets: names_list |= name_set names_list = sorted(names_list) from cqparts.utils.env import env_name if (not args.name) and (env_name == 'freecad'): args.name = 'triangular' if not args.name or args.list: # List screw drives and exit for name in names_list: print(" - %s" % name) else: thread = find(name=args.name)() thread._simple = False # force complex threads if env_name == 'freecad': # force complex thread from Helpers import show show(thread.local_obj) show(thread.profile) else: display(thread)
python
Apache-2.0
018e87e14c2c4d1d40b4bfe6a7e22bcf9baf0a53
2026-01-05T07:14:41.025281Z
false
cqparts/cqparts
https://github.com/cqparts/cqparts/blob/018e87e14c2c4d1d40b4bfe6a7e22bcf9baf0a53/tests/manual/fastener.py
tests/manual/fastener.py
#!/usr/bin/env python import os import sys import inspect import logging import cadquery import cqparts from cqparts.params import * from cqparts_misc.basic.primatives import Box from cqparts.constraint import Fixed, Coincident from cqparts.constraint import Mate from cqparts.display import display from cqparts.utils.geometry import CoordSystem from cqparts_fasteners.utils import VectorEvaluator #from cqparts_fasteners import Fastener from cqparts_fasteners.fasteners.screw import ScrewFastener from cqparts_fasteners.fasteners.nutbolt import NutAndBoltFastener # enable logging cadquery.freecad_impl.console_logging.enable(logging.INFO) log = logging.getLogger(__name__) alpha = 0.5 class Thing(cqparts.Assembly): def make_components(self): anchor = Box(length=25, width=30, height=15, _render={'alpha':alpha}) plate = Box(length=35, width=25, height=9, _render={'alpha':alpha}) return { 'anchor': anchor, 'plate': plate, #'fastener': NutAndBoltFastener(parts=[plate, anchor]), 'fastener': ScrewFastener(parts=[plate, anchor]), } def make_constraints(self): anchor = self.components['anchor'] plate = self.components['plate'] fastener = self.components['fastener'] return [ Fixed(anchor.mate_origin), Coincident(plate.mate_origin, anchor.mate_top), Coincident(fastener.mate_origin, plate.mate_top), ] # ------------------- Export / Display ------------------- from cqparts.utils.env import env_name # ------- Models thing = Thing() thing.world_coords = CoordSystem() #thing.world_coords = CoordSystem(origin=(1, 2, 3), xDir=(0,1,-1), normal=(1,0.3,0)) #thing.world_coords = CoordSystem.random() log.info(thing.tree_str(name="thing", prefix=' ')) if env_name == 'cmdline': display(thing) elif env_name == 'freecad': display(thing)
python
Apache-2.0
018e87e14c2c4d1d40b4bfe6a7e22bcf9baf0a53
2026-01-05T07:14:41.025281Z
false
cqparts/cqparts
https://github.com/cqparts/cqparts/blob/018e87e14c2c4d1d40b4bfe6a7e22bcf9baf0a53/tests/t_cqparts_motors/test_motors_catalogue.py
tests/t_cqparts_motors/test_motors_catalogue.py
from base import testlabel from cqparts.utils.test import CatalogueTest from cqparts.catalogue import JSONCatalogue def add_catalogue(filename): catalogue = JSONCatalogue(filename) cls = testlabel('catalogue')(CatalogueTest.create_from(catalogue)) globals()[cls.__name__] = cls # Stepper Catalogue(s) add_catalogue('../src/cqparts_motors/catalogue/dcmotor.json') add_catalogue('../src/cqparts_motors/catalogue/stepper-nema.json')
python
Apache-2.0
018e87e14c2c4d1d40b4bfe6a7e22bcf9baf0a53
2026-01-05T07:14:41.025281Z
false
cqparts/cqparts
https://github.com/cqparts/cqparts/blob/018e87e14c2c4d1d40b4bfe6a7e22bcf9baf0a53/tests/t_cqparts_motors/__init__.py
tests/t_cqparts_motors/__init__.py
python
Apache-2.0
018e87e14c2c4d1d40b4bfe6a7e22bcf9baf0a53
2026-01-05T07:14:41.025281Z
false
cqparts/cqparts
https://github.com/cqparts/cqparts/blob/018e87e14c2c4d1d40b4bfe6a7e22bcf9baf0a53/docs-sphinx/localweb.py
docs-sphinx/localweb.py
# run using: # ./go.sh web import os import time import SimpleHTTPServer import SocketServer import threading import webbrowser PORT = 9040 SocketServer.TCPServer.allow_reuse_address = True Handler = SimpleHTTPServer.SimpleHTTPRequestHandler os.chdir('./_build/html/') httpd = SocketServer.ThreadingTCPServer(("", PORT), Handler) print("serving: http://localhost:%i/" % PORT) try: server_thread = threading.Thread(target=httpd.serve_forever) server_thread.daemon = True server_thread.start() webbrowser.open("http://localhost:%i/" % PORT) while True: # wait for Ctrl+C time.sleep(0.5) except KeyboardInterrupt: print("\n[keyboard interrupt]") finally: httpd.shutdown() httpd.server_close() server_thread.join() print("[http shutdown successfully]")
python
Apache-2.0
018e87e14c2c4d1d40b4bfe6a7e22bcf9baf0a53
2026-01-05T07:14:41.025281Z
false
cqparts/cqparts
https://github.com/cqparts/cqparts/blob/018e87e14c2c4d1d40b4bfe6a7e22bcf9baf0a53/docs-sphinx/conf.py
docs-sphinx/conf.py
# -*- coding: utf-8 -*- # # cqparts documentation build configuration file, created by # sphinx-quickstart on Thu Nov 16 13:21:56 2017. # # This file is execfile()d with the current directory set to its # containing dir. # # Note that not all possible configuration values are present in this # autogenerated file. # # All configuration values have a default; values that are commented out # serve to show the default. # If extensions (or modules to document with autodoc) are in another directory, # add these directories to sys.path here. If the directory is relative to the # documentation root, use os.path.abspath to make it absolute, like shown here. # import os import sys from distutils.version import LooseVersion import re sys.path.insert(0, os.path.abspath(os.path.join('..', 'src'))) # --- Import documented libs import cqparts # parts libs import cqparts_bearings import cqparts_fasteners import cqparts_gearboxes import cqparts_gears import cqparts_misc import cqparts_motors import cqparts_springs import cqparts_torquelimiters # -- General configuration ------------------------------------------------ # If your documentation needs a minimal Sphinx version, state it here. # # needs_sphinx = '1.0' # Add any Sphinx extension module names here, as strings. They can be # extensions coming with Sphinx (named 'sphinx.ext.*') or your custom # ones. extensions = [ 'sphinx.ext.autodoc', 'sphinx.ext.doctest', 'sphinx.ext.intersphinx', 'sphinx.ext.coverage', 'sphinx.ext.githubpages', 'sphinx.ext.mathjax', # examples to get started: https://en.wikipedia.org/wiki/Help:Displaying_a_formula#Examples_of_implemented_TeX_formulas 'sphinx.ext.todo', ] # Add any paths that contain templates here, relative to this directory. templates_path = ['_templates'] # The suffix(es) of source filenames. # You can specify multiple suffix as a list of string: # # source_suffix = ['.rst', '.md'] source_suffix = '.rst' # The master toctree document. 
master_doc = 'index' # General information about the project. project = cqparts.__title__ copyright = re.sub(r'^copyright\s*', '', cqparts.__copyright__, flags=re.I) author = cqparts.__author__ # The version info for the project you're documenting, acts as replacement for # |version| and |release|, also used in various other places throughout the # built documents. # # The full version, including alpha/beta/rc tags. release = unicode(cqparts.__version__) # The short X.Y version. version = u'.'.join(str(i) for i in LooseVersion(release).version[:2]) # The language for content autogenerated by Sphinx. Refer to documentation # for a list of supported languages. # # This is also used if you do content translation via gettext catalogs. # Usually you set "language" from the command line for these cases. language = None # List of patterns, relative to source directory, that match files and # directories to ignore when looking for source files. # This patterns also effect to html_static_path and html_extra_path exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store'] # The name of the Pygments (syntax highlighting) style to use. pygments_style = 'sphinx' # If true, `todo` and `todoList` produce output, else they produce nothing. todo_include_todos = False # -- Options for HTML output ---------------------------------------------- # The theme to use for HTML and HTML Help pages. See the documentation for # a list of builtin themes. # #html_theme = 'classic' html_theme = 'sphinx_rtd_theme' # same as cadquery # Logo #html_logo = "_static/logos/cqparts/light.svg" html_logo = "_static/logos/cqparts/dark.svg" # Theme options are theme-specific and customize the look and feel of a theme # further. For a list of options available for each theme, see the # documentation. # # html_theme_options = {} # Add any paths that contain custom static files (such as style sheets) here, # relative to this directory. 
They are copied after the builtin static files, # so a file named "default.css" will overwrite the builtin "default.css". html_static_path = ['_static'] # Custom sidebar templates, must be a dictionary that maps document names # to template names. # # This is required for the alabaster theme # refs: http://alabaster.readthedocs.io/en/latest/installation.html#sidebars html_sidebars = { '**': [ 'relations.html', # needs 'show_related': True theme option to display 'searchbox.html', ] } # -- Options for HTMLHelp output ------------------------------------------ # Output file base name for HTML help builder. htmlhelp_basename = 'cqpartsdoc' # -- Options for LaTeX output --------------------------------------------- latex_elements = { # The paper size ('letterpaper' or 'a4paper'). # # 'papersize': 'letterpaper', # The font size ('10pt', '11pt' or '12pt'). # # 'pointsize': '10pt', # Additional stuff for the LaTeX preamble. # # 'preamble': '', # Latex figure (float) alignment # # 'figure_align': 'htbp', } # Grouping the document tree into LaTeX files. List of tuples # (source start file, target name, title, # author, documentclass [howto, manual, or own class]). latex_documents = [ (master_doc, 'cqparts.tex', u'cqparts Documentation', u'Peter Boin', 'manual'), ] # -- Options for manual page output --------------------------------------- # One entry per manual page. List of tuples # (source start file, name, description, authors, manual section). man_pages = [ (master_doc, 'cqparts', u'cqparts Documentation', [author], 1) ] # -- Options for Texinfo output ------------------------------------------- # Grouping the document tree into Texinfo files. 
List of tuples # (source start file, target name, title, author, # dir menu entry, description, category) texinfo_documents = [ (master_doc, 'cqparts', u'cqparts Documentation', author, 'cqparts', 'CAD for coders, a wrapper for cadquery objects.', 'Miscellaneous'), ] # Example configuration for intersphinx: refer to the Python standard library. intersphinx_mapping = { 'python': ('https://docs.python.org/', None), 'cadquery': ('http://dcowden.github.io/cadquery/', None), 'numpy': ('http://docs.scipy.org/doc/numpy/', None), 'tinydb': ('http://tinydb.readthedocs.io/en/latest/', None), } # If true, links to the reST sources are added to the pages. html_show_sourcelink = False # TODO: show todo lists todo_include_todos = True def custom_noskip(): NOSKIP = { 'instancemethod': ( '__init__', '__add__', '__sub__', '__mul__', '__div__', '__call__', ), } def callback(app, what, name, obj, skip, options): if name in NOSKIP.get(type(obj).__name__, []) and obj.__doc__: return False return None return callback from cqparts.utils.sphinx import add_parametric_object_params from cqparts.utils.sphinx import add_search_index_criteria from cqparts.utils.sphinx import skip_class_parameters def setup(app): # Custom Style-sheet (effectively inherits from theme, andn overrides it) app.add_stylesheet('css/custom.css') # Custom skip mapping app.connect("autodoc-skip-member", custom_noskip()) app.connect("autodoc-skip-member", skip_class_parameters()) # Parameter Mapping app.connect("autodoc-process-docstring", add_search_index_criteria(prepend=True)) app.connect("autodoc-process-docstring", add_parametric_object_params(prepend=True))
python
Apache-2.0
018e87e14c2c4d1d40b4bfe6a7e22bcf9baf0a53
2026-01-05T07:14:41.025281Z
false
cqparts/cqparts
https://github.com/cqparts/cqparts/blob/018e87e14c2c4d1d40b4bfe6a7e22bcf9baf0a53/docs-sphinx/_static/iframes/easy-install/generate.py
docs-sphinx/_static/iframes/easy-install/generate.py
#!/usr/bin/env python import cadquery import logging cadquery.freecad_impl.console_logging.enable(logging.INFO) # ------------------- Wood Screw ------------------- import cadquery import cqparts from cqparts.params import * from cqparts_fasteners.params import HeadType, DriveType, ThreadType from cqparts_fasteners.male import MaleFastenerPart from cqparts.display import display, render_props from cqparts.constraint import Mate from cqparts.utils import CoordSystem class WoodScrew(MaleFastenerPart): # --- override MaleFastenerPart parameters # sub-parts head = HeadType(default=('cheese', { 'diameter': 4, 'height': 2, }), doc="screw's head") drive = DriveType(default=('phillips', { 'diameter': 3.5, 'depth': 2.5, 'width': 0.5, }), doc="screw's drive") thread = ThreadType(default=('triangular', { 'diameter': 5, # outer 'diameter_core': 4.3, # inner 'pitch': 2, 'angle': 30, }), doc="screw's thread") # scalars neck_diam = PositiveFloat(2, doc="neck diameter") neck_length = PositiveFloat(40, doc="length from base of head to end of neck") length = PositiveFloat(50, doc="length from base of head to end of thread") # --- parameters unique for this class neck_exposed = PositiveFloat(2, doc="length of neck exposed below head") bore_diam = PositiveFloat(6, doc="diameter of screw's bore") _render = render_props(template='aluminium') def initialize_parameters(self): super(WoodScrew, self).initialize_parameters() def make(self): screw = super(WoodScrew, self).make() # add bore cylinder bore = cadquery.Workplane('XY', origin=(0, 0, -self.neck_length)) \ .circle(self.bore_diam / 2) \ .extrude(self.neck_length - self.neck_exposed) # cut out sides from bore so it takes less material for angle in [i * (360 / 3) for i in range(3)]: slice_obj = cadquery.Workplane( 'XY', origin=(self.bore_diam / 2, 0, -(self.neck_exposed + 2)) ).circle(self.bore_diam / 3) \ .extrude(-(self.neck_length - self.neck_exposed - 4)) \ .rotate((0,0,0), (0,0,1), angle) bore = bore.cut(slice_obj) screw = 
screw.union(bore) return screw def make_cutter(self): # we won't use MaleFastenerPart.make_cutter() because it # implements an access hole that we don't need. cutter = cadquery.Workplane('XY', origin=(0, 0, self.head.height)) \ .circle(self.bore_diam / 2) \ .extrude(-(self.neck_length + self.head.height)) cutter = cutter.union( self.thread.make_pilothole_cutter().translate(( 0, 0, -self.length )) ) return cutter def make_simple(self): # in this case, the cutter solid serves as a good simplified # model of the screw. return self.make_cutter() @property def mate_threadstart(self): return Mate(self, CoordSystem(origin=(0, 0, -self.neck_length))) # ------------------- Anchor ------------------- from math import sin, cos, pi class Anchor(cqparts.Part): # sub-parts drive = DriveType(default=('cross', { 'diameter': 5, 'width': 1, 'depth': 2.5, }), doc="anchor's drive") # scalars diameter = PositiveFloat(10, doc="diameter of anchor") height = PositiveFloat(5, doc="height of anchor") neck_diameter = PositiveFloat(2, doc="width of screw neck") head_diameter = PositiveFloat(4, doc="width of screw head") spline_point_count = IntRange(4, 200, 10, doc="number of spiral spline points") ratio_start = FloatRange(0.5, 0.99, 0.99, doc="radius ratio of wedge start") ratio_end = FloatRange(0.01, 0.8, 0.7, doc="radius ratio of wedge end") _render = render_props(color=(100, 100, 150)) # dark blue @property def wedge_radii(self): return ( (self.diameter / 2) * self.ratio_start, # start radius (self.diameter / 2) * self.ratio_end # end radius ) def make(self): obj = cadquery.Workplane('XY') \ .circle(self.diameter / 2) \ .extrude(-self.height) # neck slot : eliminate screw neck interference obj = obj.cut( cadquery.Workplane('XY', origin=(0, 0, -((self.neck_diameter + self.height) / 2))) \ .moveTo(0, 0) \ .lineTo(self.diameter / 2, 0) \ .threePointArc( (0, -self.diameter / 2), (-self.diameter / 2, 0), ) \ .close() \ .extrude(self.neck_diameter) ) # head slot : form a circular wedge with 
remaining material (start_r, end_r) = self.wedge_radii angles_radius = ( # as generator ( (i * (pi / self.spline_point_count)), # angle start_r + ((end_r - start_r) * (i / float(self.spline_point_count))) # radius ) for i in range(1, self.spline_point_count + 1) # avoid zero angle ) points = [(cos(a) * r, -sin(a) * r) for (a, r) in angles_radius] obj = obj.cut( cadquery.Workplane('XY', origin=(0, 0, -((self.head_diameter + self.height) / 2))) \ .moveTo(start_r, 0) \ .spline(points) \ .close() \ .extrude(self.head_diameter) ) # access port : remove a quadrant to allow screw's head through obj = obj.cut( cadquery.Workplane('XY', origin=(0, 0, -(self.height - self.head_diameter) / 2)) \ .rect(self.diameter / 2, self.diameter / 2, centered=False) \ .extrude(-self.height) ) # screw drive : to apply torque to anchor for installation if self.drive: obj = self.drive.apply(obj) # top face is on origin XY plane return obj def make_simple(self): # Just return the core cylinder return cadquery.Workplane('XY') \ .circle(self.diameter / 2) \ .extrude(-self.height) def make_cutter(self): # A solid to cut away from another; makes room to install the anchor return cadquery.Workplane('XY', origin=(0, 0, -self.height)) \ .circle(self.diameter / 2) \ .extrude(self.height + 1000) # 1m bore depth @property def mate_screwhead(self): # The location of the screwhead in it's theoretical tightened mid-point # (well, not really, but this is just a demo) (start_r, end_r) = self.wedge_radii return Mate(self, CoordSystem( origin=(0, -((start_r + end_r) / 2), -self.height / 2), xDir=(1, 0, 0), normal=(0, 1, 0) )) @property def mate_center(self): # center of object, along screw's rotation axis return Mate(self, CoordSystem(origin=(0, 0, -self.height / 2))) @property def mate_base(self): # base of object (for 3d printing placement, maybe) return Mate(self, CoordSystem(origin=(0, 0, -self.height))) # ------------------- Screw & Anchor ------------------- from cqparts.constraint import Fixed, 
Coincident class _Together(cqparts.Assembly): def make_components(self): return { 'screw': WoodScrew(neck_exposed=5), 'anchor': Anchor(height=7), } def make_constraints(self): return [ Fixed(self.components['screw'].mate_origin), Coincident( self.components['anchor'].mate_screwhead, self.components['screw'].mate_origin, ), ] # ------------------- WoodPanel ------------------- class WoodPanel(cqparts.Part): thickness = PositiveFloat(15, doc="thickness of panel") width = PositiveFloat(100, doc="panel width") length = PositiveFloat(100, doc="panel length") _render = render_props(template='wood') # wooden def make(self): return cadquery.Workplane('XY') \ .box(self.length, self.width, self.thickness) @property def mate_end(self): # center of +x face return Mate(self, CoordSystem( origin=(self.length / 2, 0, 0), xDir=(0, 0, -1), normal=(-1, 0, 0), )) def get_mate_edge(self, thickness): return Mate(self, CoordSystem( origin=((self.length / 2) - (thickness / 2), 0, self.thickness / 2) )) # ------------------- Fastener ------------------- from cqparts_fasteners.fasteners import Fastener from cqparts_fasteners.utils import VectorEvaluator, Selector, Applicator from cqparts.constraint import Fixed, Coincident class EasyInstallFastener(Fastener): # The origin of the evaluation is to be the target center for the anchor. Evaluator = VectorEvaluator class Selector(Selector): def get_components(self): anchor = Anchor( height=10, ) # --- Define the screw's dimensions # Get distance from anchor's center to screwhead's base # (we'll call that the "anchor's slack") v_rel_center = anchor.mate_center.local_coords.origin v_rel_screwhead = anchor.mate_screwhead.local_coords.origin anchor_slack = abs(v_rel_screwhead - v_rel_center) # The slack is along the evaluation vector, which is the same # as the woodscrew's axis of rotation. # Find the screw's neck length # This will be the length of all but the last evaluator effect, # minus the anchor's slack. 
effect_length = abs(self.evaluator.eval[-1].start_point - self.evaluator.eval[0].start_point) neck_length = effect_length - anchor_slack # Get thread's length : 80% of maximum thread_maxlength = abs(self.evaluator.eval[-1].end_point - self.evaluator.eval[-1].start_point) thread_length = thread_maxlength * 0.8 # Create screw screw = WoodScrew( neck_length=neck_length, length=neck_length + thread_length, ) return { 'anchor': anchor, 'screw': screw, } def get_constraints(self): last_part = self.evaluator.eval[-1].part return [ Coincident( self.components['screw'].mate_threadstart, Mate(last_part, self.evaluator.eval[-1].start_coordsys - last_part.world_coords), ), Coincident( self.components['anchor'].mate_screwhead, self.components['screw'].mate_origin, ), ] class Applicator(Applicator): def apply_alterations(self): screw = self.selector.components['screw'] anchor = self.selector.components['anchor'] screw_cutter = screw.make_cutter() # cutter in local coords anchor_cutter = anchor.make_cutter() # screw : cut from all effected parts for effect in self.evaluator.eval: screw_coordsys = screw.world_coords - effect.part.world_coords effect.part.local_obj = effect.part.local_obj.cut(screw_coordsys + screw_cutter) # anchor : all but last piece for effect in self.evaluator.eval[:-1]: anchor_coordsys = anchor.world_coords - effect.part.world_coords effect.part.local_obj = effect.part.local_obj.cut(anchor_coordsys + anchor_cutter) # ------------------- Joined Planks ------------------- class ConnectedPlanks(cqparts.Assembly): fastener_class = EasyInstallFastener def make_components(self): # Wood panels p1 = WoodPanel( length=40, width=30, _render={'alpha': 0.5} ) p2 = WoodPanel( length=40, width=30, _render={'alpha': 0.5} ) # Fastener fastener = self.fastener_class(parts=[p1, p2]) return { 'panel1': p1, 'panel2': p2, 'fastener': fastener, } def make_constraints(self): # Pull out component references p1 = self.components['panel1'] p2 = self.components['panel2'] fastener = 
self.components['fastener'] return [ # Assembly's origin on panel1 Fixed(p1.mate_origin), # Join panel at the corner Coincident( p2.mate_end, p1.get_mate_edge(p2.thickness), ), # Fastener assembly in the middle of a Coincident( fastener.mate_origin, # mate_origin's -Z axis is used for evaluation p2.mate_end + CoordSystem( origin=(0, 0, 25), # 25mm above panel1 surface xDir=(0, -1, 0) # rotate so anchor faces inside ), ), ] # ------------------- Catalogue ------------------- from cqparts.catalogue import JSONCatalogue import tempfile # Temporary catalogue (just for this script) catalogue_filename = tempfile.mkstemp()[1] #catalogue_filename = '/tmp/db.json' catalogue = JSONCatalogue(catalogue_filename) # Add screws to catalogue # note: this is the kind of information you'd store in a csv # file, then import with a script similar to this one, to convert that # information to a Catalogue. screws = [ { 'id': 'screw_30', 'obj_params': { # parameters to WoodScrew constructor 'neck_exposed': 5, 'length': 40, # exposing 10mm of thread 'neck_length': 30, }, 'criteria': { 'type': 'screw', 'thread_length': 10, 'compatible_anchor': 'anchor_10', }, }, { 'id': 'screw_50', 'obj_params': { 'neck_exposed': 6, 'length': 65, # exposing 15mm of thread 'neck_length': 50, }, 'criteria': { 'type': 'screw', 'thread_length': 15, 'compatible_anchor': 'anchor_15', }, }, ] for screw in screws: obj = WoodScrew(**screw['obj_params']) catalogue.add(id=screw['id'], criteria=screw['criteria'], obj=obj) # Add anchors to catalogue anchors = [ { 'id': 'anchor_10', 'obj_params': { # parameters to WoodScrew constructor 'diameter': 10, 'height': 7, }, 'criteria': {'type': 'anchor'}, }, { 'id': 'anchor_15', 'obj_params': { # parameters to WoodScrew constructor 'diameter': 15, 'height': 10, }, 'criteria': {'type': 'anchor'}, }, ] for anchor in anchors: obj = Anchor(**anchor['obj_params']) catalogue.add(id=anchor['id'], criteria=anchor['criteria'], obj=obj) catalogue.close() # ------------------- Catalogue 
: Fastener ------------------- from cqparts.catalogue import JSONCatalogue from cqparts.utils import property_buffered class EasyInstallCatalogueFastener(EasyInstallFastener): class Selector(EasyInstallFastener.Selector): def get_components(self): # Find minimum neck length (total effect length, minus last effect) neck_length_min = abs(self.evaluator.eval[-1].start_point - self.evaluator.eval[0].start_point) thread_length_max = abs(self.evaluator.eval[-1].end_point - self.evaluator.eval[-1].start_point) # Get the catalogue of available items catalogue = JSONCatalogue(catalogue_filename) item = catalogue.get_query() # Find viably sized wood-screw screw_item = sorted( catalogue.search( # eval sets minimum evaluation length (item.obj.params.neck_length >= neck_length_min) & # thread shouldn't pierce through last part (item.criteria.thread_length < thread_length_max) ), # sort by shortest first key=lambda x: x['obj']['params']['neck_length'] )[0] # first result; shortest screw return { 'screw': catalogue.deserialize_item(screw_item), 'anchor': catalogue.get( item.id == screw_item['criteria']['compatible_anchor'] ), } class ConnectedPlanksCatalogue(ConnectedPlanks): fastener_class = EasyInstallCatalogueFastener # ------------------- Export / Display ------------------- from cqparts.display import get_display_environment env_name = get_display_environment().name # ------- Models screw = WoodScrew() anchor = Anchor() panel = WoodPanel() connected_exact = ConnectedPlanks() connected_catalogue = ConnectedPlanksCatalogue() if env_name == 'cmdline': screw.exporter('gltf')('screw.gltf') anchor.exporter('gltf')('anchor.gltf') panel.exporter('gltf')('panel.gltf') connected_exact.exporter('gltf')('connected_exact.gltf') print(connected_exact.tree_str(name='connected')) connected_catalogue.exporter('gltf')('connected_catalogue.gltf') #display(connected_exact) elif env_name == 'freecad': pass # manually switchable for testing #display(screw) #display(screw.make_cutter()) 
#display(anchor) #display(together) display(connected_exact) # ------------------- Catalogue : Cleanup ------------------- # Cleanup catalogue file (just for this script) import os os.unlink(catalogue_filename) #print('catalogue: %s' % catalogue_filename)
python
Apache-2.0
018e87e14c2c4d1d40b4bfe6a7e22bcf9baf0a53
2026-01-05T07:14:41.025281Z
false
cqparts/cqparts
https://github.com/cqparts/cqparts/blob/018e87e14c2c4d1d40b4bfe6a7e22bcf9baf0a53/docs-sphinx/_static/iframes/parts/generate.py
docs-sphinx/_static/iframes/parts/generate.py
#!/usr/bin/env python # The code here should be representative of that in: # https://cqparts.github.io/cqparts/doc/tutorials/assembly.html # ------------------- Box ------------------- import cadquery import cqparts from cqparts.params import * from cqparts.display import display class Box(cqparts.Part): length = PositiveFloat(10, doc="box length (along x-axis)") width = PositiveFloat(10, doc="box width (along y-axis)") height = PositiveFloat(10, doc="box height (along z-axis)") def make(self): return cadquery.Workplane('XY').box( self.length, self.width, self.height, centered=(True, True, False), ) box = Box(height=5) # ------------------- Disk ------------------- class Wheel(cqparts.Part): radius = PositiveFloat(100, doc="wheel's radius") width = PositiveFloat(10, doc="wheel's width") def make(self): return cadquery.Workplane('XY') \ .circle(self.radius).extrude(self.width) wheel = Wheel() # ------------------- Holy Disk ------------------- class HolyWheel(Wheel): hole_diameter = PositiveFloat(20, "diameter for shaft") def make(self): obj = super(HolyWheel, self).make() return obj.faces(">Z").circle(self.hole_diameter / 2) \ .cutThruAll() my_wheel = HolyWheel(hole_diameter=50, width=15) # ------------------- Joined Disk ------------------- class JoinedWheel(cqparts.Part): # Parameters l_radius = PositiveFloat(100, doc="left wheel's radius") l_width = PositiveFloat(10, doc="left wheel's radius") r_radius = PositiveFloat(100, doc="right wheel's radius") r_width = PositiveFloat(10, doc="right wheel's radius") axle_length = PositiveFloat(100, doc="axle length") axle_diam = PositiveFloat(10, doc="axle diameter") def make(self): # Make the axle obj = cadquery.Workplane('XY') \ .circle(self.axle_diam / 2) \ .extrude(self.axle_length) # Make the left and right wheels wheel_l = Wheel(radius=self.l_radius, width=self.l_width) wheel_r = Wheel(radius=self.r_radius, width=self.r_width) # Union them with the axle solid obj = obj.union(wheel_l.local_obj) obj = obj.union( 
wheel_r.local_obj.mirror('XY') \ .translate((0, 0, self.axle_length)) ) return obj joined_wheel = JoinedWheel( r_radius=80, l_width=20, axle_diam=30, ) joined_wheel.local_obj # ------------------- Red Box ------------------- from cqparts.display import render_props, display class Box(cqparts.Part): _render = render_props(template='red', alpha=0.2) def make(self): return cadquery.Workplane('XY').box(10,10,10) red_box = Box() # ------------------- Export ------------------- from cqparts.display import get_display_environment env_name = get_display_environment().name if env_name == 'freecad': pass #display(box) #display(wheel) #display(my_wheel) #display(joined_wheel) display(red_box) else: box.exporter('gltf')('box.gltf', embed=True) wheel.exporter('gltf')('wheel.gltf', embed=True) my_wheel.exporter('gltf')('holy-wheel.gltf', embed=True) joined_wheel.exporter('gltf')('joined-wheel.gltf', embed=True) red_box.exporter('gltf')('red-box.gltf', embed=True)
python
Apache-2.0
018e87e14c2c4d1d40b4bfe6a7e22bcf9baf0a53
2026-01-05T07:14:41.025281Z
false
cqparts/cqparts
https://github.com/cqparts/cqparts/blob/018e87e14c2c4d1d40b4bfe6a7e22bcf9baf0a53/docs-sphinx/_static/iframes/toy-car/generate.py
docs-sphinx/_static/iframes/toy-car/generate.py
#!/usr/bin/env python # Get repository root from subprocess import check_output CQPARTS_ROOT = check_output(["git", "rev-parse", "--show-toplevel"]).rstrip('\n') # Add examples folder to path import sys import os sys.path.append(os.path.join(CQPARTS_ROOT, 'examples')) # ------------------- Import Module ------------------- # Import example as module import toy_car # ------------------- Export / Display ------------------- if __name__ == '__main__': # Wheel wheel = toy_car.Wheel() wheel.exporter('gltf')('wheel.gltf') # Axle axle = toy_car.Axle() axle.exporter('gltf')('axle.gltf') # Chassis chassis = toy_car.Chassis() chassis.exporter('gltf')('chassis.gltf') # Wheel Assembly wheeled_axle = toy_car.WheeledAxle(right_width=2) wheeled_axle.exporter('gltf')('wheel-assembly.gltf') print(wheeled_axle.tree_str(name='wheel_assembly')) # Car Assembly car = toy_car.Car() car.exporter('gltf')('car.gltf') print(car.tree_str(name='car')) car.find('chassis').exporter('gltf')('chassis-altered.gltf')
python
Apache-2.0
018e87e14c2c4d1d40b4bfe6a7e22bcf9baf0a53
2026-01-05T07:14:41.025281Z
false
cqparts/cqparts
https://github.com/cqparts/cqparts/blob/018e87e14c2c4d1d40b4bfe6a7e22bcf9baf0a53/docs-sphinx/_static/iframes/asm-build-cycle/generate.py
docs-sphinx/_static/iframes/asm-build-cycle/generate.py
#!/usr/bin/env python # -------------------------- Cylinder Part -------------------------- import cadquery import cqparts from cqparts.params import * from cqparts.display import display, render_props from cqparts.utils.geometry import CoordSystem from cqparts.constraint import Mate class Cylinder(cqparts.Part): diam = PositiveFloat(10, doc="cylinder's diameter") length = PositiveFloat(10, doc="cylinder's length") embed = PositiveFloat(2, doc="embedding distance") hole_diam = PositiveFloat(2.72, doc="pilot hole diam") _render = render_props(alpha=0.8) def make_base_cylinder(self): # This is used as a basis for make() and cutaway() return cadquery.Workplane('XY') \ .circle(self.diam/2).extrude(self.embed + self.length) def make(self): # Use the base cylindrical shape, and cut a hole through it return self.make_base_cylinder() \ .faces(">Z").hole(self.hole_diam / 2) @property def cutaway(self): # Use the base cylindrical shape, no alterations return self.make_base_cylinder() @property def mate_embedded(self): return Mate(self, CoordSystem((0, 0, self.embed))) # -------------------------- Plate Part -------------------------- from math import sin, cos, radians class Plate(cqparts.Part): length = PositiveFloat(20, doc="plate length") width = PositiveFloat(20, doc="plate width") thickness = PositiveFloat(10, doc="plate thickness") hole_diam = PositiveFloat(3, doc="hole diameter") connection_offset = Float(4, doc="hole's offset from plate center along x-axis") connection_angle = Float(15, doc="angle of mate point") def make(self): plate = cadquery.Workplane('XY') \ .box(self.length, self.width, self.thickness) hole_tool = cadquery.Workplane('XY', origin=(0, 0, -self.thickness * 5)) \ .circle(self.hole_diam / 2).extrude(self.thickness * 10) hole_tool = self.mate_hole.local_coords + hole_tool return plate.cut(hole_tool) @property def mate_hole(self): return Mate(self, CoordSystem( origin=(self.connection_offset, 0, self.thickness/2), xDir=(1, 0, 0), normal=(0, 
-sin(radians(self.connection_angle)), cos(radians(self.connection_angle))), )) # -------------------------- Demo Assembly -------------------------- from cqparts.constraint import Fixed, Coincident class Thing(cqparts.Assembly): # Components are updated to self.components first def make_components(self): return { 'pla': Plate(), 'cyl': Cylinder(), } # Then constraints are appended to self.constraints (second) def make_constraints(self): plate = self.components['pla'] cylinder = self.components['cyl'] return [ Fixed( mate=plate.mate_origin, world_coords=CoordSystem(origin=(-1,-5,-2), xDir=(-0.5,1,0)) # a bit of random placement ), Coincident( mate=cylinder.mate_embedded, to_mate=plate.mate_hole, ), ] # In between updating components, and adding constraints: # self.solve() is run. # This gives each component a valid world_coords value, so # we can use it in the next step... # Lastly, this function is run (any return is ignored) def make_alterations(self): # get Cylinder's location relative to the Plate coords = self.components['cyl'].world_coords - self.components['pla'].world_coords # apply that to the "cutout" we want to subtract from the plate cutout = coords + self.components['cyl'].cutaway self.components['pla'].local_obj = self.components['pla'].local_obj.cut(cutout) # -------------------------- Multiple Cycles -------------------------- from cqparts_misc.basic.primatives import Cube, Box, Sphere class BlockStack(cqparts.Assembly): def make_components(self): print("make Box 'a'") yield {'a': Box(length=10, width=10, height=20)} # grey print("make 2 Cubes 'b', and 'c'") yield { 'b': Cube(size=8, _render={'color': (255, 0, 0)}), # red 'c': Cube(size=3, _render={'color': (0, 255, 0)}), # green } print("make sphere 'd'") yield {'d': Sphere(radius=3, _render={'color': (0, 0, 255)})} # blue def make_constraints(self): print("place 'a' at origin") a = self.components['a'] yield [Fixed(a.mate_origin, CoordSystem((0,0,-10)))] print("place 'b' & 'c' relative to 'a'") b = 
self.components['b'] c = self.components['c'] yield [ Fixed(b.mate_bottom, a.world_coords + a.mate_pos_x.local_coords), Fixed(c.mate_bottom, a.world_coords + a.mate_neg_y.local_coords), ] print("place sphere 'd' on cube 'b'") d = self.components['d'] yield [Fixed(d.mate_origin, b.world_coords + b.mate_pos_x.local_coords)] def make_alterations(self): print("first round alteration(s)") yield print("second round alteration(s)") yield print("third round alteration(s)") yield # ------------------- Export / Display ------------------- # ------- Functions from cqparts.display import get_display_environment env_name = get_display_environment().name # ------- Models cylinder = Cylinder() plate = Plate() thing = Thing() block_stack = BlockStack() if env_name == 'cmdline': pass # manually switchable for testing cylinder.exporter('gltf')('cylinder.gltf') plate.exporter('gltf')('plate.gltf') thing.exporter('gltf')('thing.gltf') thing.find('pla').exporter('gltf')('plate-altered.gltf') block_stack.exporter('gltf')('block_stack.gltf') #display(block_stack) elif env_name == 'freecad': pass # manually switchable for testing #display(cylinder) #display(plate) #display(thing.find('pla')) display(thing) #display(block_stack)
python
Apache-2.0
018e87e14c2c4d1d40b4bfe6a7e22bcf9baf0a53
2026-01-05T07:14:41.025281Z
false
cqparts/cqparts
https://github.com/cqparts/cqparts/blob/018e87e14c2c4d1d40b4bfe6a7e22bcf9baf0a53/docs-sphinx/_static/iframes/biscuit/generate.py
docs-sphinx/_static/iframes/biscuit/generate.py
#!/usr/bin/env python # ------------------- Wood Panel ------------------- from math import radians, tan, cos import cadquery import cqparts from cqparts.params import * from cqparts.constraint import Mate from cqparts.utils import CoordSystem from cqparts.display import display, render_props class Panel(cqparts.Part): # dimensions height = PositiveFloat(50, doc="panel height (along join)") depth = PositiveFloat(50, doc="panel depth (from join to opposite side)") width = PositiveFloat(10, doc="thickness of panel") join_angle = FloatRange(0, 89, 45, doc="angle of join (unit: degrees)") _render = render_props(template='wood', alpha=0.5) def make(self): points = [ (0, 0), (self.depth, 0), (self.depth, self.width), (self.width * tan(radians(self.join_angle)), self.width), ] return cadquery.Workplane('XZ', origin=(0, self.height / 2, 0)) \ .moveTo(*points[0]).polyline(points[1:]).close() \ .extrude(self.height) def get_mate_join(self, ratio=0.5): # Return a mate that's somewhere along the join surface. return Mate(self, ( CoordSystem().rotated( (0, -(90 - self.join_angle), 0) ) + CoordSystem( origin=( (self.width / cos(radians(self.join_angle))) / 2, (-self.height / 2) + (self.height * ratio), 0 ), ) )) @property def mate_join(self): # default is half way along join return self.get_mate_join(ratio=0.5) @property def mate_join_reverse(self): # reversed join rotated around X-axis 180 deg return self.mate_join + CoordSystem().rotated((180, 0, 0)) # ------------------- Biscuit ------------------- class Biscuit(cqparts.Part): # Biscuit Dimensions width = PositiveFloat(30, doc="twice penetration depth") length = PositiveFloat(None, doc="length tip to tip") thickness = PositiveFloat(5, doc="material thickness") _render = render_props(template='wood_dark') def initialize_parameters(self): super(Biscuit, self).initialize_parameters() # set default length as a ratio of width if self.length is None: self.length = (5. 
/ 3) * self.width def make(self): # We'll just use the simplified model for this example return self.make_simple() # It could be rounded at the ends, and the sides chamfered, but # for this example we'll just keep it simple. def make_simple(self): # Biscuit shaped like a eye, 2 arcs from end to end (length) # Create left & right side, union them together biscuit = cadquery.Workplane('XY') for i in [1, -1]: biscuit = biscuit.union( cadquery.Workplane('XY', origin=(0, 0, -self.thickness / 2)) \ .moveTo(self.length / 2, 0) \ .threePointArc( (0, i * self.width / 2), (-self.length / 2, 0) ).close().extrude(self.thickness) ) return biscuit def make_cutter(self): # the cutaway material is the same shape as the biscuit itself # (the simplified model) return self.make_simple() # ------------------- Biscuit Fastener ------------------- from cqparts_fasteners.fasteners.base import Fastener from cqparts_fasteners.utils.evaluator import Evaluator, VectorEvaluator from cqparts_fasteners.utils.selector import Selector from cqparts_fasteners.utils.applicator import Applicator from cqparts.constraint import Fixed from itertools import chain class BiscuitFastener(Fastener): # Parameters ratio = FloatRange(0, 1, 0.5, doc="ratio penetration of biscuit into parts") cut_biscuit_holes = Boolean(True, doc="if True, biscuit holes are cut into pannels") class Evaluator(Evaluator): # Bi-directional evaluator, employes 2 VectorEvaluator instances that, # on their own, evaluate in the -Z direction def __init__(self, parts, location, parent=None): super(BiscuitFastener.Evaluator, self).__init__(parts=parts, parent=parent) self.location = location # positive z direction self.eval_pos = VectorEvaluator(parts, location.rotated((180, 0, 0))) # negative z direction self.eval_neg = VectorEvaluator(parts, location) def perform_evaluation(self): return (self.eval_pos.eval, self.eval_neg.eval) class Selector(Selector): def get_components(self): # Determine maximum biscuit width from the evaluations 
(pos, neg) = self.evaluator.eval pos_length = abs(pos[-1].end_point - pos[0].start_point) neg_length = abs(neg[-1].end_point - neg[0].start_point) max_width = 2 * min( pos_length * self.parent.ratio, # parent is the BiscuitFastener instance neg_length * self.parent.ratio ) return { 'biscuit': Biscuit( width=max_width, thickness=max_width * 0.1, ), } def get_constraints(self): #(pos, neg) = self.evaluator.eval return [ Fixed( self.components['biscuit'].mate_origin, CoordSystem().rotated((90, 0, 90)) # correctly orientate biscuit ), ] class Applicator(Applicator): def apply_alterations(self): if not self.parent.cut_biscuit_holes: return # fastener configured to place biscuit overlapping panel # Get the biscuit cutout shape biscuit = self.selector.components['biscuit'] biscuit_cutter = biscuit.make_cutter() # cutter in local coords # Duplicate parts possible with floating point rounding, because the # evaluator is placed on the 2 planar surfaces being joined. effected_parts = set([ # duplicates are removed within the set effect.part for effect in chain(*self.evaluator.eval[:]) ]) # Move biscuit relative to altered part's local coordinates, then # alter the part's local_obj. 
for part in effected_parts: biscuit_coordsys = biscuit.world_coords - part.world_coords part.local_obj = part.local_obj.cut(biscuit_coordsys + biscuit_cutter) # ------------------- Corner Assembly ------------------- from cqparts.constraint import Fixed, Coincident class CornerAssembly(cqparts.Assembly): biscuit_count = PositiveInt(2, doc="number of biscuits") join_angle = FloatRange(0, 89, 45, doc="angle of join (unit: degrees)") biscuit_holes = Boolean(True, doc="if True, holes are cut into pannels to house biscuits") def make_components(self): components = { 'a': Panel(join_angle=self.join_angle), 'b': Panel(join_angle=self.join_angle), } for i in range(self.biscuit_count): components['f_%i' % i] = BiscuitFastener( parts=[components['a'], components['b']], cut_biscuit_holes=self.biscuit_holes, ) return components def make_constraints(self): # position joined panels a = self.components['a'] b = self.components['b'] yield [ Fixed(a.mate_origin), Coincident( b.mate_join_reverse, a.mate_join ), ] # position biscuits along join biscuits = [ c for c in self.components.values() if isinstance(c, BiscuitFastener) ] yield [ Coincident( c.mate_origin, a.get_mate_join( ratio=(i + 1) * (1. / (len(biscuits) + 1)) ) ) for (i, c) in enumerate(biscuits) ] # ------------------- Export / Display ------------------- from cqparts.display import display from cqparts.display import get_display_environment env_name = get_display_environment().name # ------- Models panel = Panel() biscuit = Biscuit() corner_assembly = CornerAssembly( join_angle=45, biscuit_holes=True, ) if env_name == 'cmdline': panel.exporter('gltf')('panel.gltf') biscuit.exporter('gltf')('biscuit.gltf') corner_assembly.exporter('gltf')('corner_assembly.gltf') print(corner_assembly.tree_str()) #display(panel) #display(biscuit) #display(corner_assembly) elif env_name == 'freecad': #display(panel) #display(biscuit) display(corner_assembly)
python
Apache-2.0
018e87e14c2c4d1d40b4bfe6a7e22bcf9baf0a53
2026-01-05T07:14:41.025281Z
false
cqparts/cqparts
https://github.com/cqparts/cqparts/blob/018e87e14c2c4d1d40b4bfe6a7e22bcf9baf0a53/docs-sphinx/_static/iframes/fastener-screw/generate.py
docs-sphinx/_static/iframes/fastener-screw/generate.py
#!/usr/bin/env python # The code here should be representative of that in: # https://cqparts.github.io/cqparts/doc/tutorials/assembly.html # ------------------- Thing ------------------- import cqparts from cqparts.constraint import Fixed, Coincident from cqparts_fasteners.fasteners.screw import ScrewFastener from cqparts_misc.basic.primatives import Box from cqparts.utils import CoordSystem class Thing(cqparts.Assembly): def make_components(self): base = Box(length=20, width=30, height=15) top = Box(length=40, width=20, height=5) return { 'base': base, 'top': top, 'fastener': ScrewFastener(parts=[base, top]), } def make_constraints(self): base = self.components['base'] top = self.components['top'] fastener = self.components['fastener'] return [ Fixed(base.mate_bottom), Coincident(top.mate_bottom, base.mate_top), Coincident(fastener.mate_origin, top.mate_top), ] thing = Thing() # ------------------- Export ------------------- from cqparts.params import * from cqparts.display import display from cqparts.display import get_display_environment env_name = get_display_environment().name if env_name == 'freecad': display(thing) else: thing.exporter('gltf')('thing.gltf', embed=False)
python
Apache-2.0
018e87e14c2c4d1d40b4bfe6a7e22bcf9baf0a53
2026-01-05T07:14:41.025281Z
false
cqparts/cqparts
https://github.com/cqparts/cqparts/blob/018e87e14c2c4d1d40b4bfe6a7e22bcf9baf0a53/docs-sphinx/_static/iframes/fastener-nut-bolt/generate.py
docs-sphinx/_static/iframes/fastener-nut-bolt/generate.py
#!/usr/bin/env python # The code here should be representative of that in: # https://cqparts.github.io/cqparts/doc/tutorials/assembly.html # ------------------- Thing ------------------- import cqparts from cqparts.constraint import Fixed, Coincident from cqparts_fasteners.fasteners.nutbolt import NutAndBoltFastener from cqparts_misc.basic.primatives import Box class Thing(cqparts.Assembly): def make_components(self): base = Box(length=20, width=30, height=15) top = Box(length=40, width=20, height=5) return { 'base': base, 'top': top, 'fastener': NutAndBoltFastener(parts=[base, top]), } def make_constraints(self): base = self.components['base'] top = self.components['top'] fastener = self.components['fastener'] return [ Fixed(base.mate_bottom), Coincident(top.mate_bottom, base.mate_top), Coincident(fastener.mate_origin, top.mate_top), ] thing = Thing() # ------------------- Export ------------------- from cqparts.params import * from cqparts.display import display from cqparts.display import get_display_environment env_name = get_display_environment().name if env_name == 'freecad': display(thing) else: thing.exporter('gltf')('thing.gltf', embed=False)
python
Apache-2.0
018e87e14c2c4d1d40b4bfe6a7e22bcf9baf0a53
2026-01-05T07:14:41.025281Z
false
cqparts/cqparts
https://github.com/cqparts/cqparts/blob/018e87e14c2c4d1d40b4bfe6a7e22bcf9baf0a53/docs/doc/_static/iframes/easy-install/generate.py
docs/doc/_static/iframes/easy-install/generate.py
#!/usr/bin/env python import cadquery import logging cadquery.freecad_impl.console_logging.enable(logging.INFO) # ------------------- Wood Screw ------------------- import cadquery import cqparts from cqparts.params import * from cqparts_fasteners.params import HeadType, DriveType, ThreadType from cqparts_fasteners.male import MaleFastenerPart from cqparts.display import display, render_props from cqparts.constraint import Mate from cqparts.utils import CoordSystem class WoodScrew(MaleFastenerPart): # --- override MaleFastenerPart parameters # sub-parts head = HeadType(default=('cheese', { 'diameter': 4, 'height': 2, }), doc="screw's head") drive = DriveType(default=('phillips', { 'diameter': 3.5, 'depth': 2.5, 'width': 0.5, }), doc="screw's drive") thread = ThreadType(default=('triangular', { 'diameter': 5, # outer 'diameter_core': 4.3, # inner 'pitch': 2, 'angle': 30, }), doc="screw's thread") # scalars neck_diam = PositiveFloat(2, doc="neck diameter") neck_length = PositiveFloat(40, doc="length from base of head to end of neck") length = PositiveFloat(50, doc="length from base of head to end of thread") # --- parameters unique for this class neck_exposed = PositiveFloat(2, doc="length of neck exposed below head") bore_diam = PositiveFloat(6, doc="diameter of screw's bore") _render = render_props(template='aluminium') def initialize_parameters(self): super(WoodScrew, self).initialize_parameters() def make(self): screw = super(WoodScrew, self).make() # add bore cylinder bore = cadquery.Workplane('XY', origin=(0, 0, -self.neck_length)) \ .circle(self.bore_diam / 2) \ .extrude(self.neck_length - self.neck_exposed) # cut out sides from bore so it takes less material for angle in [i * (360 / 3) for i in range(3)]: slice_obj = cadquery.Workplane( 'XY', origin=(self.bore_diam / 2, 0, -(self.neck_exposed + 2)) ).circle(self.bore_diam / 3) \ .extrude(-(self.neck_length - self.neck_exposed - 4)) \ .rotate((0,0,0), (0,0,1), angle) bore = bore.cut(slice_obj) screw = 
screw.union(bore) return screw def make_cutter(self): # we won't use MaleFastenerPart.make_cutter() because it # implements an access hole that we don't need. cutter = cadquery.Workplane('XY', origin=(0, 0, self.head.height)) \ .circle(self.bore_diam / 2) \ .extrude(-(self.neck_length + self.head.height)) cutter = cutter.union( self.thread.make_pilothole_cutter().translate(( 0, 0, -self.length )) ) return cutter def make_simple(self): # in this case, the cutter solid serves as a good simplified # model of the screw. return self.make_cutter() @property def mate_threadstart(self): return Mate(self, CoordSystem(origin=(0, 0, -self.neck_length))) # ------------------- Anchor ------------------- from math import sin, cos, pi class Anchor(cqparts.Part): # sub-parts drive = DriveType(default=('cross', { 'diameter': 5, 'width': 1, 'depth': 2.5, }), doc="anchor's drive") # scalars diameter = PositiveFloat(10, doc="diameter of anchor") height = PositiveFloat(5, doc="height of anchor") neck_diameter = PositiveFloat(2, doc="width of screw neck") head_diameter = PositiveFloat(4, doc="width of screw head") spline_point_count = IntRange(4, 200, 10, doc="number of spiral spline points") ratio_start = FloatRange(0.5, 0.99, 0.99, doc="radius ratio of wedge start") ratio_end = FloatRange(0.01, 0.8, 0.7, doc="radius ratio of wedge end") _render = render_props(color=(100, 100, 150)) # dark blue @property def wedge_radii(self): return ( (self.diameter / 2) * self.ratio_start, # start radius (self.diameter / 2) * self.ratio_end # end radius ) def make(self): obj = cadquery.Workplane('XY') \ .circle(self.diameter / 2) \ .extrude(-self.height) # neck slot : eliminate screw neck interference obj = obj.cut( cadquery.Workplane('XY', origin=(0, 0, -((self.neck_diameter + self.height) / 2))) \ .moveTo(0, 0) \ .lineTo(self.diameter / 2, 0) \ .threePointArc( (0, -self.diameter / 2), (-self.diameter / 2, 0), ) \ .close() \ .extrude(self.neck_diameter) ) # head slot : form a circular wedge with 
remaining material (start_r, end_r) = self.wedge_radii angles_radius = ( # as generator ( (i * (pi / self.spline_point_count)), # angle start_r + ((end_r - start_r) * (i / float(self.spline_point_count))) # radius ) for i in range(1, self.spline_point_count + 1) # avoid zero angle ) points = [(cos(a) * r, -sin(a) * r) for (a, r) in angles_radius] obj = obj.cut( cadquery.Workplane('XY', origin=(0, 0, -((self.head_diameter + self.height) / 2))) \ .moveTo(start_r, 0) \ .spline(points) \ .close() \ .extrude(self.head_diameter) ) # access port : remove a quadrant to allow screw's head through obj = obj.cut( cadquery.Workplane('XY', origin=(0, 0, -(self.height - self.head_diameter) / 2)) \ .rect(self.diameter / 2, self.diameter / 2, centered=False) \ .extrude(-self.height) ) # screw drive : to apply torque to anchor for installation if self.drive: obj = self.drive.apply(obj) # top face is on origin XY plane return obj def make_simple(self): # Just return the core cylinder return cadquery.Workplane('XY') \ .circle(self.diameter / 2) \ .extrude(-self.height) def make_cutter(self): # A solid to cut away from another; makes room to install the anchor return cadquery.Workplane('XY', origin=(0, 0, -self.height)) \ .circle(self.diameter / 2) \ .extrude(self.height + 1000) # 1m bore depth @property def mate_screwhead(self): # The location of the screwhead in it's theoretical tightened mid-point # (well, not really, but this is just a demo) (start_r, end_r) = self.wedge_radii return Mate(self, CoordSystem( origin=(0, -((start_r + end_r) / 2), -self.height / 2), xDir=(1, 0, 0), normal=(0, 1, 0) )) @property def mate_center(self): # center of object, along screw's rotation axis return Mate(self, CoordSystem(origin=(0, 0, -self.height / 2))) @property def mate_base(self): # base of object (for 3d printing placement, maybe) return Mate(self, CoordSystem(origin=(0, 0, -self.height))) # ------------------- Screw & Anchor ------------------- from cqparts.constraint import Fixed, 
Coincident class _Together(cqparts.Assembly): def make_components(self): return { 'screw': WoodScrew(neck_exposed=5), 'anchor': Anchor(height=7), } def make_constraints(self): return [ Fixed(self.components['screw'].mate_origin), Coincident( self.components['anchor'].mate_screwhead, self.components['screw'].mate_origin, ), ] # ------------------- WoodPanel ------------------- class WoodPanel(cqparts.Part): thickness = PositiveFloat(15, doc="thickness of panel") width = PositiveFloat(100, doc="panel width") length = PositiveFloat(100, doc="panel length") _render = render_props(template='wood') # wooden def make(self): return cadquery.Workplane('XY') \ .box(self.length, self.width, self.thickness) @property def mate_end(self): # center of +x face return Mate(self, CoordSystem( origin=(self.length / 2, 0, 0), xDir=(0, 0, -1), normal=(-1, 0, 0), )) def get_mate_edge(self, thickness): return Mate(self, CoordSystem( origin=((self.length / 2) - (thickness / 2), 0, self.thickness / 2) )) # ------------------- Fastener ------------------- from cqparts_fasteners.fasteners import Fastener from cqparts_fasteners.utils import VectorEvaluator, Selector, Applicator from cqparts.constraint import Fixed, Coincident class EasyInstallFastener(Fastener): # The origin of the evaluation is to be the target center for the anchor. Evaluator = VectorEvaluator class Selector(Selector): def get_components(self): anchor = Anchor( height=10, ) # --- Define the screw's dimensions # Get distance from anchor's center to screwhead's base # (we'll call that the "anchor's slack") v_rel_center = anchor.mate_center.local_coords.origin v_rel_screwhead = anchor.mate_screwhead.local_coords.origin anchor_slack = abs(v_rel_screwhead - v_rel_center) # The slack is along the evaluation vector, which is the same # as the woodscrew's axis of rotation. # Find the screw's neck length # This will be the length of all but the last evaluator effect, # minus the anchor's slack. 
effect_length = abs(self.evaluator.eval[-1].start_point - self.evaluator.eval[0].start_point) neck_length = effect_length - anchor_slack # Get thread's length : 80% of maximum thread_maxlength = abs(self.evaluator.eval[-1].end_point - self.evaluator.eval[-1].start_point) thread_length = thread_maxlength * 0.8 # Create screw screw = WoodScrew( neck_length=neck_length, length=neck_length + thread_length, ) return { 'anchor': anchor, 'screw': screw, } def get_constraints(self): last_part = self.evaluator.eval[-1].part return [ Coincident( self.components['screw'].mate_threadstart, Mate(last_part, self.evaluator.eval[-1].start_coordsys - last_part.world_coords), ), Coincident( self.components['anchor'].mate_screwhead, self.components['screw'].mate_origin, ), ] class Applicator(Applicator): def apply_alterations(self): screw = self.selector.components['screw'] anchor = self.selector.components['anchor'] screw_cutter = screw.make_cutter() # cutter in local coords anchor_cutter = anchor.make_cutter() # screw : cut from all effected parts for effect in self.evaluator.eval: screw_coordsys = screw.world_coords - effect.part.world_coords effect.part.local_obj = effect.part.local_obj.cut(screw_coordsys + screw_cutter) # anchor : all but last piece for effect in self.evaluator.eval[:-1]: anchor_coordsys = anchor.world_coords - effect.part.world_coords effect.part.local_obj = effect.part.local_obj.cut(anchor_coordsys + anchor_cutter) # ------------------- Joined Planks ------------------- class ConnectedPlanks(cqparts.Assembly): fastener_class = EasyInstallFastener def make_components(self): # Wood panels p1 = WoodPanel( length=40, width=30, _render={'alpha': 0.5} ) p2 = WoodPanel( length=40, width=30, _render={'alpha': 0.5} ) # Fastener fastener = self.fastener_class(parts=[p1, p2]) return { 'panel1': p1, 'panel2': p2, 'fastener': fastener, } def make_constraints(self): # Pull out component references p1 = self.components['panel1'] p2 = self.components['panel2'] fastener = 
self.components['fastener'] return [ # Assembly's origin on panel1 Fixed(p1.mate_origin), # Join panel at the corner Coincident( p2.mate_end, p1.get_mate_edge(p2.thickness), ), # Fastener assembly in the middle of a Coincident( fastener.mate_origin, # mate_origin's -Z axis is used for evaluation p2.mate_end + CoordSystem( origin=(0, 0, 25), # 25mm above panel1 surface xDir=(0, -1, 0) # rotate so anchor faces inside ), ), ] # ------------------- Catalogue ------------------- from cqparts.catalogue import JSONCatalogue import tempfile # Temporary catalogue (just for this script) catalogue_filename = tempfile.mkstemp()[1] #catalogue_filename = '/tmp/db.json' catalogue = JSONCatalogue(catalogue_filename) # Add screws to catalogue # note: this is the kind of information you'd store in a csv # file, then import with a script similar to this one, to convert that # information to a Catalogue. screws = [ { 'id': 'screw_30', 'obj_params': { # parameters to WoodScrew constructor 'neck_exposed': 5, 'length': 40, # exposing 10mm of thread 'neck_length': 30, }, 'criteria': { 'type': 'screw', 'thread_length': 10, 'compatible_anchor': 'anchor_10', }, }, { 'id': 'screw_50', 'obj_params': { 'neck_exposed': 6, 'length': 65, # exposing 15mm of thread 'neck_length': 50, }, 'criteria': { 'type': 'screw', 'thread_length': 15, 'compatible_anchor': 'anchor_15', }, }, ] for screw in screws: obj = WoodScrew(**screw['obj_params']) catalogue.add(id=screw['id'], criteria=screw['criteria'], obj=obj) # Add anchors to catalogue anchors = [ { 'id': 'anchor_10', 'obj_params': { # parameters to WoodScrew constructor 'diameter': 10, 'height': 7, }, 'criteria': {'type': 'anchor'}, }, { 'id': 'anchor_15', 'obj_params': { # parameters to WoodScrew constructor 'diameter': 15, 'height': 10, }, 'criteria': {'type': 'anchor'}, }, ] for anchor in anchors: obj = Anchor(**anchor['obj_params']) catalogue.add(id=anchor['id'], criteria=anchor['criteria'], obj=obj) catalogue.close() # ------------------- Catalogue 
: Fastener ------------------- from cqparts.catalogue import JSONCatalogue from cqparts.utils import property_buffered class EasyInstallCatalogueFastener(EasyInstallFastener): class Selector(EasyInstallFastener.Selector): def get_components(self): # Find minimum neck length (total effect length, minus last effect) neck_length_min = abs(self.evaluator.eval[-1].start_point - self.evaluator.eval[0].start_point) thread_length_max = abs(self.evaluator.eval[-1].end_point - self.evaluator.eval[-1].start_point) # Get the catalogue of available items catalogue = JSONCatalogue(catalogue_filename) item = catalogue.get_query() # Find viably sized wood-screw screw_item = sorted( catalogue.search( # eval sets minimum evaluation length (item.obj.params.neck_length >= neck_length_min) & # thread shouldn't pierce through last part (item.criteria.thread_length < thread_length_max) ), # sort by shortest first key=lambda x: x['obj']['params']['neck_length'] )[0] # first result; shortest screw return { 'screw': catalogue.deserialize_item(screw_item), 'anchor': catalogue.get( item.id == screw_item['criteria']['compatible_anchor'] ), } class ConnectedPlanksCatalogue(ConnectedPlanks): fastener_class = EasyInstallCatalogueFastener # ------------------- Export / Display ------------------- from cqparts.display import get_display_environment env_name = get_display_environment().name # ------- Models screw = WoodScrew() anchor = Anchor() panel = WoodPanel() connected_exact = ConnectedPlanks() connected_catalogue = ConnectedPlanksCatalogue() if env_name == 'cmdline': screw.exporter('gltf')('screw.gltf') anchor.exporter('gltf')('anchor.gltf') panel.exporter('gltf')('panel.gltf') connected_exact.exporter('gltf')('connected_exact.gltf') print(connected_exact.tree_str(name='connected')) connected_catalogue.exporter('gltf')('connected_catalogue.gltf') #display(connected_exact) elif env_name == 'freecad': pass # manually switchable for testing #display(screw) #display(screw.make_cutter()) 
#display(anchor) #display(together) display(connected_exact) # ------------------- Catalogue : Cleanup ------------------- # Cleanup catalogue file (just for this script) import os os.unlink(catalogue_filename) #print('catalogue: %s' % catalogue_filename)
python
Apache-2.0
018e87e14c2c4d1d40b4bfe6a7e22bcf9baf0a53
2026-01-05T07:14:41.025281Z
false
cqparts/cqparts
https://github.com/cqparts/cqparts/blob/018e87e14c2c4d1d40b4bfe6a7e22bcf9baf0a53/docs/doc/_static/iframes/parts/generate.py
docs/doc/_static/iframes/parts/generate.py
#!/usr/bin/env python # The code here should be representative of that in: # https://cqparts.github.io/cqparts/doc/tutorials/assembly.html # ------------------- Box ------------------- import cadquery import cqparts from cqparts.params import * from cqparts.display import display class Box(cqparts.Part): length = PositiveFloat(10, doc="box length (along x-axis)") width = PositiveFloat(10, doc="box width (along y-axis)") height = PositiveFloat(10, doc="box height (along z-axis)") def make(self): return cadquery.Workplane('XY').box( self.length, self.width, self.height, centered=(True, True, False), ) box = Box(height=5) # ------------------- Disk ------------------- class Wheel(cqparts.Part): radius = PositiveFloat(100, doc="wheel's radius") width = PositiveFloat(10, doc="wheel's width") def make(self): return cadquery.Workplane('XY') \ .circle(self.radius).extrude(self.width) wheel = Wheel() # ------------------- Holy Disk ------------------- class HolyWheel(Wheel): hole_diameter = PositiveFloat(20, "diameter for shaft") def make(self): obj = super(HolyWheel, self).make() return obj.faces(">Z").circle(self.hole_diameter / 2) \ .cutThruAll() my_wheel = HolyWheel(hole_diameter=50, width=15) # ------------------- Joined Disk ------------------- class JoinedWheel(cqparts.Part): # Parameters l_radius = PositiveFloat(100, doc="left wheel's radius") l_width = PositiveFloat(10, doc="left wheel's radius") r_radius = PositiveFloat(100, doc="right wheel's radius") r_width = PositiveFloat(10, doc="right wheel's radius") axle_length = PositiveFloat(100, doc="axle length") axle_diam = PositiveFloat(10, doc="axle diameter") def make(self): # Make the axle obj = cadquery.Workplane('XY') \ .circle(self.axle_diam / 2) \ .extrude(self.axle_length) # Make the left and right wheels wheel_l = Wheel(radius=self.l_radius, width=self.l_width) wheel_r = Wheel(radius=self.r_radius, width=self.r_width) # Union them with the axle solid obj = obj.union(wheel_l.local_obj) obj = obj.union( 
wheel_r.local_obj.mirror('XY') \ .translate((0, 0, self.axle_length)) ) return obj joined_wheel = JoinedWheel( r_radius=80, l_width=20, axle_diam=30, ) joined_wheel.local_obj # ------------------- Red Box ------------------- from cqparts.display import render_props, display class Box(cqparts.Part): _render = render_props(template='red', alpha=0.2) def make(self): return cadquery.Workplane('XY').box(10,10,10) red_box = Box() # ------------------- Export ------------------- from cqparts.display import get_display_environment env_name = get_display_environment().name if env_name == 'freecad': pass #display(box) #display(wheel) #display(my_wheel) #display(joined_wheel) display(red_box) else: box.exporter('gltf')('box.gltf', embed=True) wheel.exporter('gltf')('wheel.gltf', embed=True) my_wheel.exporter('gltf')('holy-wheel.gltf', embed=True) joined_wheel.exporter('gltf')('joined-wheel.gltf', embed=True) red_box.exporter('gltf')('red-box.gltf', embed=True)
python
Apache-2.0
018e87e14c2c4d1d40b4bfe6a7e22bcf9baf0a53
2026-01-05T07:14:41.025281Z
false
cqparts/cqparts
https://github.com/cqparts/cqparts/blob/018e87e14c2c4d1d40b4bfe6a7e22bcf9baf0a53/docs/doc/_static/iframes/toy-car/generate.py
docs/doc/_static/iframes/toy-car/generate.py
#!/usr/bin/env python # The code here should be representative of that in: # https://cqparts.github.io/cqparts/doc/tutorials/assembly.html # ------------------- Wheel ------------------- import cadquery import cqparts from cqparts.params import * from cqparts.display import render_props, display class Wheel(cqparts.Part): # Parameters width = PositiveFloat(10, doc="width of wheel") diameter = PositiveFloat(30, doc="wheel diameter") # default appearance _render = render_props(template='wood_dark') def make(self): wheel = cadquery.Workplane('XY') \ .circle(self.diameter / 2).extrude(self.width) hole = cadquery.Workplane('XY') \ .circle(2).extrude(self.width/2).faces(">Z") \ .circle(4).extrude(self.width/2) wheel = wheel.cut(hole) return wheel def get_cutout(self, clearance=0): # A cylinder with a equal clearance on every face return cadquery.Workplane('XY', origin=(0, 0, -clearance)) \ .circle((self.diameter / 2) + clearance) \ .extrude(self.width + (2 * clearance)) # ------------------- Axle ------------------- from cqparts.constraint import Mate from cqparts.utils.geometry import CoordSystem class Axle(cqparts.Part): # Parameters length = PositiveFloat(50, doc="axle length") diameter = PositiveFloat(10, doc="axle diameter") # default appearance _render = render_props(color=(50, 50, 50)) # dark grey def make(self): axle = cadquery.Workplane('ZX', origin=(0, -self.length/2, 0)) \ .circle(self.diameter / 2).extrude(self.length) cutout = cadquery.Workplane('ZX', origin=(0, -self.length/2, 0)) \ .circle(1.5).extrude(10) axle = axle.cut(cutout) cutout = cadquery.Workplane('XZ', origin=(0, self.length/2, 0)) \ .circle(1.5).extrude(10) axle = axle.cut(cutout) return axle # wheel mates, assuming they rotate around z-axis @property def mate_left(self): return Mate(self, CoordSystem( origin=(0, -self.length / 2, 0), xDir=(1, 0, 0), normal=(0, -1, 0), )) @property def mate_right(self): return Mate(self, CoordSystem( origin=(0, self.length / 2, 0), xDir=(1, 0, 0), normal=(0, 1, 
0), )) def get_cutout(self, clearance=0): return cadquery.Workplane('ZX', origin=(0, -self.length/2 - clearance, 0)) \ .circle((self.diameter / 2) + clearance) \ .extrude(self.length + (2 * clearance)) # ------------------- Chassis ------------------- class Chassis(cqparts.Part): # Parameters width = PositiveFloat(50, doc="chassis width") _render = render_props(template='wood_light') def make(self): points = [ # chassis outline (-60,0),(-60,22),(-47,23),(-37,40), (5,40),(23,25),(60,22),(60,0), ] return cadquery.Workplane('XZ', origin=(0,self.width/2,0)) \ .moveTo(*points[0]).polyline(points[1:]).close() \ .extrude(self.width) # ------------------- Wheel Assembly ------------------- from cqparts.constraint import Fixed, Coincident class WheeledAxle(cqparts.Assembly): left_width = PositiveFloat(7, doc="left wheel width") right_width = PositiveFloat(7, doc="right wheel width") left_diam = PositiveFloat(25, doc="left wheel diameter") right_diam = PositiveFloat(25, doc="right wheel diameter") axle_diam = PositiveFloat(8, doc="axle diameter") axle_track = PositiveFloat(50, doc="distance between wheel tread midlines") wheel_clearance = PositiveFloat(3, doc="distance between wheel and chassis") def make_components(self): axle_length = self.axle_track - (self.left_width + self.right_width) / 2 return { 'axle': Axle(length=axle_length, diameter=self.axle_diam), 'left_wheel': Wheel( width=self.left_width, diameter=self.left_diam, ), 'right_wheel': Wheel( width=self.right_width, diameter=self.right_diam, ), } def make_constraints(self): return [ Fixed(self.components['axle'].mate_origin, CoordSystem()), Coincident( self.components['left_wheel'].mate_origin, self.components['axle'].mate_left ), Coincident( self.components['right_wheel'].mate_origin, self.components['axle'].mate_right ), ] def apply_cutout(self, part): # Cut wheel & axle from given part axle = self.components['axle'] left_wheel = self.components['left_wheel'] right_wheel = self.components['right_wheel'] 
local_obj = part.local_obj local_obj = local_obj \ .cut((axle.world_coords - part.world_coords) + axle.get_cutout()) \ .cut((left_wheel.world_coords - part.world_coords) + left_wheel.get_cutout(self.wheel_clearance)) \ .cut((right_wheel.world_coords - part.world_coords) + right_wheel.get_cutout(self.wheel_clearance)) part.local_obj = local_obj # ------------------- Car Assembly ------------------- class Car(cqparts.Assembly): # Parameters wheelbase = PositiveFloat(70, "distance between front and rear axles") axle_track = PositiveFloat(60, "distance between tread midlines") # wheels wheel_width = PositiveFloat(10, doc="width of all wheels") front_wheel_diam = PositiveFloat(30, doc="front wheel diameter") rear_wheel_diam = PositiveFloat(30, doc="rear wheel diameter") axle_diam = PositiveFloat(10, doc="axle diameter") def make_components(self): return { 'chassis': Chassis(width=self.axle_track), 'front_axle': WheeledAxle( left_width=self.wheel_width, right_width=self.wheel_width, left_diam=self.front_wheel_diam, right_diam=self.front_wheel_diam, axle_diam=self.axle_diam, axle_track=self.axle_track, ), 'rear_axle': WheeledAxle( left_width=self.wheel_width, right_width=self.wheel_width, left_diam=self.rear_wheel_diam, right_diam=self.rear_wheel_diam, axle_diam=self.axle_diam, axle_track=self.axle_track, ), } def make_constraints(self): return [ Fixed(self.components['chassis'].mate_origin), Coincident( self.components['front_axle'].mate_origin, Mate(self.components['chassis'], CoordSystem((self.wheelbase/2,0,0))), ), Coincident( self.components['rear_axle'].mate_origin, Mate(self.components['chassis'], CoordSystem((-self.wheelbase/2,0,0))), ), ] def make_alterations(self): # cut out wheel wells chassis = self.components['chassis'] self.components['front_axle'].apply_cutout(chassis) self.components['rear_axle'].apply_cutout(chassis) # ------------------- Export / Display ------------------- from cqparts.display import get_display_environment env_name = 
get_display_environment().name # ------- Models wheel = Wheel() axle = Axle() chassis = Chassis() wheeled_axle = WheeledAxle(right_width=2) car = Car() if env_name == 'cmdline': wheel.exporter('gltf')('wheel.gltf') axle.exporter('gltf')('axle.gltf') chassis.exporter('gltf')('chassis.gltf') wheeled_axle.exporter('gltf')('wheel-assembly.gltf') print(wheeled_axle.tree_str(name='wheel_assembly')) car.exporter('gltf')('car.gltf') print(car.tree_str(name='car')) car.find('chassis').exporter('gltf')('chassis-altered.gltf') elif env_name == 'freecad': pass # manually switchable for testing #display(wheel) #display(axle) #display(chassis) #display(wheeled_axle) display(car) #display(car.find('chassis'))
python
Apache-2.0
018e87e14c2c4d1d40b4bfe6a7e22bcf9baf0a53
2026-01-05T07:14:41.025281Z
false
cqparts/cqparts
https://github.com/cqparts/cqparts/blob/018e87e14c2c4d1d40b4bfe6a7e22bcf9baf0a53/docs/doc/_static/iframes/asm-build-cycle/generate.py
docs/doc/_static/iframes/asm-build-cycle/generate.py
#!/usr/bin/env python # -------------------------- Cylinder Part -------------------------- import cadquery import cqparts from cqparts.params import * from cqparts.display import display, render_props from cqparts.utils.geometry import CoordSystem from cqparts.constraint import Mate class Cylinder(cqparts.Part): diam = PositiveFloat(10, doc="cylinder's diameter") length = PositiveFloat(10, doc="cylinder's length") embed = PositiveFloat(2, doc="embedding distance") hole_diam = PositiveFloat(2.72, doc="pilot hole diam") _render = render_props(alpha=0.8) def make_base_cylinder(self): # This is used as a basis for make() and cutaway() return cadquery.Workplane('XY') \ .circle(self.diam/2).extrude(self.embed + self.length) def make(self): # Use the base cylindrical shape, and cut a hole through it return self.make_base_cylinder() \ .faces(">Z").hole(self.hole_diam / 2) @property def cutaway(self): # Use the base cylindrical shape, no alterations return self.make_base_cylinder() @property def mate_embedded(self): return Mate(self, CoordSystem((0, 0, self.embed))) # -------------------------- Plate Part -------------------------- from math import sin, cos, radians class Plate(cqparts.Part): length = PositiveFloat(20, doc="plate length") width = PositiveFloat(20, doc="plate width") thickness = PositiveFloat(10, doc="plate thickness") hole_diam = PositiveFloat(3, doc="hole diameter") connection_offset = Float(4, doc="hole's offset from plate center along x-axis") connection_angle = Float(15, doc="angle of mate point") def make(self): plate = cadquery.Workplane('XY') \ .box(self.length, self.width, self.thickness) hole_tool = cadquery.Workplane('XY', origin=(0, 0, -self.thickness * 5)) \ .circle(self.hole_diam / 2).extrude(self.thickness * 10) hole_tool = self.mate_hole.local_coords + hole_tool return plate.cut(hole_tool) @property def mate_hole(self): return Mate(self, CoordSystem( origin=(self.connection_offset, 0, self.thickness/2), xDir=(1, 0, 0), normal=(0, 
-sin(radians(self.connection_angle)), cos(radians(self.connection_angle))), )) # -------------------------- Demo Assembly -------------------------- from cqparts.constraint import Fixed, Coincident class Thing(cqparts.Assembly): # Components are updated to self.components first def make_components(self): return { 'pla': Plate(), 'cyl': Cylinder(), } # Then constraints are appended to self.constraints (second) def make_constraints(self): plate = self.components['pla'] cylinder = self.components['cyl'] return [ Fixed( mate=plate.mate_origin, world_coords=CoordSystem(origin=(-1,-5,-2), xDir=(-0.5,1,0)) # a bit of random placement ), Coincident( mate=cylinder.mate_embedded, to_mate=plate.mate_hole, ), ] # In between updating components, and adding constraints: # self.solve() is run. # This gives each component a valid world_coords value, so # we can use it in the next step... # Lastly, this function is run (any return is ignored) def make_alterations(self): # get Cylinder's location relative to the Plate coords = self.components['cyl'].world_coords - self.components['pla'].world_coords # apply that to the "cutout" we want to subtract from the plate cutout = coords + self.components['cyl'].cutaway self.components['pla'].local_obj = self.components['pla'].local_obj.cut(cutout) # -------------------------- Multiple Cycles -------------------------- from cqparts_misc.basic.primatives import Cube, Box, Sphere class BlockStack(cqparts.Assembly): def make_components(self): print("make Box 'a'") yield {'a': Box(length=10, width=10, height=20)} # grey print("make 2 Cubes 'b', and 'c'") yield { 'b': Cube(size=8, _render={'color': (255, 0, 0)}), # red 'c': Cube(size=3, _render={'color': (0, 255, 0)}), # green } print("make sphere 'd'") yield {'d': Sphere(radius=3, _render={'color': (0, 0, 255)})} # blue def make_constraints(self): print("place 'a' at origin") a = self.components['a'] yield [Fixed(a.mate_origin, CoordSystem((0,0,-10)))] print("place 'b' & 'c' relative to 'a'") b = 
self.components['b'] c = self.components['c'] yield [ Fixed(b.mate_bottom, a.world_coords + a.mate_pos_x.local_coords), Fixed(c.mate_bottom, a.world_coords + a.mate_neg_y.local_coords), ] print("place sphere 'd' on cube 'b'") d = self.components['d'] yield [Fixed(d.mate_origin, b.world_coords + b.mate_pos_x.local_coords)] def make_alterations(self): print("first round alteration(s)") yield print("second round alteration(s)") yield print("third round alteration(s)") yield # ------------------- Export / Display ------------------- # ------- Functions from cqparts.display import get_display_environment env_name = get_display_environment().name # ------- Models cylinder = Cylinder() plate = Plate() thing = Thing() block_stack = BlockStack() if env_name == 'cmdline': pass # manually switchable for testing cylinder.exporter('gltf')('cylinder.gltf') plate.exporter('gltf')('plate.gltf') thing.exporter('gltf')('thing.gltf') thing.find('pla').exporter('gltf')('plate-altered.gltf') block_stack.exporter('gltf')('block_stack.gltf') #display(block_stack) elif env_name == 'freecad': pass # manually switchable for testing #display(cylinder) #display(plate) #display(thing.find('pla')) display(thing) #display(block_stack)
python
Apache-2.0
018e87e14c2c4d1d40b4bfe6a7e22bcf9baf0a53
2026-01-05T07:14:41.025281Z
false
cqparts/cqparts
https://github.com/cqparts/cqparts/blob/018e87e14c2c4d1d40b4bfe6a7e22bcf9baf0a53/docs/doc/_static/iframes/biscuit/generate.py
docs/doc/_static/iframes/biscuit/generate.py
#!/usr/bin/env python # ------------------- Wood Panel ------------------- from math import radians, tan, cos import cadquery import cqparts from cqparts.params import * from cqparts.constraint import Mate from cqparts.utils import CoordSystem from cqparts.display import display, render_props class Panel(cqparts.Part): # dimensions height = PositiveFloat(50, doc="panel height (along join)") depth = PositiveFloat(50, doc="panel depth (from join to opposite side)") width = PositiveFloat(10, doc="thickness of panel") join_angle = FloatRange(0, 89, 45, doc="angle of join (unit: degrees)") _render = render_props(template='wood', alpha=0.5) def make(self): points = [ (0, 0), (self.depth, 0), (self.depth, self.width), (self.width * tan(radians(self.join_angle)), self.width), ] return cadquery.Workplane('XZ', origin=(0, self.height / 2, 0)) \ .moveTo(*points[0]).polyline(points[1:]).close() \ .extrude(self.height) def get_mate_join(self, ratio=0.5): # Return a mate that's somewhere along the join surface. return Mate(self, ( CoordSystem().rotated( (0, -(90 - self.join_angle), 0) ) + CoordSystem( origin=( (self.width / cos(radians(self.join_angle))) / 2, (-self.height / 2) + (self.height * ratio), 0 ), ) )) @property def mate_join(self): # default is half way along join return self.get_mate_join(ratio=0.5) @property def mate_join_reverse(self): # reversed join rotated around X-axis 180 deg return self.mate_join + CoordSystem().rotated((180, 0, 0)) # ------------------- Biscuit ------------------- class Biscuit(cqparts.Part): # Biscuit Dimensions width = PositiveFloat(30, doc="twice penetration depth") length = PositiveFloat(None, doc="length tip to tip") thickness = PositiveFloat(5, doc="material thickness") _render = render_props(template='wood_dark') def initialize_parameters(self): super(Biscuit, self).initialize_parameters() # set default length as a ratio of width if self.length is None: self.length = (5. 
/ 3) * self.width def make(self): # We'll just use the simplified model for this example return self.make_simple() # It could be rounded at the ends, and the sides chamfered, but # for this example we'll just keep it simple. def make_simple(self): # Biscuit shaped like a eye, 2 arcs from end to end (length) # Create left & right side, union them together biscuit = cadquery.Workplane('XY') for i in [1, -1]: biscuit = biscuit.union( cadquery.Workplane('XY', origin=(0, 0, -self.thickness / 2)) \ .moveTo(self.length / 2, 0) \ .threePointArc( (0, i * self.width / 2), (-self.length / 2, 0) ).close().extrude(self.thickness) ) return biscuit def make_cutter(self): # the cutaway material is the same shape as the biscuit itself # (the simplified model) return self.make_simple() # ------------------- Biscuit Fastener ------------------- from cqparts_fasteners.fasteners.base import Fastener from cqparts_fasteners.utils.evaluator import Evaluator, VectorEvaluator from cqparts_fasteners.utils.selector import Selector from cqparts_fasteners.utils.applicator import Applicator from cqparts.constraint import Fixed from itertools import chain class BiscuitFastener(Fastener): # Parameters ratio = FloatRange(0, 1, 0.5, doc="ratio penetration of biscuit into parts") cut_biscuit_holes = Boolean(True, doc="if True, biscuit holes are cut into pannels") class Evaluator(Evaluator): # Bi-directional evaluator, employes 2 VectorEvaluator instances that, # on their own, evaluate in the -Z direction def __init__(self, parts, location, parent=None): super(BiscuitFastener.Evaluator, self).__init__(parts=parts, parent=parent) self.location = location # positive z direction self.eval_pos = VectorEvaluator(parts, location.rotated((180, 0, 0))) # negative z direction self.eval_neg = VectorEvaluator(parts, location) def perform_evaluation(self): return (self.eval_pos.eval, self.eval_neg.eval) class Selector(Selector): def get_components(self): # Determine maximum biscuit width from the evaluations 
(pos, neg) = self.evaluator.eval pos_length = abs(pos[-1].end_point - pos[0].start_point) neg_length = abs(neg[-1].end_point - neg[0].start_point) max_width = 2 * min( pos_length * self.parent.ratio, # parent is the BiscuitFastener instance neg_length * self.parent.ratio ) return { 'biscuit': Biscuit( width=max_width, thickness=max_width * 0.1, ), } def get_constraints(self): #(pos, neg) = self.evaluator.eval return [ Fixed( self.components['biscuit'].mate_origin, CoordSystem().rotated((90, 0, 90)) # correctly orientate biscuit ), ] class Applicator(Applicator): def apply_alterations(self): if not self.parent.cut_biscuit_holes: return # fastener configured to place biscuit overlapping panel # Get the biscuit cutout shape biscuit = self.selector.components['biscuit'] biscuit_cutter = biscuit.make_cutter() # cutter in local coords # Duplicate parts possible with floating point rounding, because the # evaluator is placed on the 2 planar surfaces being joined. effected_parts = set([ # duplicates are removed within the set effect.part for effect in chain(*self.evaluator.eval[:]) ]) # Move biscuit relative to altered part's local coordinates, then # alter the part's local_obj. 
for part in effected_parts: biscuit_coordsys = biscuit.world_coords - part.world_coords part.local_obj = part.local_obj.cut(biscuit_coordsys + biscuit_cutter) # ------------------- Corner Assembly ------------------- from cqparts.constraint import Fixed, Coincident class CornerAssembly(cqparts.Assembly): biscuit_count = PositiveInt(2, doc="number of biscuits") join_angle = FloatRange(0, 89, 45, doc="angle of join (unit: degrees)") biscuit_holes = Boolean(True, doc="if True, holes are cut into pannels to house biscuits") def make_components(self): components = { 'a': Panel(join_angle=self.join_angle), 'b': Panel(join_angle=self.join_angle), } for i in range(self.biscuit_count): components['f_%i' % i] = BiscuitFastener( parts=[components['a'], components['b']], cut_biscuit_holes=self.biscuit_holes, ) return components def make_constraints(self): # position joined panels a = self.components['a'] b = self.components['b'] yield [ Fixed(a.mate_origin), Coincident( b.mate_join_reverse, a.mate_join ), ] # position biscuits along join biscuits = [ c for c in self.components.values() if isinstance(c, BiscuitFastener) ] yield [ Coincident( c.mate_origin, a.get_mate_join( ratio=(i + 1) * (1. / (len(biscuits) + 1)) ) ) for (i, c) in enumerate(biscuits) ] # ------------------- Export / Display ------------------- from cqparts.display import display from cqparts.display import get_display_environment env_name = get_display_environment().name # ------- Models panel = Panel() biscuit = Biscuit() corner_assembly = CornerAssembly( join_angle=45, biscuit_holes=True, ) if env_name == 'cmdline': panel.exporter('gltf')('panel.gltf') biscuit.exporter('gltf')('biscuit.gltf') corner_assembly.exporter('gltf')('corner_assembly.gltf') print(corner_assembly.tree_str()) #display(panel) #display(biscuit) #display(corner_assembly) elif env_name == 'freecad': #display(panel) #display(biscuit) display(corner_assembly)
python
Apache-2.0
018e87e14c2c4d1d40b4bfe6a7e22bcf9baf0a53
2026-01-05T07:14:41.025281Z
false
cqparts/cqparts
https://github.com/cqparts/cqparts/blob/018e87e14c2c4d1d40b4bfe6a7e22bcf9baf0a53/docs/doc/_static/iframes/fastener-screw/generate.py
docs/doc/_static/iframes/fastener-screw/generate.py
#!/usr/bin/env python # The code here should be representative of that in: # https://cqparts.github.io/cqparts/doc/tutorials/assembly.html # ------------------- Thing ------------------- import cqparts from cqparts.constraint import Fixed, Coincident from cqparts_fasteners.fasteners.screw import ScrewFastener from cqparts_misc.basic.primatives import Box from cqparts.utils import CoordSystem class Thing(cqparts.Assembly): def make_components(self): base = Box(length=20, width=30, height=15) top = Box(length=40, width=20, height=5) return { 'base': base, 'top': top, 'fastener': ScrewFastener(parts=[base, top]), } def make_constraints(self): base = self.components['base'] top = self.components['top'] fastener = self.components['fastener'] return [ Fixed(base.mate_bottom), Coincident(top.mate_bottom, base.mate_top), Coincident(fastener.mate_origin, top.mate_top), ] thing = Thing() # ------------------- Export ------------------- from cqparts.params import * from cqparts.display import display from cqparts.display import get_display_environment env_name = get_display_environment().name if env_name == 'freecad': display(thing) else: thing.exporter('gltf')('thing.gltf', embed=False)
python
Apache-2.0
018e87e14c2c4d1d40b4bfe6a7e22bcf9baf0a53
2026-01-05T07:14:41.025281Z
false
cqparts/cqparts
https://github.com/cqparts/cqparts/blob/018e87e14c2c4d1d40b4bfe6a7e22bcf9baf0a53/docs/doc/_static/iframes/fastener-nut-bolt/generate.py
docs/doc/_static/iframes/fastener-nut-bolt/generate.py
#!/usr/bin/env python # The code here should be representative of that in: # https://cqparts.github.io/cqparts/doc/tutorials/assembly.html # ------------------- Thing ------------------- import cqparts from cqparts.constraint import Fixed, Coincident from cqparts_fasteners.fasteners.nutbolt import NutAndBoltFastener from cqparts_misc.basic.primatives import Box class Thing(cqparts.Assembly): def make_components(self): base = Box(length=20, width=30, height=15) top = Box(length=40, width=20, height=5) return { 'base': base, 'top': top, 'fastener': NutAndBoltFastener(parts=[base, top]), } def make_constraints(self): base = self.components['base'] top = self.components['top'] fastener = self.components['fastener'] return [ Fixed(base.mate_bottom), Coincident(top.mate_bottom, base.mate_top), Coincident(fastener.mate_origin, top.mate_top), ] thing = Thing() # ------------------- Export ------------------- from cqparts.params import * from cqparts.display import display from cqparts.display import get_display_environment env_name = get_display_environment().name if env_name == 'freecad': display(thing) else: thing.exporter('gltf')('thing.gltf', embed=False)
python
Apache-2.0
018e87e14c2c4d1d40b4bfe6a7e22bcf9baf0a53
2026-01-05T07:14:41.025281Z
false
cqparts/cqparts
https://github.com/cqparts/cqparts/blob/018e87e14c2c4d1d40b4bfe6a7e22bcf9baf0a53/examples/logo.py
examples/logo.py
""" This example makes the cqparts logo """ from cqparts import Assembly class CQPartsLogo(Assembly): # TODO pass
python
Apache-2.0
018e87e14c2c4d1d40b4bfe6a7e22bcf9baf0a53
2026-01-05T07:14:41.025281Z
false
cqparts/cqparts
https://github.com/cqparts/cqparts/blob/018e87e14c2c4d1d40b4bfe6a7e22bcf9baf0a53/examples/fastener_easyinstall.py
examples/fastener_easyinstall.py
""" Functional implementation of the tutorial in :ref:`tutorials_fastener-easy-install` """ import cadquery import cqparts # cqparts: parameters from cqparts.params import PositiveFloat, Boolean # cqparts: fasteners from cqparts_fasteners import Fastener from cqparts_fasteners.utils.evaluator import VectorEvaluator from cqparts_fasteners.utils.selector import Selector from cqparts_fasteners.utils.applicator import Applicator # from cqparts_fasteners.params import HeadType, DriveType, ThreadType # -------------------------- Parts -------------------------- class WoodScrew(cqparts.Part): diameter = PositiveFloat(default=3, doc="bore hole diameter") thread_length = PositiveFloat(default=5, doc="distance the screw bores into part") # TODO: more parameters def make(self): TODO: code for wood screw make() class Anchor(cqparts.Part): diameter = PositiveFloat(default=10, doc="bore diameter for anchor") reversed = Boolean(default=False, doc="if True, screw drive is put on the reverse side") # TODO more parameters def make(self): # TODO: code to build anchor # -------------------------- Fastener -------------------------- class EasyInstallEvaluator(VectorEvaluator): def __init__(self, parts, start, dir, anchor_plane): super(EasyInstallEvaluator, self).__init__(parts, start, dir) self.anchor_plane = anchor_plane @property def anchor_norm(self): return self.anchor_plane.zDir class EasyInstallSelector(Selector): #TODO: code for selector # note: selector must return a wood-screw and anchor # does it return a Fastener instance? pass def get_selection(self): # TODO: returns single Fastener instance # if there are multiple viable choices, it's up to the selector # to narrow it down to a single selection. 
pass class EasyInstallApplicator(Applicator): # TODO: code for applicator pass class EasyInstallFastener(Fastener): EVALUATOR_CLASS = EasyInstallEvaluator SELECTOR_CLASS = EasyInstallSelector APPLICATOR_CLASS = EasyInstallApplicator def make(self): screw = WoodScrew() # TODO: parameters + mate anchor = Anchor() # TODO: parameters + mate return { 'wood_screw': screw, 'anchor': anchor, } # -------------------------- Using the Fastener -------------------------- import cadquery class Panel1(cqparts.Part): def make(self): return cadquery.Workplane('XY', origin=(0, -50, -10)) \ .box(100, 100, 10, centered=(False, False, False)) class Panel2(cqparts.Part): def make(self): return cadquery.Workplane('XY', origin=(0, -50, 0)) \ .box(10, 100, 100, centered=(False, False, False)) # TODO: 2 instances of the same panel, different orientation horizontal = Panel1() vertical = Panel2() # Find vertical panel's centre # note: the vertical panel is built horizontally, **then** rotated upright. # so we're finding the vector we want in it's local coordinates, then # we're converting them to word coordinates to perform the evaluation. v_bot = vertical.local_obj.faces("<Z").workplane().plane v_top = vertical.local_obj.faces(">Z").workplane().plane midpoint = (v_bot.origin + v_top.origin).multiply(0.5) # direction of bolt (normal to horizontal panel) evaluation = EasyInstallFastener.evaluate( parts=[horizontal, vertical], start=mid, parts, start, dir, anchor_plane
python
Apache-2.0
018e87e14c2c4d1d40b4bfe6a7e22bcf9baf0a53
2026-01-05T07:14:41.025281Z
false
cqparts/cqparts
https://github.com/cqparts/cqparts/blob/018e87e14c2c4d1d40b4bfe6a7e22bcf9baf0a53/examples/toy_car.py
examples/toy_car.py
#!/usr/bin/env python

# The code here should be representative of that in:
# https://cqparts.github.io/cqparts/doc/tutorials/assembly.html

# ------------------- Wheel -------------------

import cadquery
import cqparts
from cqparts.params import *
from cqparts.display import render_props, display


class Wheel(cqparts.Part):
    """A solid cylindrical wheel with a stepped axle hole through its center."""

    # Parameters
    width = PositiveFloat(10, doc="width of wheel")
    diameter = PositiveFloat(30, doc="wheel diameter")

    # default appearance
    _render = render_props(template='wood_dark')

    def make(self):
        """Build the wheel solid: a disc with a 2mm-radius bore on one half
        and a 4mm-radius counterbore on the other half (for the axle)."""
        wheel = cadquery.Workplane('XY') \
            .circle(self.diameter / 2).extrude(self.width)
        hole = cadquery.Workplane('XY') \
            .circle(2).extrude(self.width/2).faces(">Z") \
            .circle(4).extrude(self.width/2)
        wheel = wheel.cut(hole)
        return wheel

    def get_cutout(self, clearance=0):
        """Return a cylinder enclosing the wheel, grown by `clearance`
        on every face; used to carve a wheel-well out of another part."""
        # A cylinder with a equal clearance on every face
        return cadquery.Workplane('XY', origin=(0, 0, -clearance)) \
            .circle((self.diameter / 2) + clearance) \
            .extrude(self.width + (2 * clearance))


# ------------------- Axle -------------------

from cqparts.constraint import Mate
from cqparts.utils.geometry import CoordSystem


class Axle(cqparts.Part):
    """A cylindrical axle, centered on its own origin, with a small
    cross-drilled cutout at each end (presumably wheel retention —
    TODO confirm against the tutorial)."""

    # Parameters
    length = PositiveFloat(50, doc="axle length")
    diameter = PositiveFloat(10, doc="axle diameter")

    # default appearance
    _render = render_props(color=(50, 50, 50))  # dark grey

    def make(self):
        """Build the axle solid and subtract a 1.5mm-radius cutout at
        each end (one from each side, along opposite workplane normals)."""
        axle = cadquery.Workplane('ZX', origin=(0, -self.length/2, 0)) \
            .circle(self.diameter / 2).extrude(self.length)
        cutout = cadquery.Workplane('ZX', origin=(0, -self.length/2, 0)) \
            .circle(1.5).extrude(10)
        axle = axle.cut(cutout)
        cutout = cadquery.Workplane('XZ', origin=(0, self.length/2, 0)) \
            .circle(1.5).extrude(10)
        axle = axle.cut(cutout)
        return axle

    # wheel mates, assuming they rotate around z-axis
    @property
    def mate_left(self):
        """Mate at the -Y end of the axle, normal pointing outward (-Y)."""
        return Mate(self, CoordSystem(
            origin=(0, -self.length / 2, 0),
            xDir=(1, 0, 0), normal=(0, -1, 0),
        ))

    @property
    def mate_right(self):
        """Mate at the +Y end of the axle, normal pointing outward (+Y)."""
        return Mate(self, CoordSystem(
            origin=(0, self.length / 2, 0),
            xDir=(1, 0, 0), normal=(0, 1, 0),
        ))

    def get_cutout(self, clearance=0):
        """Return a cylinder enclosing the axle, grown by `clearance`
        radially and at both ends; used to cut clearance from the chassis."""
        return cadquery.Workplane('ZX', origin=(0, -self.length/2 - clearance, 0)) \
            .circle((self.diameter / 2) + clearance) \
            .extrude(self.length + (2 * clearance))


# ------------------- Chassis -------------------

class Chassis(cqparts.Part):
    """The car body: a 2D profile extruded across the chassis width."""

    # Parameters
    width = PositiveFloat(50, doc="chassis width")

    _render = render_props(template='wood_light')

    def make(self):
        """Extrude the closed chassis outline (in the XZ plane) by `width`."""
        points = [  # chassis outline
            (-60,0),(-60,22),(-47,23),(-37,40),
            (5,40),(23,25),(60,22),(60,0),
        ]
        return cadquery.Workplane('XZ', origin=(0,self.width/2,0)) \
            .moveTo(*points[0]).polyline(points[1:]).close() \
            .extrude(self.width)


# ------------------- Wheel Assembly -------------------

from cqparts.constraint import Fixed, Coincident


class WheeledAxle(cqparts.Assembly):
    """An axle with a wheel mated to each end.

    The axle length is derived from `axle_track` so that the *midlines*
    of the two wheel treads are `axle_track` apart.
    """

    left_width = PositiveFloat(7, doc="left wheel width")
    right_width = PositiveFloat(7, doc="right wheel width")
    left_diam = PositiveFloat(25, doc="left wheel diameter")
    right_diam = PositiveFloat(25, doc="right wheel diameter")
    axle_diam = PositiveFloat(8, doc="axle diameter")
    axle_track = PositiveFloat(50, doc="distance between wheel tread midlines")
    wheel_clearance = PositiveFloat(3, doc="distance between wheel and chassis")

    def make_components(self):
        """Create the axle and both wheels (not yet positioned)."""
        # axle spans between the wheel midlines, hence the half-width offsets
        axle_length = self.axle_track - (self.left_width + self.right_width) / 2
        return {
            'axle': Axle(length=axle_length, diameter=self.axle_diam),
            'left_wheel': Wheel(
                 width=self.left_width, diameter=self.left_diam,
            ),
            'right_wheel': Wheel(
                 width=self.right_width, diameter=self.right_diam,
            ),
        }

    def make_constraints(self):
        """Fix the axle at this assembly's origin; snap each wheel's
        origin onto the corresponding axle-end mate."""
        return [
            Fixed(self.components['axle'].mate_origin, CoordSystem()),
            Coincident(
                self.components['left_wheel'].mate_origin,
                self.components['axle'].mate_left
            ),
            Coincident(
                self.components['right_wheel'].mate_origin,
                self.components['axle'].mate_right
            ),
        ]

    def apply_cutout(self, part):
        """Subtract this assembly's axle and wheel envelopes from `part`
        (mutates `part.local_obj` in place).

        Each cutout solid is transformed from this assembly's frame into
        `part`'s local frame via the world-coordinate difference.
        """
        # Cut wheel & axle from given part
        axle = self.components['axle']
        left_wheel = self.components['left_wheel']
        right_wheel = self.components['right_wheel']
        local_obj = part.local_obj
        local_obj = local_obj \
            .cut((axle.world_coords - part.world_coords) + axle.get_cutout()) \
            .cut((left_wheel.world_coords - part.world_coords) + left_wheel.get_cutout(self.wheel_clearance)) \
            .cut((right_wheel.world_coords - part.world_coords) + right_wheel.get_cutout(self.wheel_clearance))
        part.local_obj = local_obj


# ------------------- Car Assembly -------------------

class Car(cqparts.Assembly):
    """A toy car: one chassis plus front and rear wheeled axles."""

    # Parameters
    # NOTE: the second positional argument here is presumably the `doc`
    # parameter (passed by keyword everywhere else in this file) — confirm
    # against the cqparts Parameter signature.
    wheelbase = PositiveFloat(70, "distance between front and rear axles")
    axle_track = PositiveFloat(60, "distance between tread midlines")
    # wheels
    wheel_width = PositiveFloat(10, doc="width of all wheels")
    front_wheel_diam = PositiveFloat(30, doc="front wheel diameter")
    rear_wheel_diam = PositiveFloat(30, doc="rear wheel diameter")
    axle_diam = PositiveFloat(10, doc="axle diameter")

    def make_components(self):
        """Create the chassis and both wheeled-axle sub-assemblies."""
        return {
            'chassis': Chassis(width=self.axle_track),
            'front_axle': WheeledAxle(
                left_width=self.wheel_width,
                right_width=self.wheel_width,
                left_diam=self.front_wheel_diam,
                right_diam=self.front_wheel_diam,
                axle_diam=self.axle_diam,
                axle_track=self.axle_track,
            ),
            'rear_axle': WheeledAxle(
                left_width=self.wheel_width,
                right_width=self.wheel_width,
                left_diam=self.rear_wheel_diam,
                right_diam=self.rear_wheel_diam,
                axle_diam=self.axle_diam,
                axle_track=self.axle_track,
            ),
        }

    def make_constraints(self):
        """Fix the chassis; place each axle half a wheelbase fore/aft of it."""
        return [
            Fixed(self.components['chassis'].mate_origin),
            Coincident(
                self.components['front_axle'].mate_origin,
                Mate(self.components['chassis'], CoordSystem((self.wheelbase/2,0,0))),
            ),
            Coincident(
                self.components['rear_axle'].mate_origin,
                Mate(self.components['chassis'], CoordSystem((-self.wheelbase/2,0,0))),
            ),
        ]

    def make_alterations(self):
        """Carve wheel-wells for both axles out of the chassis."""
        # cut out wheel wells
        chassis = self.components['chassis']
        self.components['front_axle'].apply_cutout(chassis)
        self.components['rear_axle'].apply_cutout(chassis)


# ------------------- Display Result -------------------
# Could also export to another format
# NOTE: deliberately inverted __main__-style guard — the demo renders when
# this file is executed directly OR imported under any name except 'toy_car'.
if __name__ != 'toy_car':  # not run as a module, so display result
    car = Car()

    from cqparts.display import display
    display(car)
python
Apache-2.0
018e87e14c2c4d1d40b4bfe6a7e22bcf9baf0a53
2026-01-05T07:14:41.025281Z
false
cqparts/cqparts
https://github.com/cqparts/cqparts/blob/018e87e14c2c4d1d40b4bfe6a7e22bcf9baf0a53/examples/servo.py
examples/servo.py
from cqparts import Assembly


class Servo(Assembly):
    """Placeholder for a servo-motor assembly.

    No components or constraints are implemented yet; instantiating this
    class yields an empty :class:`cqparts.Assembly`.
    """
    # TODO
    pass
python
Apache-2.0
018e87e14c2c4d1d40b4bfe6a7e22bcf9baf0a53
2026-01-05T07:14:41.025281Z
false
cqparts/cqparts
https://github.com/cqparts/cqparts/blob/018e87e14c2c4d1d40b4bfe6a7e22bcf9baf0a53/deployment/make-setup.py
deployment/make-setup.py
"""Deployment helper: generate a clean ``setup.py`` for one of the libraries
under ``../src`` by rendering ``setup.py.jinja`` with serialized setup kwargs.

Run from the ``deployment/`` directory of the repository.
"""
import codecs
import os
import io
import sys
import re
import setuptools
# NOTE: distutils is deprecated (removed in Python 3.12); kept because the
# surrounding tooling still targets older interpreters.
from distutils.version import LooseVersion
import argparse
import pprint

import jinja2

LIB_PARENT_DIR = os.path.join('..', 'src')
HERE = os.path.abspath(os.path.dirname(__file__))

# make the libraries under ../src importable
sys.path.insert(0, LIB_PARENT_DIR)

parser = argparse.ArgumentParser(description='Deployment script')


def module_type(value):
    """argparse ``type=`` callback: import and validate the library `value`.

    :param value: library (folder) name under ``../src``
    :return: the imported module object
    :raises argparse.ArgumentTypeError: if the library is missing, shadowed
        by a non-local import, mis-named, or not flagged release-ready.
    """
    module = __import__(value)

    # Verify lib exists in ../src directory
    if not os.path.exists(os.path.join(LIB_PARENT_DIR, value)):
        raise argparse.ArgumentTypeError(
            "library '{lib}' cannot be found in folder '{parent}'".format(
                lib=value, parent=LIB_PARENT_DIR,
            )
        )

    # Verify imported module is that in ../src/{module name} (even if referenced by symlink)
    module_filename = module.__file__
    local_filename = os.path.join(
        LIB_PARENT_DIR, value, os.path.basename(module_filename)
    )
    if os.stat(module_filename).st_ino != os.stat(local_filename).st_ino:
        # FIX: the format field is named '{lib}', so the argument must be
        # passed by keyword; the original positional call raised
        # KeyError('lib') instead of producing this error message.
        raise argparse.ArgumentTypeError(
            "imported '{lib}' lib is not local".format(lib=module.__name__)
        )

    # Verify __name__ is equal to the containing folder's name
    if module.__name__ != value:
        raise argparse.ArgumentTypeError(
            "imported {lib!r} but the __name__ of '{name}' is invalid, expecting {expected}".format(
                lib=module, name=module.__name__, expected=value,
            )
        )

    # Verify module is ready for release
    # (deliberate identity-style check: only an explicit True passes)
    if getattr(module, '__release_ready__', True) != True:
        raise argparse.ArgumentTypeError(
            "library '{lib}' is not ready for release".format(lib=value)
        )

    return module


parser.add_argument(
    '--lib', dest='lib', type=module_type, default=module_type('cqparts'),
    help='library being deployed',
)

args = parser.parse_args()

# every sub-package of the chosen library
PACKAGES = setuptools.find_packages(
    where=LIB_PARENT_DIR,
    include=(args.lib.__name__, args.lib.__name__ + '.*'),
)

CLASSIFIERS = [
    "Intended Audience :: Developers",
    "Intended Audience :: Manufacturing",
    "Intended Audience :: End Users/Desktop",
    "Intended Audience :: Science/Research",
    "License :: OSI Approved :: Apache Software License",
    "Natural Language :: English",
    "Operating System :: MacOS",
    "Operating System :: POSIX",
    "Operating System :: Unix",
    "Programming Language :: Python",
    "Programming Language :: Python :: 2",
    "Programming Language :: Python :: 3",
    "Topic :: Scientific/Engineering",
    "Topic :: Multimedia :: Graphics :: 3D Modeling",
    "Topic :: Multimedia :: Graphics :: 3D Rendering",
    #"Development Status :: ???" added later
]

# pre-requisite packages, get from library's requirements.txt file
INSTALL_REQUIRES = []
requirements_filename = os.path.join(LIB_PARENT_DIR, args.lib.__name__, 'requirements.txt')
if os.path.exists(requirements_filename):
    with open(requirements_filename, 'r') as fh:
        INSTALL_REQUIRES = [
            l.rstrip('\n') for l in fh.readlines()
            if not re.search(r'^(.*#|\s*$)', l)  # invalid: contains #, or is a blank line
        ]

SCRIPTS = [
    # scripts callable from a standard shell upon package installation by end-user
    # (none)
]


def read(*parts):
    """
    Build an absolute path from *parts* and return the contents of the
    resulting file.  Assume UTF-8 encoding.
    """
    # context manager so the file handle is closed deterministically
    with codecs.open(os.path.join(HERE, *parts), "rb", "utf-8") as fh:
        return fh.read()


# Development Status Classifier
VERSION_CLASSIFIER_MAP = [
    (LooseVersion('0.1'), "Development Status :: 2 - Pre-Alpha"),
    (LooseVersion('0.2'), "Development Status :: 3 - Alpha"),
    (LooseVersion('0.3'), "Development Status :: 4 - Beta"),
    (LooseVersion('1.0'), "Development Status :: 5 - Production/Stable"),
]


def version_classifier(version_str):
    """
    Map a version string to its "Development Status" trove classifier.

    :param version_str: library version, e.g. ``"0.1.0"``
    :return: the classifier for the highest threshold <= version
    :raises ValueError: if no threshold matches (version below 0.1)
    """
    # cast version
    version = LooseVersion(version_str)

    # walk thresholds from highest to lowest; first one satisfied wins
    for (test_ver, classifier) in reversed(sorted(VERSION_CLASSIFIER_MAP, key=lambda x: x[0])):
        if version >= test_ver:
            return classifier

    raise ValueError("could not find valid 'Development Status' classifier for v{}".format(version_str))


CLASSIFIERS.append(version_classifier(args.lib.__version__))


# ------- Mainline --------

def setup_standin(**kwargs):
    """Serialize `kwargs` and render them into a standalone ``setup.py``.

    Used instead of ``setuptools.setup``: the ``setup.py`` file itself is
    distributed with the built module(s) and executed on end-user systems,
    so none of the repository-dependent logic above belongs in it.  Instead,
    the fully-evaluated kwargs are pretty-printed into ``setup.py.jinja``
    to produce a clean, self-contained ``setup.py``.
    """
    params_str = pprint.PrettyPrinter(indent=2).pformat(kwargs)
    with open('setup.py.jinja', 'r') as tmp, open(os.path.join(LIB_PARENT_DIR, 'setup.py'), 'w') as output:
        template = jinja2.Template(tmp.read())
        output.write(template.render(params=params_str))


#setuptools.setup(
setup_standin(
    name=args.lib.__name__,
    description=args.lib.__description__,
    license=args.lib.__license__,
    url=args.lib.__url__,
    version=args.lib.__version__,
    author=args.lib.__author__,
    author_email=args.lib.__email__,
    maintainer=args.lib.__author__,
    maintainer_email=args.lib.__email__,
    keywords=args.lib.__keywords__,
    long_description=io.open(
        os.path.join(LIB_PARENT_DIR, args.lib.__name__, 'README.rst'),
        encoding='utf-8',
    ).read(),
    packages=PACKAGES,
    #package_dir={'': LIB_PARENT_DIR},
    package_data={'': ['LICENSE'] + getattr(args.lib, '__package_data__', [])},
    zip_safe=False,
    classifiers=CLASSIFIERS,
    install_requires=INSTALL_REQUIRES,
    scripts=SCRIPTS,
)
python
Apache-2.0
018e87e14c2c4d1d40b4bfe6a7e22bcf9baf0a53
2026-01-05T07:14:41.025281Z
false
mortensteenrasmussen/docker-registry-manifest-cleanup
https://github.com/mortensteenrasmussen/docker-registry-manifest-cleanup/blob/b03663e699c7e3c0df7ed1d4b96a3d90528a4cf5/docker-registry-cleanup.py
docker-registry-cleanup.py
"""Delete untagged manifests from a Docker registry.

Walks the registry's storage backend (local filesystem or S3) to find all
manifest revisions, subtracts those still reachable from a tag, and issues
HTTP DELETE requests against the registry API for the remainder.
Configuration is entirely via environment variables (REGISTRY_URL,
REGISTRY_STORAGE, REGISTRY_DIR, REGISTRY_AUTH, DRY_RUN, ...).
"""
import glob
import urllib3
from requests.auth import HTTPBasicAuth
import requests
import json
import re
import os
import boto
from boto.s3.key import Key

############################
######## Functions #########
############################

def exit_with_error(message):
    # Print `message` and terminate the script with exit status 1.
    print(message)
    print("Exiting")
    exit(1)

# Initial setup
try:
    if "DRY_RUN" in os.environ and os.environ['DRY_RUN'] == "true":
        dry_run_mode = True
        print("Running in dry-run mode. No changes will be made.")
        print()
    else:
        dry_run_mode = False

    if "REGISTRY_STORAGE" in os.environ and os.environ['REGISTRY_STORAGE'] == "S3":
        print("Running against S3 storage")
        storage_on_s3 = True
        s3_access_key = os.environ['ACCESS_KEY']
        s3_secret_key = os.environ['SECRET_KEY']
        s3_bucket = os.environ['BUCKET']
        s3_region = os.environ['REGION']
        if "REGISTRY_DIR" in os.environ:
            registry_dir = os.environ['REGISTRY_DIR']
        else:
            registry_dir = "/"
    else:
        print("Running against local storage")
        storage_on_s3 = False
        if "REGISTRY_DIR" in os.environ:
            registry_dir = os.environ['REGISTRY_DIR']
        else:
            registry_dir = "/registry"

    registry_url = os.environ['REGISTRY_URL']
except KeyError as e:
    # any mandatory env var missing lands here
    exit_with_error("Missing environment variable: %s" % (e))

# Optional vars
if "REGISTRY_AUTH" in os.environ:
    registry_auth = HTTPBasicAuth(os.environ["REGISTRY_AUTH"].split(":")[0], os.environ["REGISTRY_AUTH"].split(":")[1])
else:
    # NOTE(review): an empty dict is falsy, so requests ignores it — works,
    # but None would express "no auth" more directly.
    registry_auth = {}

if "SELF_SIGNED_CERT" in os.environ:
    cert_verify = False
    urllib3.disable_warnings(urllib3.exceptions.InsecureRequestWarning)
else:
    cert_verify = True

token_authentication = False
token_auth_details = {}

# Check connection to registry
try:
    r = requests.get("%s/v2/" % (registry_url), auth=registry_auth, verify=cert_verify)
    if r.status_code == 401:
        if "Www-Authenticate" in r.headers and "Bearer" in r.headers["Www-Authenticate"]:
            #We have token based auth, try it
            # header looks like: Bearer realm="...",service="...",scope="..."
            auth_header = r.headers["Www-Authenticate"].split(" ")[1]
            token_authentication = True
            token_auth_details = dict(s.split("=", 1) for s in re.sub('"',"",auth_header).split(","))
            r2 = requests.get("%s?service=%s&scope=" % (token_auth_details["realm"],token_auth_details["service"]), auth=registry_auth, verify=cert_verify)
            if r2.status_code == 401:
                exit_with_error("Got an authentication error connecting to the registry - even with token authentication. Check credentials, or add REGISTRY_AUTH='username:password'")
            else:
                auth_token = r2.json()["token"]
                registry_headers = {"Authorization": "Bearer %s" % (auth_token)}
        else:
            exit_with_error("Got an authentication error connecting to the registry. Check credentials, or add REGISTRY_AUTH='username:password'")
except requests.exceptions.SSLError as e:
    exit_with_error("Got an SSLError connecting to the registry. Might be a self signed cert, please set SELF_SIGNED_CERT=true")
except requests.exceptions.RequestException as e:
    exit_with_error("Could not contact registry at %s - error: %s" % (registry_url, e))

# Set variables
repo_dir = registry_dir + "/docker/registry/v2/repositories"
blob_dir = registry_dir + "/docker/registry/v2/blobs"
all_manifests = set()           # every manifest revision digest found in storage
linked_manifests = set()        # digests still referenced by some tag
linked_manifest_files = set()   # paths of tag "current/link" files
file_list = set()               # every file path in the storage backend

if storage_on_s3:
    bucket_size = 0
    # Connect to bucket
    conn = boto.s3.connect_to_region(s3_region, aws_access_key_id=s3_access_key, aws_secret_access_key=s3_secret_key)
    bucket = conn.get_bucket(s3_bucket)
    s3_file_list = bucket.list()

    #get all the filenames in bucket as well as size
    for key in s3_file_list:
        bucket_size += key.size
        file_list.add(key.name)
else:
    #local storage
    for filename in glob.iglob("%s/**" % (registry_dir), recursive=True):
        if os.path.isfile(filename):
            file_list.add(filename)

# classify the link files: manifest revisions vs tag pointers
for filename in file_list:
    if filename.endswith("link"):
        if "_manifests/revisions/sha256" in filename:
            all_manifests.add(re.sub('.*docker/registry/v2/repositories/.*/_manifests/revisions/sha256/(.*)/link','\\1',filename))
        elif "_manifests/tags/" in filename and filename.endswith("/current/link"):
            linked_manifest_files.add(filename)

#fetch linked_manifest_files
for filename in linked_manifest_files:
    error = False
    if storage_on_s3:
        k = Key(bucket)
        k.key = filename
        #Get the shasum from the link file
        shasum = k.get_contents_as_string().decode().split(":")[1]
        #Get the manifest json to check if its a manifest list
        k.key = "%s/sha256/%s/%s/data" % (blob_dir, shasum[0:2], shasum)
        try:
            manifest = json.loads(k.get_contents_as_string().decode())
        except Exception as e:
            error = True
            print("Caught error trying to read manifest, ignoring.")
    else:
        # NOTE(review): these file handles are never closed (left to GC)
        shasum = open(filename, 'r').read().split(":")[1]
        try:
            manifest = json.loads(open("%s/sha256/%s/%s/data" % (blob_dir, shasum[0:2], shasum)).read())
        except Exception as e:
            error = True
            print("Caught error trying to read manifest, ignoring.")

    if error:
        # unreadable manifest: treat it as linked so it is never deleted
        linked_manifests.add(shasum)
    else:
        manifest_media_type = manifest["mediaType"]
        if manifest_media_type == "application/vnd.docker.distribution.manifest.list.v2+json":
            #add all manifests from manifest list
            # NOTE(review): mf["digest"] is typically "sha256:<hex>" while
            # all_manifests holds bare hex digests — verify these prefixes
            # line up, otherwise list members may look unlinked.
            for mf in manifest["manifests"]:
                linked_manifests.add(mf["digest"])
        else:
            linked_manifests.add(shasum)

# manifests present in storage but not reachable from any tag
unused_manifests = all_manifests - linked_manifests

if len(unused_manifests) == 0:
    print("No manifests without tags found. Nothing to do.")
    if storage_on_s3:
        print("For reference, the size of the bucket is currently: %s bytes" % (bucket_size))
else:
    print("Found " + str(len(unused_manifests)) + " manifests without tags. Deleting")

#counters
current_count = 0
cleaned_count = 0
failed_count = 0
total_count = len(unused_manifests)

for manifest in unused_manifests:
    current_count += 1
    status_msg = "Cleaning %s of %s" % (current_count, total_count)
    if "DRY_RUN" in os.environ and os.environ['DRY_RUN'] == "true":
        status_msg += " ..not really, due to dry-run mode"
    print(status_msg)

    #get repos
    # a manifest may be referenced from several repositories; find them all
    repos = set()
    for file in file_list:
        if "_manifests/revisions/sha256/%s" % (manifest) in file and file.endswith("link"):
            repos.add(re.sub(".*docker/registry/v2/repositories/(.*)/_manifests/revisions/sha256.*", "\\1", file))

    for repo in repos:
        if dry_run_mode:
            print("DRY_RUN: Would have run an HTTP DELETE request to %s/v2/%s/manifests/sha256:%s" % (registry_url, repo, manifest))
        else:
            if token_authentication:
                # token scope is per-repository, so a fresh token is fetched per repo
                r2 = requests.get("%s?service=%s&scope=repository:%s:*" % (token_auth_details["realm"],token_auth_details["service"],repo), auth=registry_auth, verify=cert_verify)
                auth_token = r2.json()["token"]
                registry_headers = {"Authorization": "Bearer %s" % (auth_token)}
                r = requests.delete("%s/v2/%s/manifests/sha256:%s" % (registry_url, repo, manifest), verify=cert_verify, headers=registry_headers)
            else:
                r = requests.delete("%s/v2/%s/manifests/sha256:%s" % (registry_url, repo, manifest), auth=registry_auth, verify=cert_verify)

            # the registry API answers 202 Accepted on successful manifest delete
            if r.status_code == 202:
                cleaned_count += 1
            else:
                failed_count += 1
                print("Failed to clean manifest %s from repo %s with response code %s" % (manifest, repo, r.status_code))

print("Job done, Cleaned %s of %s manifests." % (cleaned_count, total_count))
print()
print()
if storage_on_s3:
    print("For reference, the size of the bucket before this run was: %s bytes" % (bucket_size))
    print()
print("Please run a garbage-collect on the registry now to free up disk space.")
python
Apache-2.0
b03663e699c7e3c0df7ed1d4b96a3d90528a4cf5
2026-01-05T07:14:47.461126Z
false
liuff19/DreamReward
https://github.com/liuff19/DreamReward/blob/eeb5c648e6c2a25c8f6f8038edfe75d73c811614/gradio_app.py
gradio_app.py
"""Gradio web demo for threestudio text-to-3D.

Two entry points (selected by the first CLI argument):
  * ``launch`` — serve the Gradio UI; each Run spawns a ``launch.py``
    training subprocess plus a watcher subprocess.
  * ``watch``  — watchdog for one training process: kills it if the UI
    stops refreshing the trial's ``alive`` timestamp file.
"""
import argparse
import glob
import os
import re
import signal
import subprocess
import tempfile
import time
from dataclasses import dataclass
from datetime import datetime
from typing import Optional

import gradio as gr
import numpy as np
import psutil
import trimesh

from threestudio.utils.config import load_config
from threestudio.utils.typing import *


def tail(f, window=20):
    """Return the last `window` lines of binary file object `f` as one string."""
    if window == 0:
        return []

    BUFSIZ = 1024
    f.seek(0, 2)
    remaining_bytes = f.tell()
    size = window + 1
    block = -1
    data = []

    # read backwards in BUFSIZ chunks until enough newlines are collected
    while size > 0 and remaining_bytes > 0:
        if remaining_bytes - BUFSIZ > 0:
            # Seek back one whole BUFSIZ
            f.seek(block * BUFSIZ, 2)
            # read BUFFER
            bunch = f.read(BUFSIZ)
        else:
            # file too small, start from beginning
            f.seek(0, 0)
            # only read what was not read
            bunch = f.read(remaining_bytes)

        bunch = bunch.decode("utf-8")
        data.insert(0, bunch)
        size -= bunch.count("\n")
        remaining_bytes -= BUFSIZ
        block -= 1

    return "\n".join("".join(data).splitlines()[-window:])


@dataclass
class ExperimentStatus:
    """Snapshot of one training run, in the order the UI outputs expect."""

    pid: Optional[int] = None
    progress: str = ""
    log: str = ""
    output_image: Optional[str] = None
    output_video: Optional[str] = None
    output_mesh: Optional[str] = None

    def tolist(self):
        """Return fields as a list matching the Gradio outputs order."""
        return [
            self.pid,
            self.progress,
            self.log,
            self.output_image,
            self.output_video,
            self.output_mesh,
        ]


EXP_ROOT_DIR = "outputs-gradio"
DEFAULT_PROMPT = "a delicious hamburger"

model_name_config = [
    ("SJC (Stable Diffusion)", "configs/gradio/sjc.yaml"),
    ("DreamFusion (DeepFloyd-IF)", "configs/gradio/dreamfusion-if.yaml"),
    ("DreamFusion (Stable Diffusion)", "configs/gradio/dreamfusion-sd.yaml"),
    ("TextMesh (DeepFloyd-IF)", "configs/gradio/textmesh-if.yaml"),
    ("Latent-NeRF (Stable Diffusion)", "configs/gradio/latentnerf.yaml"),
    ("Fantasia3D (Stable Diffusion, Geometry Only)", "configs/gradio/fantasia3d.yaml"),
]
model_list = [m[0] for m in model_name_config]
model_config: Dict[str, Dict[str, Any]] = {}
# preload each model's raw YAML and parsed config at import time
for model_name, config_path in model_name_config:
    config = {"path": config_path}
    with open(config_path) as f:
        config["yaml"] = f.read()
    config["obj"] = load_config(
        config["yaml"],
        # set name and tag to dummy values to avoid creating new directories
        cli_args=[
            "name=dummy",
            "tag=dummy",
            "use_timestamp=false",
            f"exp_root_dir={EXP_ROOT_DIR}",
            "system.prompt_processor.prompt=placeholder",
        ],
        from_string=True,
    )
    model_config[model_name] = config


def on_model_selector_change(model_name):
    """UI callback: refresh the config editor and guidance-scale slider."""
    return [
        model_config[model_name]["yaml"],
        model_config[model_name]["obj"].system.guidance.guidance_scale,
    ]


def get_current_status(process, trial_dir, alive_path):
    """Poll one training run and return its ExperimentStatus.

    Called once per second while training is active, so every file is
    opened via a context manager (the original leaked a file descriptor
    per poll for the alive, progress and log files).
    """
    status = ExperimentStatus()

    status.pid = process.pid

    # write the current timestamp to the alive file
    # the watcher will know the last active time of this process from this timestamp
    if os.path.exists(os.path.dirname(alive_path)):
        with open(alive_path, "w") as alive_fp:
            alive_fp.write(str(time.time()))

    log_path = os.path.join(trial_dir, "logs")
    progress_path = os.path.join(trial_dir, "progress")
    save_path = os.path.join(trial_dir, "save")

    # read current progress from the progress file
    # the progress file is created by GradioCallback
    if os.path.exists(progress_path):
        with open(progress_path) as progress_fp:
            status.progress = progress_fp.read()
    else:
        status.progress = "Setting up everything ..."

    # read the last 10 lines of the log file
    if os.path.exists(log_path):
        with open(log_path, "rb") as log_fp:
            status.log = tail(log_fp, window=10)
    else:
        status.log = ""

    # get the validation image and testing video if they exist
    if os.path.exists(save_path):
        images = glob.glob(os.path.join(save_path, "*.png"))
        steps = [
            int(re.match(r"it(\d+)-0\.png", os.path.basename(f)).group(1))
            for f in images
        ]
        images = sorted(list(zip(images, steps)), key=lambda x: x[1])
        if len(images) > 0:
            status.output_image = images[-1][0]

        videos = glob.glob(os.path.join(save_path, "*.mp4"))
        steps = [
            int(re.match(r"it(\d+)-test\.mp4", os.path.basename(f)).group(1))
            for f in videos
        ]
        videos = sorted(list(zip(videos, steps)), key=lambda x: x[1])
        if len(videos) > 0:
            status.output_video = videos[-1][0]

        export_dirs = glob.glob(os.path.join(save_path, "*export"))
        steps = [
            int(re.match(r"it(\d+)-export", os.path.basename(f)).group(1))
            for f in export_dirs
        ]
        export_dirs = sorted(list(zip(export_dirs, steps)), key=lambda x: x[1])
        if len(export_dirs) > 0:
            obj = glob.glob(os.path.join(export_dirs[-1][0], "*.obj"))
            if len(obj) > 0:
                # FIXME
                # seems the gr.Model3D cannot load our manually saved obj file
                # here we load the obj and save it to a temporary file using trimesh
                mesh_path = tempfile.NamedTemporaryFile(suffix=".obj", delete=False)
                trimesh.load(obj[0]).export(mesh_path.name)
                status.output_mesh = mesh_path.name

    return status


def run(
    model_name: str,
    config: str,
    prompt: str,
    guidance_scale: float,
    seed: int,
    max_steps: int,
    save_ckpt: bool,
    save_root: str,
):
    """Generator UI callback: spawn training + watcher, yield status updates."""
    # update status every 1 second
    status_update_interval = 1

    # save the config to a temporary file
    config_file = tempfile.NamedTemporaryFile()

    with open(config_file.name, "w") as f:
        f.write(config)

    # manually assign the output directory, name and tag so that we know the trial directory
    name = os.path.basename(model_config[model_name]["path"]).split(".")[0]
    tag = datetime.now().strftime("@%Y%m%d-%H%M%S")
    trial_dir = os.path.join(save_root, EXP_ROOT_DIR, name, tag)
    alive_path = os.path.join(trial_dir, "alive")

    # spawn the training process
    gpu = os.environ.get("CUDA_VISIBLE_DEVICES", "0")
    process = subprocess.Popen(
        f"python launch.py --config {config_file.name} --train --gpu {gpu} --gradio trainer.enable_progress_bar=false".split()
        + [
            f'name="{name}"',
            f'tag="{tag}"',
            f"exp_root_dir={os.path.join(save_root, EXP_ROOT_DIR)}",
            "use_timestamp=false",
            f'system.prompt_processor.prompt="{prompt}"',
            f"system.guidance.guidance_scale={guidance_scale}",
            f"seed={seed}",
            f"trainer.max_steps={max_steps}",
        ]
        + (
            ["checkpoint.every_n_train_steps=${trainer.max_steps}"]
            if save_ckpt
            else []
        ),
    )

    # spawn the watcher process
    watch_process = subprocess.Popen(
        "python gradio_app.py watch".split()
        + ["--pid", f"{process.pid}", "--trial-dir", f"{trial_dir}"]
    )

    # update status (progress, log, image, video) every status_update_interval seconds
    # button status: Run -> Stop
    while process.poll() is None:
        time.sleep(status_update_interval)
        yield get_current_status(process, trial_dir, alive_path).tolist() + [
            gr.update(visible=False),
            gr.update(value="Stop", variant="stop", visible=True),
        ]

    # wait for the processes to finish
    process.wait()
    watch_process.wait()

    # update status one last time
    # button status: Stop / Reset -> Run
    status = get_current_status(process, trial_dir, alive_path)
    status.progress = "Finished."
    yield status.tolist() + [
        gr.update(value="Run", variant="primary", visible=True),
        gr.update(visible=False),
    ]


def stop_run(pid):
    """UI callback: kill the training process and reset the buttons."""
    # kill the process
    print(f"Trying to kill process {pid} ...")
    try:
        os.kill(pid, signal.SIGKILL)
    # narrowed from a bare `except:` — still tolerates a dead/invalid pid
    # (OSError) or a None pid (TypeError), but no longer swallows
    # KeyboardInterrupt/SystemExit
    except Exception:
        print(f"Exception when killing process {pid}.")
    # button status: Stop -> Reset
    return [
        # gr.update(
        #     value="Reset (refresh the page if in queue)",
        #     variant="secondary",
        #     visible=True,
        # just ask the user to refresh the page
        # ),
        gr.update(
            value="Please Refresh the Page",
            variant="secondary",
            visible=True,
            interactive=False,
        ),
        gr.update(visible=False),
    ]


def launch(
    port,
    listen=False,
    hf_space=False,
    self_deploy=False,
    save_ckpt=False,
    save_root=".",
):
    """Build the Gradio Blocks UI and serve it on `port`."""
    self_deploy = self_deploy or "TS_SELF_DEPLOY" in os.environ

    css = """
    #config-accordion, #logs-accordion {color: black !important;}
    .dark #config-accordion, .dark #logs-accordion {color: white !important;}
    .stop {background: darkred !important;}
    """

    with gr.Blocks(
        title="threestudio - Web Demo",
        theme=gr.themes.Monochrome(),
        css=css,
    ) as demo:
        with gr.Row(equal_height=True):
            if hf_space:
                header = """
                # threestudio Text-to-3D Web Demo
                <div>
                <a style="display: inline-block;" href="https://github.com/threestudio-project/threestudio"><img src="https://img.shields.io/badge/github-%23121011.svg?style=for-the-badge&logo=github&logoColor=white"></a>
                <a style="display: inline-block;" href="https://huggingface.co/spaces/bennyguo/threestudio?duplicate=true"><img src="https://img.shields.io/badge/-Duplicate%20Space%20to%20skip%20the%20queue-blue?labelColor=white&style=flat&logo=data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAABAAAAAQCAYAAAAf8/9hAAAAAXNSR0IArs4c6QAAAP5JREFUOE+lk7FqAkEURY+ltunEgFXS2sZGIbXfEPdLlnxJyDdYB62sbbUKpLbVNhyYFzbrrA74YJlh9r079973psed0cvUD4A+4HoCjsA85X0Dfn/RBLBgBDxnQPfAEJgBY+A9gALA4tcbamSzS4xq4FOQAJgCDwV2CPKV8tZAJcAjMMkUe1vX+U+SMhfAJEHasQIWmXNN3abzDwHUrgcRGmYcgKe0bxrblHEB4E/pndMazNpSZGcsZdBlYJcEL9Afo75molJyM2FxmPgmgPqlWNLGfwZGG6UiyEvLzHYDmoPkDDiNm9JR9uboiONcBXrpY1qmgs21x1QwyZcpvxt9NS09PlsPAAAAAElFTkSuQmCC&logoWidth=14" alt="Duplicate Space"></a>
                </div>
                ### Usage
                - Select a model from the dropdown menu. If you duplicate this space and would like to use models based on DeepFloyd-IF, you need to [accept the license](https://huggingface.co/DeepFloyd/IF-I-XL-v1.0) and set `HUGGING_FACE_HUB_TOKEN` in `Repository secrets` in your space setting. You may also set `TS_SELF_DEPLOY` to enable changing arbitrary configurations.
                - Input a text prompt and hit the `Run` button to start.
                - Video and mesh export (not supported for SJC and Latent-NeRF) are available after the training process is finished.
                - **IMPORTANT NOTE: Keep this tab active when running the model.**
                """
            else:
                header = """
                # threestudio Text-to-3D Web Demo
                ### Usage
                - Select a model from the dropdown menu.
                - Input a text prompt and hit the `Run` button to start.
                - Video and mesh export (not supported for SJC and Latent-NeRF) are available after the training process is finished.
                - **IMPORTANT NOTE: Keep this tab active when running the model.**
                """
            gr.Markdown(header)

        with gr.Row(equal_height=False):
            pid = gr.State()

            with gr.Column(scale=1):
                # generation status
                status = gr.Textbox(
                    value="Hit the Run button to start.",
                    label="Status",
                    lines=1,
                    max_lines=1,
                )

                # model selection dropdown
                model_selector = gr.Dropdown(
                    value=model_list[0],
                    choices=model_list,
                    label="Select a model",
                )

                # prompt input
                prompt_input = gr.Textbox(value=DEFAULT_PROMPT, label="Input prompt")

                # guidance scale slider
                guidance_scale_input = gr.Slider(
                    minimum=0.0,
                    maximum=100.0,
                    value=model_config[model_selector.value][
                        "obj"
                    ].system.guidance.guidance_scale,
                    step=0.5,
                    label="Guidance scale",
                )

                # seed slider
                seed_input = gr.Slider(
                    minimum=0, maximum=2147483647, value=0, step=1, label="Seed"
                )

                max_steps_input = gr.Slider(
                    minimum=1,
                    maximum=20000 if self_deploy else 5000,
                    value=10000 if self_deploy else 5000,
                    step=1,
                    label="Number of training steps",
                )

                save_ckpt_checkbox = gr.Checkbox(
                    value=save_ckpt,
                    label="Save Checkpoints",
                    visible=False,
                    interactive=False,
                )

                save_root_state = gr.State(value=save_root)

                # full config viewer
                with gr.Accordion(
                    "See full configurations", open=False, elem_id="config-accordion"
                ):
                    config_editor = gr.Code(
                        value=model_config[model_selector.value]["yaml"],
                        language="yaml",
                        lines=10,
                        interactive=self_deploy,  # disable editing if in HF space
                    )

                # load config on model selection change
                model_selector.change(
                    fn=on_model_selector_change,
                    inputs=model_selector,
                    outputs=[config_editor, guidance_scale_input],
                    queue=False,
                )

                run_btn = gr.Button(value="Run", variant="primary")
                stop_btn = gr.Button(value="Stop", variant="stop", visible=False)

            with gr.Column(scale=1):
                with gr.Accordion(
                    "See terminal logs", open=False, elem_id="logs-accordion"
                ):
                    # logs
                    logs = gr.Textbox(label="Logs", lines=10)

                # validation image display
                output_image = gr.Image(value=None, label="Image")

                # testing video display
                output_video = gr.Video(value=None, label="Video")

                # export mesh display
                output_mesh = gr.Model3D(value=None, label="3D Mesh")

        run_event = run_btn.click(
            fn=run,
            inputs=[
                model_selector,
                config_editor,
                prompt_input,
                guidance_scale_input,
                seed_input,
                max_steps_input,
                save_ckpt_checkbox,
                save_root_state,
            ],
            outputs=[
                pid,
                status,
                logs,
                output_image,
                output_video,
                output_mesh,
                run_btn,
                stop_btn,
            ],
        )
        stop_btn.click(
            fn=stop_run,
            inputs=[pid],
            outputs=[run_btn, stop_btn],
            cancels=[run_event],
            queue=False,
        )

    launch_args = {"server_port": port}
    if listen:
        launch_args["server_name"] = "0.0.0.0"
    demo.queue(concurrency_count=1).launch(**launch_args)


def watch(
    pid: int,
    trial_dir: str,
    alive_timeout: int,
    wait_timeout: int,
    check_interval: int,
) -> None:
    """Watchdog: kill training process `pid` if the UI stops refreshing
    the trial's ``alive`` timestamp file for more than `alive_timeout`
    seconds (or the alive file never appears within `wait_timeout`)."""
    print(f"Spawn watcher for process {pid}")

    def timeout_handler(signum, frame):
        exit(1)

    alive_path = os.path.join(trial_dir, "alive")

    # exit if the alive file does not appear within wait_timeout seconds
    signal.signal(signal.SIGALRM, timeout_handler)
    signal.alarm(wait_timeout)

    def loop_find_progress_file():
        while True:
            if not os.path.exists(alive_path):
                time.sleep(check_interval)
            else:
                # alive file found; cancel the pending alarm
                signal.alarm(0)
                return

    def loop_check_alive():
        while True:
            if not psutil.pid_exists(pid):
                print(f"Process {pid} not exists, watcher exits.")
                cleanup_and_exit()
            try:
                with open(alive_path) as alive_fp:
                    alive_timestamp = float(alive_fp.read())
            # narrowed from a bare `except:`; the file may be mid-write or
            # briefly missing — sleep before retrying (the original
            # `continue` skipped the sleep and busy-spun on persistent
            # failure)
            except (ValueError, OSError):
                time.sleep(check_interval)
                continue
            if time.time() - alive_timestamp > alive_timeout:
                print(f"Alive timeout for process {pid}, killed.")
                try:
                    os.kill(pid, signal.SIGKILL)
                except Exception:
                    print(f"Exception when killing process {pid}.")
                cleanup_and_exit()
            time.sleep(check_interval)

    def cleanup_and_exit():
        exit(0)

    # loop until alive file is found, or alive_timeout is reached
    loop_find_progress_file()
    # kill the process if it is not accessed for alive_timeout seconds
    loop_check_alive()


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("operation", type=str, choices=["launch", "watch"])
    args, extra = parser.parse_known_args()
    if args.operation == "launch":
        parser.add_argument("--listen", action="store_true")
        parser.add_argument("--hf-space", action="store_true")
        parser.add_argument("--self-deploy", action="store_true")
        parser.add_argument("--save-ckpt", action="store_true")  # unused
        parser.add_argument("--save-root", type=str, default=".")
        parser.add_argument("--port", type=int, default=7860)
        args = parser.parse_args()
        launch(
            args.port,
            listen=args.listen,
            hf_space=args.hf_space,
            self_deploy=args.self_deploy,
            save_ckpt=args.save_ckpt,
            save_root=args.save_root,
        )
    if args.operation == "watch":
        parser.add_argument("--pid", type=int)
        parser.add_argument("--trial-dir", type=str)
        parser.add_argument("--alive-timeout", type=int, default=10)
        parser.add_argument("--wait-timeout", type=int, default=10)
        parser.add_argument("--check-interval", type=int, default=1)
        args = parser.parse_args()
        watch(
            args.pid,
            args.trial_dir,
            alive_timeout=args.alive_timeout,
            wait_timeout=args.wait_timeout,
            check_interval=args.check_interval,
        )
python
MIT
eeb5c648e6c2a25c8f6f8038edfe75d73c811614
2026-01-05T07:14:33.752935Z
false
liuff19/DreamReward
https://github.com/liuff19/DreamReward/blob/eeb5c648e6c2a25c8f6f8038edfe75d73c811614/launch.py
launch.py
import argparse import contextlib import logging import os import sys class ColoredFilter(logging.Filter): """ A logging filter to add color to certain log levels. """ RESET = "\033[0m" RED = "\033[31m" GREEN = "\033[32m" YELLOW = "\033[33m" BLUE = "\033[34m" MAGENTA = "\033[35m" CYAN = "\033[36m" COLORS = { "WARNING": YELLOW, "INFO": GREEN, "DEBUG": BLUE, "CRITICAL": MAGENTA, "ERROR": RED, } RESET = "\x1b[0m" def __init__(self): super().__init__() def filter(self, record): if record.levelname in self.COLORS: color_start = self.COLORS[record.levelname] record.levelname = f"{color_start}[{record.levelname}]" record.msg = f"{record.msg}{self.RESET}" return True def main(args, extras) -> None: # set CUDA_VISIBLE_DEVICES if needed, then import pytorch-lightning os.environ["CUDA_DEVICE_ORDER"] = "PCI_BUS_ID" env_gpus_str = os.environ.get("CUDA_VISIBLE_DEVICES", None) env_gpus = list(env_gpus_str.split(",")) if env_gpus_str else [] selected_gpus = [0] # Always rely on CUDA_VISIBLE_DEVICES if specific GPU ID(s) are specified. # As far as Pytorch Lightning is concerned, we always use all available GPUs # (possibly filtered by CUDA_VISIBLE_DEVICES). devices = -1 if len(env_gpus) > 0: # CUDA_VISIBLE_DEVICES was set already, e.g. within SLURM srun or higher-level script. 
n_gpus = len(env_gpus) else: selected_gpus = list(args.gpu.split(",")) n_gpus = len(selected_gpus) os.environ["CUDA_VISIBLE_DEVICES"] = args.gpu import pytorch_lightning as pl import torch from pytorch_lightning import Trainer from pytorch_lightning.callbacks import LearningRateMonitor, ModelCheckpoint from pytorch_lightning.loggers import CSVLogger, TensorBoardLogger from pytorch_lightning.utilities.rank_zero import rank_zero_only if args.typecheck: from jaxtyping import install_import_hook install_import_hook("threestudio", "typeguard.typechecked") import threestudio from threestudio.systems.base import BaseSystem from threestudio.utils.callbacks import ( CodeSnapshotCallback, ConfigSnapshotCallback, CustomProgressBar, ProgressCallback, ) from threestudio.utils.config import ExperimentConfig, load_config from threestudio.utils.misc import get_rank from threestudio.utils.typing import Optional logger = logging.getLogger("pytorch_lightning") if args.verbose: logger.setLevel(logging.DEBUG) for handler in logger.handlers: if handler.stream == sys.stderr: # type: ignore if not args.gradio: handler.setFormatter(logging.Formatter("%(levelname)s %(message)s")) handler.addFilter(ColoredFilter()) else: handler.setFormatter(logging.Formatter("[%(levelname)s] %(message)s")) # parse YAML config to OmegaConf cfg: ExperimentConfig cfg = load_config(args.config, cli_args=extras, n_gpus=n_gpus) # set a different seed for each device pl.seed_everything(cfg.seed + get_rank(), workers=True) dm = threestudio.find(cfg.data_type)(cfg.data) system: BaseSystem = threestudio.find(cfg.system_type)( cfg.system, resumed=cfg.resume is not None ) cfg.trial_dir = cfg.trial_dir.split("@")[0] system.set_save_dir(os.path.join(cfg.trial_dir, "save")) if args.gradio: fh = logging.FileHandler(os.path.join(cfg.trial_dir, "logs")) fh.setLevel(logging.INFO) if args.verbose: fh.setLevel(logging.DEBUG) fh.setFormatter(logging.Formatter("[%(levelname)s] %(message)s")) logger.addHandler(fh) callbacks = [] 
if args.train: callbacks += [ ModelCheckpoint( dirpath=os.path.join(cfg.trial_dir, "ckpts"), **cfg.checkpoint ), LearningRateMonitor(logging_interval="step"), CodeSnapshotCallback( os.path.join(cfg.trial_dir, "code"), use_version=False ), ConfigSnapshotCallback( args.config, cfg, os.path.join(cfg.trial_dir, "configs"), use_version=False, ), ] if args.gradio: callbacks += [ ProgressCallback(save_path=os.path.join(cfg.trial_dir, "progress")) ] else: callbacks += [CustomProgressBar(refresh_rate=1)] def write_to_text(file, lines): with open(file, "w") as f: for line in lines: f.write(line + "\n") loggers = [] if args.train: # make tensorboard logging dir to suppress warning rank_zero_only( lambda: os.makedirs(os.path.join(cfg.trial_dir, "tb_logs"), exist_ok=True) )() loggers += [ TensorBoardLogger(cfg.trial_dir, name="tb_logs"), CSVLogger(cfg.trial_dir, name="csv_logs"), ] + system.get_loggers() rank_zero_only( lambda: write_to_text( os.path.join(cfg.trial_dir, "cmd.txt"), ["python " + " ".join(sys.argv), str(args)], ) )() trainer = Trainer( callbacks=callbacks, logger=loggers, inference_mode=False, accelerator="gpu", devices=devices, **cfg.trainer, ) def set_system_status(system: BaseSystem, ckpt_path: Optional[str]): if ckpt_path is None: return ckpt = torch.load(ckpt_path, map_location="cpu") system.set_resume_status(ckpt["epoch"], ckpt["global_step"]) if args.train: trainer.fit(system, datamodule=dm, ckpt_path=cfg.resume) trainer.test(system, datamodule=dm) if args.gradio: # also export assets if in gradio mode trainer.predict(system, datamodule=dm) elif args.validate: # manually set epoch and global_step as they cannot be automatically resumed set_system_status(system, cfg.resume) trainer.validate(system, datamodule=dm, ckpt_path=cfg.resume) elif args.test: # manually set epoch and global_step as they cannot be automatically resumed set_system_status(system, cfg.resume) trainer.test(system, datamodule=dm, ckpt_path=cfg.resume) elif args.export: 
set_system_status(system, cfg.resume) trainer.predict(system, datamodule=dm, ckpt_path=cfg.resume) if __name__ == "__main__": parser = argparse.ArgumentParser() parser.add_argument("--config", required=True, help="path to config file") parser.add_argument( "--gpu", default="0", help="GPU(s) to be used. 0 means use the 1st available GPU. " "1,2 means use the 2nd and 3rd available GPU. " "If CUDA_VISIBLE_DEVICES is set before calling `launch.py`, " "this argument is ignored and all available GPUs are always used.", ) group = parser.add_mutually_exclusive_group(required=True) group.add_argument("--train", action="store_true") group.add_argument("--validate", action="store_true") group.add_argument("--test", action="store_true") group.add_argument("--export", action="store_true") parser.add_argument( "--gradio", action="store_true", help="if true, run in gradio mode" ) parser.add_argument( "--verbose", action="store_true", help="if true, set logging level to DEBUG" ) parser.add_argument( "--typecheck", action="store_true", help="whether to enable dynamic type checking", ) args, extras = parser.parse_known_args() if args.gradio: # FIXME: no effect, stdout is not captured with contextlib.redirect_stdout(sys.stderr): main(args, extras) else: main(args, extras)
python
MIT
eeb5c648e6c2a25c8f6f8038edfe75d73c811614
2026-01-05T07:14:33.752935Z
false
liuff19/DreamReward
https://github.com/liuff19/DreamReward/blob/eeb5c648e6c2a25c8f6f8038edfe75d73c811614/Reward3D/Reward3D.py
Reward3D/Reward3D.py
''' * Adapted from ImageReward (https://github.com/THUDM/ImageReward) ''' import os import torch import torch.nn as nn from PIL import Image from .models.BLIP.blip_pretrain import BLIP_Pretrain from torchvision.transforms import Compose, Resize, CenterCrop, ToTensor, Normalize try: from torchvision.transforms import InterpolationMode BICUBIC = InterpolationMode.BICUBIC except ImportError: BICUBIC = Image.BICUBIC def _convert_image_to_rgb(image): return image.convert("RGB") def _transform(n_px): return Compose([ Resize(n_px, interpolation=BICUBIC), CenterCrop(n_px), _convert_image_to_rgb, ToTensor(), Normalize((0.48145466, 0.4578275, 0.40821073), (0.26862954, 0.26130258, 0.27577711)), ]) class Scorer(nn.Module): def __init__(self): super(Scorer, self).__init__() self.fc1 = nn.Linear(512, 256) self.dropout1 = nn.Dropout(p=0.2) self.fc2 = nn.Linear(256, 64) self.dropout2 = nn.Dropout(p=0.2) self.fc3 = nn.Linear(64, 1) def forward(self, x): x = self.fc1(x) x = self.dropout1(x) x = self.fc2(x) x = self.dropout2(x) rewards = self.fc3(x) return rewards class MLP(nn.Module): def __init__(self, input_size): super().__init__() self.input_size = input_size self.layers = nn.Sequential( nn.Linear(self.input_size, 1024), nn.Dropout(0.2), nn.Linear(1024, 128), nn.Dropout(0.2), nn.Linear(128, 64), nn.Dropout(0.1), nn.Linear(64, 16), nn.Linear(16, 1) ) for name, param in self.layers.named_parameters(): if 'weight' in name: nn.init.normal_(param, mean=0.0, std=1.0/(self.input_size+1)) if 'bias' in name: nn.init.constant_(param, val=0) def forward(self, input,get_feature=None): if get_feature ==None: return self.layers(input) else: for layer in self.layers: input = layer(input) if isinstance(layer, nn.Linear) and input.size(1) == get_feature: return input class CrossViewFusion(nn.Module): def __init__(self, hidden_dim=768, num_views=4): super().__init__() self.num_views = num_views self.self_attn = nn.MultiheadAttention( embed_dim=hidden_dim, num_heads=12, dropout=0.1, 
batch_first=False ) self.fusion_gate = nn.Sequential( nn.Linear(2*hidden_dim, hidden_dim), nn.Sigmoid() ) def forward(self, x): total_samples, dim = x.shape bs = total_samples // self.num_views seq_features = x.view(bs, self.num_views, dim).permute(1, 0, 2) attn_output, _ = self.self_attn( query=seq_features, key=seq_features, value=seq_features ) combined = torch.cat([seq_features, attn_output], dim=-1) gate = self.fusion_gate(combined) fused_seq = gate * attn_output + (1 - gate) * seq_features return fused_seq.mean(dim=0) class Reward3D(nn.Module): def __init__(self, med_config, device='cpu'): super().__init__() self.device = device self.blip = BLIP_Pretrain(image_size=224, vit='large', med_config=med_config) self.preprocess = _transform(224) self.cross_view_adapter = CrossViewFusion(hidden_dim=768, num_views=4) self.mlp = MLP(768) self.mean = 0.16717362830052426 self.std = 5 self.freeze_except_cross_adapter() def freeze_except_cross_adapter(self): for param in self.parameters(): param.requires_grad = False for param in self.cross_view_adapter.parameters(): param.requires_grad = True def forward(self, image,prompt_ids, prompt_attention_mask): image_embeds = self.blip.visual_encoder(image) image_atts = torch.ones(image_embeds.size()[:-1],dtype=torch.long).to(self.device) text_output = self.blip.text_encoder(prompt_ids, attention_mask = prompt_attention_mask, encoder_hidden_states = image_embeds, encoder_attention_mask = image_atts, return_dict = True, ) view_features = text_output.last_hidden_state[:,0,:] fused_feature = self.cross_view_adapter(view_features) rewards = self.mlp(fused_feature) rewards = (rewards - self.mean) / self.std return rewards class Reward3D_(nn.Module): def __init__(self, med_config, device='cpu'): super().__init__() self.device = device self.blip = BLIP_Pretrain(image_size=224, vit='large', med_config=med_config) self.preprocess = _transform(224) self.mlp = MLP(768) self.Scorer = Scorer() self.Scorer.eval() self.mean = 0.16717362830052426 
self.std = 1.0333394966054072 def forward(self, image,prompt_ids, prompt_attention_mask): image_embeds = self.blip.visual_encoder(image) image_atts = torch.ones(image_embeds.size()[:-1],dtype=torch.long).to(self.device) text_output = self.blip.text_encoder(prompt_ids, attention_mask = prompt_attention_mask, encoder_hidden_states = image_embeds, encoder_attention_mask = image_atts, return_dict = True, ) view_features = text_output.last_hidden_state[:,0,:] rw_features = self.mlp(view_features,get_feature=128) rw_features = rw_features.view(rw_features.shape[0]//4, 4, 128) rw_features = torch.cat([rw_features[:, i, :] for i in range(4)], dim=1) rewards = self.Scorer(rw_features) rewards = (rewards - self.mean) / self.std return rewards
python
MIT
eeb5c648e6c2a25c8f6f8038edfe75d73c811614
2026-01-05T07:14:33.752935Z
false
liuff19/DreamReward
https://github.com/liuff19/DreamReward/blob/eeb5c648e6c2a25c8f6f8038edfe75d73c811614/Reward3D/__init__.py
Reward3D/__init__.py
from .BLIP import * from .Reward3D import *
python
MIT
eeb5c648e6c2a25c8f6f8038edfe75d73c811614
2026-01-05T07:14:33.752935Z
false
liuff19/DreamReward
https://github.com/liuff19/DreamReward/blob/eeb5c648e6c2a25c8f6f8038edfe75d73c811614/Reward3D/BLIP/vit.py
Reward3D/BLIP/vit.py
''' * Adapted from BLIP (https://github.com/salesforce/BLIP) * Based on timm code base * https://github.com/rwightman/pytorch-image-models/tree/master/timm ''' import torch import torch.nn as nn import torch.nn.functional as F from functools import partial from timm.models.vision_transformer import _cfg, PatchEmbed from timm.models.registry import register_model from timm.models.layers import trunc_normal_, DropPath from timm.models.helpers import named_apply, adapt_input_conv from fairscale.nn.checkpoint.checkpoint_activations import checkpoint_wrapper class Mlp(nn.Module): """ MLP as used in Vision Transformer, MLP-Mixer and related networks """ def __init__(self, in_features, hidden_features=None, out_features=None, act_layer=nn.GELU, drop=0.): super().__init__() out_features = out_features or in_features hidden_features = hidden_features or in_features self.fc1 = nn.Linear(in_features, hidden_features) self.act = act_layer() self.fc2 = nn.Linear(hidden_features, out_features) self.drop = nn.Dropout(drop) def forward(self, x): x = self.fc1(x) x = self.act(x) x = self.drop(x) x = self.fc2(x) x = self.drop(x) return x class Attention(nn.Module): def __init__(self, dim, num_heads=8, qkv_bias=False, qk_scale=None, attn_drop=0., proj_drop=0.): super().__init__() self.num_heads = num_heads head_dim = dim // num_heads # NOTE scale factor was wrong in my original version, can set manually to be compat with prev weights self.scale = qk_scale or head_dim ** -0.5 self.qkv = nn.Linear(dim, dim * 3, bias=qkv_bias) self.attn_drop = nn.Dropout(attn_drop) self.proj = nn.Linear(dim, dim) self.proj_drop = nn.Dropout(proj_drop) self.attn_gradients = None self.attention_map = None def save_attn_gradients(self, attn_gradients): self.attn_gradients = attn_gradients def get_attn_gradients(self): return self.attn_gradients def save_attention_map(self, attention_map): self.attention_map = attention_map def get_attention_map(self): return self.attention_map def forward(self, x, 
register_hook=False): B, N, C = x.shape qkv = self.qkv(x).reshape(B, N, 3, self.num_heads, C // self.num_heads).permute(2, 0, 3, 1, 4) q, k, v = qkv[0], qkv[1], qkv[2] # make torchscript happy (cannot use tensor as tuple) attn = (q @ k.transpose(-2, -1)) * self.scale attn = attn.softmax(dim=-1) attn = self.attn_drop(attn) if register_hook: self.save_attention_map(attn) attn.register_hook(self.save_attn_gradients) x = (attn @ v).transpose(1, 2).reshape(B, N, C) x = self.proj(x) x = self.proj_drop(x) return x class Block(nn.Module): def __init__(self, dim, num_heads, mlp_ratio=4., qkv_bias=False, qk_scale=None, drop=0., attn_drop=0., drop_path=0., act_layer=nn.GELU, norm_layer=nn.LayerNorm, use_grad_checkpointing=False): super().__init__() self.norm1 = norm_layer(dim) self.attn = Attention( dim, num_heads=num_heads, qkv_bias=qkv_bias, qk_scale=qk_scale, attn_drop=attn_drop, proj_drop=drop) # NOTE: drop path for stochastic depth, we shall see if this is better than dropout here self.drop_path = DropPath(drop_path) if drop_path > 0. 
else nn.Identity() self.norm2 = norm_layer(dim) mlp_hidden_dim = int(dim * mlp_ratio) self.mlp = Mlp(in_features=dim, hidden_features=mlp_hidden_dim, act_layer=act_layer, drop=drop) if use_grad_checkpointing: self.attn = checkpoint_wrapper(self.attn) self.mlp = checkpoint_wrapper(self.mlp) def forward(self, x, register_hook=False): x = x + self.drop_path(self.attn(self.norm1(x), register_hook=register_hook)) x = x + self.drop_path(self.mlp(self.norm2(x))) return x class VisionTransformer(nn.Module): """ Vision Transformer A PyTorch impl of : `An Image is Worth 16x16 Words: Transformers for Image Recognition at Scale` - https://arxiv.org/abs/2010.11929 """ def __init__(self, img_size=224, patch_size=16, in_chans=3, num_classes=1000, embed_dim=768, depth=12, num_heads=12, mlp_ratio=4., qkv_bias=True, qk_scale=None, representation_size=None, drop_rate=0., attn_drop_rate=0., drop_path_rate=0., norm_layer=None, use_grad_checkpointing=False, ckpt_layer=0): """ Args: img_size (int, tuple): input image size patch_size (int, tuple): patch size in_chans (int): number of input channels num_classes (int): number of classes for classification head embed_dim (int): embedding dimension depth (int): depth of transformer num_heads (int): number of attention heads mlp_ratio (int): ratio of mlp hidden dim to embedding dim qkv_bias (bool): enable bias for qkv if True qk_scale (float): override default qk scale of head_dim ** -0.5 if set representation_size (Optional[int]): enable and set representation layer (pre-logits) to this value if set drop_rate (float): dropout rate attn_drop_rate (float): attention dropout rate drop_path_rate (float): stochastic depth rate norm_layer: (nn.Module): normalization layer """ super().__init__() self.num_features = self.embed_dim = embed_dim # num_features for consistency with other models norm_layer = norm_layer or partial(nn.LayerNorm, eps=1e-6) self.patch_embed = PatchEmbed( img_size=img_size, patch_size=patch_size, in_chans=in_chans, 
embed_dim=embed_dim) num_patches = self.patch_embed.num_patches self.cls_token = nn.Parameter(torch.zeros(1, 1, embed_dim)) self.pos_embed = nn.Parameter(torch.zeros(1, num_patches + 1, embed_dim)) self.pos_drop = nn.Dropout(p=drop_rate) dpr = [x.item() for x in torch.linspace(0, drop_path_rate, depth)] # stochastic depth decay rule self.blocks = nn.ModuleList([ Block( dim=embed_dim, num_heads=num_heads, mlp_ratio=mlp_ratio, qkv_bias=qkv_bias, qk_scale=qk_scale, drop=drop_rate, attn_drop=attn_drop_rate, drop_path=dpr[i], norm_layer=norm_layer, use_grad_checkpointing=(use_grad_checkpointing and i>=depth-ckpt_layer) ) for i in range(depth)]) self.norm = norm_layer(embed_dim) trunc_normal_(self.pos_embed, std=.02) trunc_normal_(self.cls_token, std=.02) self.apply(self._init_weights) def _init_weights(self, m): if isinstance(m, nn.Linear): trunc_normal_(m.weight, std=.02) if isinstance(m, nn.Linear) and m.bias is not None: nn.init.constant_(m.bias, 0) elif isinstance(m, nn.LayerNorm): nn.init.constant_(m.bias, 0) nn.init.constant_(m.weight, 1.0) @torch.jit.ignore def no_weight_decay(self): return {'pos_embed', 'cls_token'} def forward(self, x, register_blk=-1): B = x.shape[0] x = self.patch_embed(x) cls_tokens = self.cls_token.expand(B, -1, -1) # stole cls_tokens impl from Phil Wang, thanks x = torch.cat((cls_tokens, x), dim=1) x = x + self.pos_embed[:,:x.size(1),:] x = self.pos_drop(x) for i,blk in enumerate(self.blocks): x = blk(x, register_blk==i) x = self.norm(x) return x @torch.jit.ignore() def load_pretrained(self, checkpoint_path, prefix=''): _load_weights(self, checkpoint_path, prefix) @torch.no_grad() def _load_weights(model: VisionTransformer, checkpoint_path: str, prefix: str = ''): """ Load weights from .npz checkpoints for official Google Brain Flax implementation """ import numpy as np def _n2p(w, t=True): if w.ndim == 4 and w.shape[0] == w.shape[1] == w.shape[2] == 1: w = w.flatten() if t: if w.ndim == 4: w = w.transpose([3, 2, 0, 1]) elif w.ndim == 3: w 
= w.transpose([2, 0, 1]) elif w.ndim == 2: w = w.transpose([1, 0]) return torch.from_numpy(w) w = np.load(checkpoint_path) if not prefix and 'opt/target/embedding/kernel' in w: prefix = 'opt/target/' if hasattr(model.patch_embed, 'backbone'): # hybrid backbone = model.patch_embed.backbone stem_only = not hasattr(backbone, 'stem') stem = backbone if stem_only else backbone.stem stem.conv.weight.copy_(adapt_input_conv(stem.conv.weight.shape[1], _n2p(w[f'{prefix}conv_root/kernel']))) stem.norm.weight.copy_(_n2p(w[f'{prefix}gn_root/scale'])) stem.norm.bias.copy_(_n2p(w[f'{prefix}gn_root/bias'])) if not stem_only: for i, stage in enumerate(backbone.stages): for j, block in enumerate(stage.blocks): bp = f'{prefix}block{i + 1}/unit{j + 1}/' for r in range(3): getattr(block, f'conv{r + 1}').weight.copy_(_n2p(w[f'{bp}conv{r + 1}/kernel'])) getattr(block, f'norm{r + 1}').weight.copy_(_n2p(w[f'{bp}gn{r + 1}/scale'])) getattr(block, f'norm{r + 1}').bias.copy_(_n2p(w[f'{bp}gn{r + 1}/bias'])) if block.downsample is not None: block.downsample.conv.weight.copy_(_n2p(w[f'{bp}conv_proj/kernel'])) block.downsample.norm.weight.copy_(_n2p(w[f'{bp}gn_proj/scale'])) block.downsample.norm.bias.copy_(_n2p(w[f'{bp}gn_proj/bias'])) embed_conv_w = _n2p(w[f'{prefix}embedding/kernel']) else: embed_conv_w = adapt_input_conv( model.patch_embed.proj.weight.shape[1], _n2p(w[f'{prefix}embedding/kernel'])) model.patch_embed.proj.weight.copy_(embed_conv_w) model.patch_embed.proj.bias.copy_(_n2p(w[f'{prefix}embedding/bias'])) model.cls_token.copy_(_n2p(w[f'{prefix}cls'], t=False)) pos_embed_w = _n2p(w[f'{prefix}Transformer/posembed_input/pos_embedding'], t=False) if pos_embed_w.shape != model.pos_embed.shape: pos_embed_w = resize_pos_embed( # resize pos embedding when different size from pretrained weights pos_embed_w, model.pos_embed, getattr(model, 'num_tokens', 1), model.patch_embed.grid_size) model.pos_embed.copy_(pos_embed_w) 
model.norm.weight.copy_(_n2p(w[f'{prefix}Transformer/encoder_norm/scale'])) model.norm.bias.copy_(_n2p(w[f'{prefix}Transformer/encoder_norm/bias'])) # if isinstance(model.head, nn.Linear) and model.head.bias.shape[0] == w[f'{prefix}head/bias'].shape[-1]: # model.head.weight.copy_(_n2p(w[f'{prefix}head/kernel'])) # model.head.bias.copy_(_n2p(w[f'{prefix}head/bias'])) # if isinstance(getattr(model.pre_logits, 'fc', None), nn.Linear) and f'{prefix}pre_logits/bias' in w: # model.pre_logits.fc.weight.copy_(_n2p(w[f'{prefix}pre_logits/kernel'])) # model.pre_logits.fc.bias.copy_(_n2p(w[f'{prefix}pre_logits/bias'])) for i, block in enumerate(model.blocks.children()): block_prefix = f'{prefix}Transformer/encoderblock_{i}/' mha_prefix = block_prefix + 'MultiHeadDotProductAttention_1/' block.norm1.weight.copy_(_n2p(w[f'{block_prefix}LayerNorm_0/scale'])) block.norm1.bias.copy_(_n2p(w[f'{block_prefix}LayerNorm_0/bias'])) block.attn.qkv.weight.copy_(torch.cat([ _n2p(w[f'{mha_prefix}{n}/kernel'], t=False).flatten(1).T for n in ('query', 'key', 'value')])) block.attn.qkv.bias.copy_(torch.cat([ _n2p(w[f'{mha_prefix}{n}/bias'], t=False).reshape(-1) for n in ('query', 'key', 'value')])) block.attn.proj.weight.copy_(_n2p(w[f'{mha_prefix}out/kernel']).flatten(1)) block.attn.proj.bias.copy_(_n2p(w[f'{mha_prefix}out/bias'])) for r in range(2): getattr(block.mlp, f'fc{r + 1}').weight.copy_(_n2p(w[f'{block_prefix}MlpBlock_3/Dense_{r}/kernel'])) getattr(block.mlp, f'fc{r + 1}').bias.copy_(_n2p(w[f'{block_prefix}MlpBlock_3/Dense_{r}/bias'])) block.norm2.weight.copy_(_n2p(w[f'{block_prefix}LayerNorm_2/scale'])) block.norm2.bias.copy_(_n2p(w[f'{block_prefix}LayerNorm_2/bias'])) def interpolate_pos_embed(pos_embed_checkpoint, visual_encoder): # interpolate position embedding embedding_size = pos_embed_checkpoint.shape[-1] num_patches = visual_encoder.patch_embed.num_patches num_extra_tokens = visual_encoder.pos_embed.shape[-2] - num_patches # height (== width) for the checkpoint position 
embedding orig_size = int((pos_embed_checkpoint.shape[-2] - num_extra_tokens) ** 0.5) # height (== width) for the new position embedding new_size = int(num_patches ** 0.5) if orig_size!=new_size: # class_token and dist_token are kept unchanged extra_tokens = pos_embed_checkpoint[:, :num_extra_tokens] # only the position tokens are interpolated pos_tokens = pos_embed_checkpoint[:, num_extra_tokens:] pos_tokens = pos_tokens.reshape(-1, orig_size, orig_size, embedding_size).permute(0, 3, 1, 2) pos_tokens = torch.nn.functional.interpolate( pos_tokens, size=(new_size, new_size), mode='bicubic', align_corners=False) pos_tokens = pos_tokens.permute(0, 2, 3, 1).flatten(1, 2) new_pos_embed = torch.cat((extra_tokens, pos_tokens), dim=1) print('reshape position embedding from %d to %d'%(orig_size ** 2,new_size ** 2)) return new_pos_embed else: return pos_embed_checkpoint
python
MIT
eeb5c648e6c2a25c8f6f8038edfe75d73c811614
2026-01-05T07:14:33.752935Z
false
liuff19/DreamReward
https://github.com/liuff19/DreamReward/blob/eeb5c648e6c2a25c8f6f8038edfe75d73c811614/Reward3D/BLIP/blip_pretrain.py
Reward3D/BLIP/blip_pretrain.py
''' * Adapted from BLIP (https://github.com/salesforce/BLIP) ''' import transformers transformers.logging.set_verbosity_error() from torch import nn import os from .med import BertConfig, BertModel from .blip import create_vit, init_tokenizer class BLIP_Pretrain(nn.Module): def __init__(self, med_config = "med_config.json", image_size = 224, vit = 'base', vit_grad_ckpt = False, vit_ckpt_layer = 0, embed_dim = 256, queue_size = 57600, momentum = 0.995, ): """ Args: med_config (str): path for the mixture of encoder-decoder model's configuration file image_size (int): input image size vit (str): model size of vision transformer """ super().__init__() self.visual_encoder, vision_width = create_vit(vit,image_size, vit_grad_ckpt, vit_ckpt_layer, 0) self.tokenizer = init_tokenizer() encoder_config = BertConfig.from_json_file(med_config) encoder_config.encoder_width = vision_width self.text_encoder = BertModel(config=encoder_config, add_pooling_layer=False) text_width = self.text_encoder.config.hidden_size self.vision_proj = nn.Linear(vision_width, embed_dim) self.text_proj = nn.Linear(text_width, embed_dim)
python
MIT
eeb5c648e6c2a25c8f6f8038edfe75d73c811614
2026-01-05T07:14:33.752935Z
false
liuff19/DreamReward
https://github.com/liuff19/DreamReward/blob/eeb5c648e6c2a25c8f6f8038edfe75d73c811614/Reward3D/BLIP/med.py
Reward3D/BLIP/med.py
''' * Adapted from BLIP (https://github.com/salesforce/BLIP) * Based on huggingface code base * https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/bert ''' import math from typing import Tuple import torch from torch import Tensor, device, nn import torch.utils.checkpoint from torch import nn from torch.nn import CrossEntropyLoss from transformers.activations import ACT2FN from transformers.file_utils import ( ModelOutput, ) from transformers.modeling_outputs import ( BaseModelOutputWithPastAndCrossAttentions, BaseModelOutputWithPoolingAndCrossAttentions, CausalLMOutputWithCrossAttentions, MaskedLMOutput, MultipleChoiceModelOutput, NextSentencePredictorOutput, QuestionAnsweringModelOutput, SequenceClassifierOutput, TokenClassifierOutput, ) from transformers.modeling_utils import ( PreTrainedModel, apply_chunking_to_forward, find_pruneable_heads_and_indices, prune_linear_layer, ) from transformers.utils import logging from transformers.models.bert.configuration_bert import BertConfig logger = logging.get_logger(__name__) class BertEmbeddings(nn.Module): """Construct the embeddings from word and position embeddings.""" def __init__(self, config): super().__init__() self.word_embeddings = nn.Embedding(config.vocab_size, config.hidden_size, padding_idx=config.pad_token_id) self.position_embeddings = nn.Embedding(config.max_position_embeddings, config.hidden_size) # self.LayerNorm is not snake-cased to stick with TensorFlow model variable name and be able to load # any TensorFlow checkpoint file self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps) self.dropout = nn.Dropout(config.hidden_dropout_prob) # position_ids (1, len position emb) is contiguous in memory and exported when serialized self.register_buffer("position_ids", torch.arange(config.max_position_embeddings).expand((1, -1))) self.position_embedding_type = getattr(config, "position_embedding_type", "absolute") self.config = config def forward( self, 
input_ids=None, position_ids=None, inputs_embeds=None, past_key_values_length=0 ): if input_ids is not None: input_shape = input_ids.size() else: input_shape = inputs_embeds.size()[:-1] seq_length = input_shape[1] if position_ids is None: position_ids = self.position_ids[:, past_key_values_length : seq_length + past_key_values_length] if inputs_embeds is None: inputs_embeds = self.word_embeddings(input_ids) embeddings = inputs_embeds if self.position_embedding_type == "absolute": position_embeddings = self.position_embeddings(position_ids) embeddings += position_embeddings embeddings = self.LayerNorm(embeddings) embeddings = self.dropout(embeddings) return embeddings class BertSelfAttention(nn.Module): def __init__(self, config, is_cross_attention): super().__init__() self.config = config if config.hidden_size % config.num_attention_heads != 0 and not hasattr(config, "embedding_size"): raise ValueError( "The hidden size (%d) is not a multiple of the number of attention " "heads (%d)" % (config.hidden_size, config.num_attention_heads) ) self.num_attention_heads = config.num_attention_heads self.attention_head_size = int(config.hidden_size / config.num_attention_heads) self.all_head_size = self.num_attention_heads * self.attention_head_size self.query = nn.Linear(config.hidden_size, self.all_head_size) if is_cross_attention: self.key = nn.Linear(config.encoder_width, self.all_head_size) self.value = nn.Linear(config.encoder_width, self.all_head_size) else: self.key = nn.Linear(config.hidden_size, self.all_head_size) self.value = nn.Linear(config.hidden_size, self.all_head_size) self.dropout = nn.Dropout(config.attention_probs_dropout_prob) self.position_embedding_type = getattr(config, "position_embedding_type", "absolute") if self.position_embedding_type == "relative_key" or self.position_embedding_type == "relative_key_query": self.max_position_embeddings = config.max_position_embeddings self.distance_embedding = nn.Embedding(2 * config.max_position_embeddings - 1, 
self.attention_head_size) self.save_attention = False def save_attn_gradients(self, attn_gradients): self.attn_gradients = attn_gradients def get_attn_gradients(self): return self.attn_gradients def save_attention_map(self, attention_map): self.attention_map = attention_map def get_attention_map(self): return self.attention_map def transpose_for_scores(self, x): new_x_shape = x.size()[:-1] + (self.num_attention_heads, self.attention_head_size) x = x.view(*new_x_shape) return x.permute(0, 2, 1, 3) def forward( self, hidden_states, attention_mask=None, head_mask=None, encoder_hidden_states=None, encoder_attention_mask=None, past_key_value=None, output_attentions=False, ): mixed_query_layer = self.query(hidden_states) # If this is instantiated as a cross-attention module, the keys # and values come from an encoder; the attention mask needs to be # such that the encoder's padding tokens are not attended to. is_cross_attention = encoder_hidden_states is not None if is_cross_attention: key_layer = self.transpose_for_scores(self.key(encoder_hidden_states)) value_layer = self.transpose_for_scores(self.value(encoder_hidden_states)) attention_mask = encoder_attention_mask elif past_key_value is not None: key_layer = self.transpose_for_scores(self.key(hidden_states)) value_layer = self.transpose_for_scores(self.value(hidden_states)) key_layer = torch.cat([past_key_value[0], key_layer], dim=2) value_layer = torch.cat([past_key_value[1], value_layer], dim=2) else: key_layer = self.transpose_for_scores(self.key(hidden_states)) value_layer = self.transpose_for_scores(self.value(hidden_states)) query_layer = self.transpose_for_scores(mixed_query_layer) past_key_value = (key_layer, value_layer) # Take the dot product between "query" and "key" to get the raw attention scores. 
attention_scores = torch.matmul(query_layer, key_layer.transpose(-1, -2)) if self.position_embedding_type == "relative_key" or self.position_embedding_type == "relative_key_query": seq_length = hidden_states.size()[1] position_ids_l = torch.arange(seq_length, dtype=torch.long, device=hidden_states.device).view(-1, 1) position_ids_r = torch.arange(seq_length, dtype=torch.long, device=hidden_states.device).view(1, -1) distance = position_ids_l - position_ids_r positional_embedding = self.distance_embedding(distance + self.max_position_embeddings - 1) positional_embedding = positional_embedding.to(dtype=query_layer.dtype) # fp16 compatibility if self.position_embedding_type == "relative_key": relative_position_scores = torch.einsum("bhld,lrd->bhlr", query_layer, positional_embedding) attention_scores = attention_scores + relative_position_scores elif self.position_embedding_type == "relative_key_query": relative_position_scores_query = torch.einsum("bhld,lrd->bhlr", query_layer, positional_embedding) relative_position_scores_key = torch.einsum("bhrd,lrd->bhlr", key_layer, positional_embedding) attention_scores = attention_scores + relative_position_scores_query + relative_position_scores_key attention_scores = attention_scores / math.sqrt(self.attention_head_size) if attention_mask is not None: # Apply the attention mask is (precomputed for all layers in BertModel forward() function) attention_scores = attention_scores + attention_mask # Normalize the attention scores to probabilities. attention_probs = nn.Softmax(dim=-1)(attention_scores) if is_cross_attention and self.save_attention: self.save_attention_map(attention_probs) attention_probs.register_hook(self.save_attn_gradients) # This is actually dropping out entire tokens to attend to, which might # seem a bit unusual, but is taken from the original Transformer paper. 
attention_probs_dropped = self.dropout(attention_probs) # Mask heads if we want to if head_mask is not None: attention_probs_dropped = attention_probs_dropped * head_mask context_layer = torch.matmul(attention_probs_dropped, value_layer) context_layer = context_layer.permute(0, 2, 1, 3).contiguous() new_context_layer_shape = context_layer.size()[:-2] + (self.all_head_size,) context_layer = context_layer.view(*new_context_layer_shape) outputs = (context_layer, attention_probs) if output_attentions else (context_layer,) outputs = outputs + (past_key_value,) return outputs class BertSelfOutput(nn.Module): def __init__(self, config): super().__init__() self.dense = nn.Linear(config.hidden_size, config.hidden_size) self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps) self.dropout = nn.Dropout(config.hidden_dropout_prob) def forward(self, hidden_states, input_tensor): hidden_states = self.dense(hidden_states) hidden_states = self.dropout(hidden_states) hidden_states = self.LayerNorm(hidden_states + input_tensor) return hidden_states class BertAttention(nn.Module): def __init__(self, config, is_cross_attention=False): super().__init__() self.self = BertSelfAttention(config, is_cross_attention) self.output = BertSelfOutput(config) self.pruned_heads = set() def prune_heads(self, heads): if len(heads) == 0: return heads, index = find_pruneable_heads_and_indices( heads, self.self.num_attention_heads, self.self.attention_head_size, self.pruned_heads ) # Prune linear layers self.self.query = prune_linear_layer(self.self.query, index) self.self.key = prune_linear_layer(self.self.key, index) self.self.value = prune_linear_layer(self.self.value, index) self.output.dense = prune_linear_layer(self.output.dense, index, dim=1) # Update hyper params and store pruned heads self.self.num_attention_heads = self.self.num_attention_heads - len(heads) self.self.all_head_size = self.self.attention_head_size * self.self.num_attention_heads self.pruned_heads = 
self.pruned_heads.union(heads) def forward( self, hidden_states, attention_mask=None, head_mask=None, encoder_hidden_states=None, encoder_attention_mask=None, past_key_value=None, output_attentions=False, ): self_outputs = self.self( hidden_states, attention_mask, head_mask, encoder_hidden_states, encoder_attention_mask, past_key_value, output_attentions, ) attention_output = self.output(self_outputs[0], hidden_states) outputs = (attention_output,) + self_outputs[1:] # add attentions if we output them return outputs class BertIntermediate(nn.Module): def __init__(self, config): super().__init__() self.dense = nn.Linear(config.hidden_size, config.intermediate_size) if isinstance(config.hidden_act, str): self.intermediate_act_fn = ACT2FN[config.hidden_act] else: self.intermediate_act_fn = config.hidden_act def forward(self, hidden_states): hidden_states = self.dense(hidden_states) hidden_states = self.intermediate_act_fn(hidden_states) return hidden_states class BertOutput(nn.Module): def __init__(self, config): super().__init__() self.dense = nn.Linear(config.intermediate_size, config.hidden_size) self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps) self.dropout = nn.Dropout(config.hidden_dropout_prob) def forward(self, hidden_states, input_tensor): hidden_states = self.dense(hidden_states) hidden_states = self.dropout(hidden_states) hidden_states = self.LayerNorm(hidden_states + input_tensor) return hidden_states class BertLayer(nn.Module): def __init__(self, config, layer_num): super().__init__() self.config = config self.chunk_size_feed_forward = config.chunk_size_feed_forward self.seq_len_dim = 1 self.attention = BertAttention(config) self.layer_num = layer_num if self.config.add_cross_attention: self.crossattention = BertAttention(config, is_cross_attention=self.config.add_cross_attention) self.intermediate = BertIntermediate(config) self.output = BertOutput(config) def forward( self, hidden_states, attention_mask=None, head_mask=None, 
encoder_hidden_states=None, encoder_attention_mask=None, past_key_value=None, output_attentions=False, mode=None, ): # decoder uni-directional self-attention cached key/values tuple is at positions 1,2 self_attn_past_key_value = past_key_value[:2] if past_key_value is not None else None self_attention_outputs = self.attention( hidden_states, attention_mask, head_mask, output_attentions=output_attentions, past_key_value=self_attn_past_key_value, ) attention_output = self_attention_outputs[0] outputs = self_attention_outputs[1:-1] present_key_value = self_attention_outputs[-1] if mode=='multimodal': assert encoder_hidden_states is not None, "encoder_hidden_states must be given for cross-attention layers" cross_attention_outputs = self.crossattention( attention_output, attention_mask, head_mask, encoder_hidden_states, encoder_attention_mask, output_attentions=output_attentions, ) attention_output = cross_attention_outputs[0] outputs = outputs + cross_attention_outputs[1:-1] # add cross attentions if we output attention weights layer_output = apply_chunking_to_forward( self.feed_forward_chunk, self.chunk_size_feed_forward, self.seq_len_dim, attention_output ) outputs = (layer_output,) + outputs outputs = outputs + (present_key_value,) return outputs def feed_forward_chunk(self, attention_output): intermediate_output = self.intermediate(attention_output) layer_output = self.output(intermediate_output, attention_output) return layer_output class BertEncoder(nn.Module): def __init__(self, config): super().__init__() self.config = config self.layer = nn.ModuleList([BertLayer(config,i) for i in range(config.num_hidden_layers)]) self.gradient_checkpointing = False def forward( self, hidden_states, attention_mask=None, head_mask=None, encoder_hidden_states=None, encoder_attention_mask=None, past_key_values=None, use_cache=None, output_attentions=False, output_hidden_states=False, return_dict=True, mode='multimodal', ): all_hidden_states = () if output_hidden_states else None 
all_self_attentions = () if output_attentions else None all_cross_attentions = () if output_attentions and self.config.add_cross_attention else None next_decoder_cache = () if use_cache else None for i in range(self.config.num_hidden_layers): layer_module = self.layer[i] if output_hidden_states: all_hidden_states = all_hidden_states + (hidden_states,) layer_head_mask = head_mask[i] if head_mask is not None else None past_key_value = past_key_values[i] if past_key_values is not None else None if self.gradient_checkpointing and self.training: if use_cache: logger.warn( "`use_cache=True` is incompatible with gradient checkpointing. Setting `use_cache=False`..." ) use_cache = False def create_custom_forward(module): def custom_forward(*inputs): return module(*inputs, past_key_value, output_attentions) return custom_forward layer_outputs = torch.utils.checkpoint.checkpoint( create_custom_forward(layer_module), hidden_states, attention_mask, layer_head_mask, encoder_hidden_states, encoder_attention_mask, mode=mode, ) else: layer_outputs = layer_module( hidden_states, attention_mask, layer_head_mask, encoder_hidden_states, encoder_attention_mask, past_key_value, output_attentions, mode=mode, ) hidden_states = layer_outputs[0] if use_cache: next_decoder_cache += (layer_outputs[-1],) if output_attentions: all_self_attentions = all_self_attentions + (layer_outputs[1],) if output_hidden_states: all_hidden_states = all_hidden_states + (hidden_states,) if not return_dict: return tuple( v for v in [ hidden_states, next_decoder_cache, all_hidden_states, all_self_attentions, all_cross_attentions, ] if v is not None ) return BaseModelOutputWithPastAndCrossAttentions( last_hidden_state=hidden_states, past_key_values=next_decoder_cache, hidden_states=all_hidden_states, attentions=all_self_attentions, cross_attentions=all_cross_attentions, ) class BertPooler(nn.Module): def __init__(self, config): super().__init__() self.dense = nn.Linear(config.hidden_size, config.hidden_size) 
self.activation = nn.Tanh() def forward(self, hidden_states): # We "pool" the model by simply taking the hidden state corresponding # to the first token. first_token_tensor = hidden_states[:, 0] pooled_output = self.dense(first_token_tensor) pooled_output = self.activation(pooled_output) return pooled_output class BertPredictionHeadTransform(nn.Module): def __init__(self, config): super().__init__() self.dense = nn.Linear(config.hidden_size, config.hidden_size) if isinstance(config.hidden_act, str): self.transform_act_fn = ACT2FN[config.hidden_act] else: self.transform_act_fn = config.hidden_act self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps) def forward(self, hidden_states): hidden_states = self.dense(hidden_states) hidden_states = self.transform_act_fn(hidden_states) hidden_states = self.LayerNorm(hidden_states) return hidden_states class BertLMPredictionHead(nn.Module): def __init__(self, config): super().__init__() self.transform = BertPredictionHeadTransform(config) # The output weights are the same as the input embeddings, but there is # an output-only bias for each token. self.decoder = nn.Linear(config.hidden_size, config.vocab_size, bias=False) self.bias = nn.Parameter(torch.zeros(config.vocab_size)) # Need a link between the two variables so that the bias is correctly resized with `resize_token_embeddings` self.decoder.bias = self.bias def forward(self, hidden_states): hidden_states = self.transform(hidden_states) hidden_states = self.decoder(hidden_states) return hidden_states class BertOnlyMLMHead(nn.Module): def __init__(self, config): super().__init__() self.predictions = BertLMPredictionHead(config) def forward(self, sequence_output): prediction_scores = self.predictions(sequence_output) return prediction_scores class BertPreTrainedModel(PreTrainedModel): """ An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained models. 
""" config_class = BertConfig base_model_prefix = "bert" _keys_to_ignore_on_load_missing = [r"position_ids"] def _init_weights(self, module): """ Initialize the weights """ if isinstance(module, (nn.Linear, nn.Embedding)): # Slightly different from the TF version which uses truncated_normal for initialization # cf https://github.com/pytorch/pytorch/pull/5617 module.weight.data.normal_(mean=0.0, std=self.config.initializer_range) elif isinstance(module, nn.LayerNorm): module.bias.data.zero_() module.weight.data.fill_(1.0) if isinstance(module, nn.Linear) and module.bias is not None: module.bias.data.zero_() class BertModel(BertPreTrainedModel): """ The model can behave as an encoder (with only self-attention) as well as a decoder, in which case a layer of cross-attention is added between the self-attention layers, following the architecture described in `Attention is all you need <https://arxiv.org/abs/1706.03762>`__ by Ashish Vaswani, Noam Shazeer, Niki Parmar, Jakob Uszkoreit, Llion Jones, Aidan N. Gomez, Lukasz Kaiser and Illia Polosukhin. argument and :obj:`add_cross_attention` set to :obj:`True`; an :obj:`encoder_hidden_states` is then expected as an input to the forward pass. """ def __init__(self, config, add_pooling_layer=True): super().__init__(config) self.config = config self.embeddings = BertEmbeddings(config) self.encoder = BertEncoder(config) self.pooler = BertPooler(config) if add_pooling_layer else None self.init_weights() def get_input_embeddings(self): return self.embeddings.word_embeddings def set_input_embeddings(self, value): self.embeddings.word_embeddings = value def _prune_heads(self, heads_to_prune): """ Prunes heads of the model. 
heads_to_prune: dict of {layer_num: list of heads to prune in this layer} See base class PreTrainedModel """ for layer, heads in heads_to_prune.items(): self.encoder.layer[layer].attention.prune_heads(heads) def get_extended_attention_mask(self, attention_mask: Tensor, input_shape: Tuple[int], device: device, is_decoder: bool) -> Tensor: """ Makes broadcastable attention and causal masks so that future and masked tokens are ignored. Arguments: attention_mask (:obj:`torch.Tensor`): Mask with ones indicating tokens to attend to, zeros for tokens to ignore. input_shape (:obj:`Tuple[int]`): The shape of the input to the model. device: (:obj:`torch.device`): The device of the input to the model. Returns: :obj:`torch.Tensor` The extended attention mask, with a the same dtype as :obj:`attention_mask.dtype`. """ # We can provide a self-attention mask of dimensions [batch_size, from_seq_length, to_seq_length] # ourselves in which case we just need to make it broadcastable to all heads. if attention_mask.dim() == 3: extended_attention_mask = attention_mask[:, None, :, :] elif attention_mask.dim() == 2: # Provided a padding mask of dimensions [batch_size, seq_length] # - if the model is a decoder, apply a causal mask in addition to the padding mask # - if the model is an encoder, make the mask broadcastable to [batch_size, num_heads, seq_length, seq_length] if is_decoder: batch_size, seq_length = input_shape seq_ids = torch.arange(seq_length, device=device) causal_mask = seq_ids[None, None, :].repeat(batch_size, seq_length, 1) <= seq_ids[None, :, None] # in case past_key_values are used we need to add a prefix ones mask to the causal mask # causal and attention masks must have same type with pytorch version < 1.3 causal_mask = causal_mask.to(attention_mask.dtype) if causal_mask.shape[1] < attention_mask.shape[1]: prefix_seq_len = attention_mask.shape[1] - causal_mask.shape[1] causal_mask = torch.cat( [ torch.ones((batch_size, seq_length, prefix_seq_len), device=device, 
dtype=causal_mask.dtype), causal_mask, ], axis=-1, ) extended_attention_mask = causal_mask[:, None, :, :] * attention_mask[:, None, None, :] else: extended_attention_mask = attention_mask[:, None, None, :] else: raise ValueError( "Wrong shape for input_ids (shape {}) or attention_mask (shape {})".format( input_shape, attention_mask.shape ) ) # Since attention_mask is 1.0 for positions we want to attend and 0.0 for # masked positions, this operation will create a tensor which is 0.0 for # positions we want to attend and -10000.0 for masked positions. # Since we are adding it to the raw scores before the softmax, this is # effectively the same as removing these entirely. extended_attention_mask = extended_attention_mask.to(dtype=self.dtype) # fp16 compatibility extended_attention_mask = (1.0 - extended_attention_mask) * -10000.0 return extended_attention_mask def forward( self, input_ids=None, attention_mask=None, position_ids=None, head_mask=None, inputs_embeds=None, encoder_embeds=None, encoder_hidden_states=None, encoder_attention_mask=None, past_key_values=None, use_cache=None, output_attentions=None, output_hidden_states=None, return_dict=None, is_decoder=False, mode='multimodal', ): r""" encoder_hidden_states (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, sequence_length, hidden_size)`, `optional`): Sequence of hidden-states at the output of the last layer of the encoder. Used in the cross-attention if the model is configured as a decoder. encoder_attention_mask (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, sequence_length)`, `optional`): Mask to avoid performing attention on the padding token indices of the encoder input. This mask is used in the cross-attention if the model is configured as a decoder. Mask values selected in ``[0, 1]``: - 1 for tokens that are **not masked**, - 0 for tokens that are **masked**. 
past_key_values (:obj:`tuple(tuple(torch.FloatTensor))` of length :obj:`config.n_layers` with each tuple having 4 tensors of shape :obj:`(batch_size, num_heads, sequence_length - 1, embed_size_per_head)`): Contains precomputed key and value hidden states of the attention blocks. Can be used to speed up decoding. If :obj:`past_key_values` are used, the user can optionally input only the last :obj:`decoder_input_ids` (those that don't have their past key value states given to this model) of shape :obj:`(batch_size, 1)` instead of all :obj:`decoder_input_ids` of shape :obj:`(batch_size, sequence_length)`. use_cache (:obj:`bool`, `optional`): If set to :obj:`True`, :obj:`past_key_values` key value states are returned and can be used to speed up decoding (see :obj:`past_key_values`). """ output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions output_hidden_states = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) return_dict = return_dict if return_dict is not None else self.config.use_return_dict if is_decoder: use_cache = use_cache if use_cache is not None else self.config.use_cache else: use_cache = False if input_ids is not None and inputs_embeds is not None: raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time") elif input_ids is not None: input_shape = input_ids.size() batch_size, seq_length = input_shape device = input_ids.device elif inputs_embeds is not None: input_shape = inputs_embeds.size()[:-1] batch_size, seq_length = input_shape device = inputs_embeds.device elif encoder_embeds is not None: input_shape = encoder_embeds.size()[:-1] batch_size, seq_length = input_shape device = encoder_embeds.device else: raise ValueError("You have to specify either input_ids or inputs_embeds or encoder_embeds") # past_key_values_length past_key_values_length = past_key_values[0][0].shape[2] if past_key_values is not None else 0 if 
attention_mask is None: attention_mask = torch.ones(((batch_size, seq_length + past_key_values_length)), device=device) # We can provide a self-attention mask of dimensions [batch_size, from_seq_length, to_seq_length] # ourselves in which case we just need to make it broadcastable to all heads. extended_attention_mask: torch.Tensor = self.get_extended_attention_mask(attention_mask, input_shape, device, is_decoder) # If a 2D or 3D attention mask is provided for the cross-attention # we need to make broadcastable to [batch_size, num_heads, seq_length, seq_length] if encoder_hidden_states is not None: if type(encoder_hidden_states) == list: encoder_batch_size, encoder_sequence_length, _ = encoder_hidden_states[0].size() else: encoder_batch_size, encoder_sequence_length, _ = encoder_hidden_states.size() encoder_hidden_shape = (encoder_batch_size, encoder_sequence_length) if type(encoder_attention_mask) == list: encoder_extended_attention_mask = [self.invert_attention_mask(mask) for mask in encoder_attention_mask] elif encoder_attention_mask is None: encoder_attention_mask = torch.ones(encoder_hidden_shape, device=device) encoder_extended_attention_mask = self.invert_attention_mask(encoder_attention_mask) else: encoder_extended_attention_mask = self.invert_attention_mask(encoder_attention_mask) else: encoder_extended_attention_mask = None # Prepare head mask if needed # 1.0 in head_mask indicate we keep the head
python
MIT
eeb5c648e6c2a25c8f6f8038edfe75d73c811614
2026-01-05T07:14:33.752935Z
true
liuff19/DreamReward
https://github.com/liuff19/DreamReward/blob/eeb5c648e6c2a25c8f6f8038edfe75d73c811614/Reward3D/BLIP/blip.py
Reward3D/BLIP/blip.py
''' * Adapted from BLIP (https://github.com/salesforce/BLIP) ''' import warnings warnings.filterwarnings("ignore") import torch import os from urllib.parse import urlparse from timm.models.hub import download_cached_file from transformers import BertTokenizer from .vit import VisionTransformer, interpolate_pos_embed def init_tokenizer(): tokenizer = BertTokenizer.from_pretrained('bert-base-uncased') tokenizer.add_special_tokens({'bos_token':'[DEC]'}) tokenizer.add_special_tokens({'additional_special_tokens':['[ENC]']}) tokenizer.enc_token_id = tokenizer.additional_special_tokens_ids[0] return tokenizer def create_vit(vit, image_size, use_grad_checkpointing=False, ckpt_layer=0, drop_path_rate=0): assert vit in ['base', 'large'], "vit parameter must be base or large" if vit=='base': vision_width = 768 visual_encoder = VisionTransformer(img_size=image_size, patch_size=16, embed_dim=vision_width, depth=12, num_heads=12, use_grad_checkpointing=use_grad_checkpointing, ckpt_layer=ckpt_layer, drop_path_rate=0 or drop_path_rate ) elif vit=='large': vision_width = 1024 visual_encoder = VisionTransformer(img_size=image_size, patch_size=16, embed_dim=vision_width, depth=24, num_heads=16, use_grad_checkpointing=use_grad_checkpointing, ckpt_layer=ckpt_layer, drop_path_rate=0.1 or drop_path_rate ) return visual_encoder, vision_width def is_url(url_or_filename): parsed = urlparse(url_or_filename) return parsed.scheme in ("http", "https") def load_checkpoint(model,url_or_filename): if is_url(url_or_filename): cached_file = download_cached_file(url_or_filename, check_hash=False, progress=True) checkpoint = torch.load(cached_file, map_location='cpu') elif os.path.isfile(url_or_filename): checkpoint = torch.load(url_or_filename, map_location='cpu') else: raise RuntimeError('checkpoint url or path is invalid') state_dict = checkpoint['model'] state_dict['visual_encoder.pos_embed'] = interpolate_pos_embed(state_dict['visual_encoder.pos_embed'],model.visual_encoder) if 
'visual_encoder_m.pos_embed' in model.state_dict().keys(): state_dict['visual_encoder_m.pos_embed'] = interpolate_pos_embed(state_dict['visual_encoder_m.pos_embed'], model.visual_encoder_m) for key in model.state_dict().keys(): if key in state_dict.keys(): if state_dict[key].shape!=model.state_dict()[key].shape: print(key, ": ", state_dict[key].shape, ', ', model.state_dict()[key].shape) del state_dict[key] msg = model.load_state_dict(state_dict,strict=False) print('load checkpoint from %s'%url_or_filename) return model,msg
python
MIT
eeb5c648e6c2a25c8f6f8038edfe75d73c811614
2026-01-05T07:14:33.752935Z
false
liuff19/DreamReward
https://github.com/liuff19/DreamReward/blob/eeb5c648e6c2a25c8f6f8038edfe75d73c811614/Reward3D/BLIP/__init__.py
Reward3D/BLIP/__init__.py
from .blip_pretrain import *
python
MIT
eeb5c648e6c2a25c8f6f8038edfe75d73c811614
2026-01-05T07:14:33.752935Z
false
liuff19/DreamReward
https://github.com/liuff19/DreamReward/blob/eeb5c648e6c2a25c8f6f8038edfe75d73c811614/Reward3D/models/BLIPScore.py
Reward3D/models/BLIPScore.py
import os import torch import torch.nn as nn import torch.nn.functional as F from PIL import Image from Reward3D.models.BLIP.blip import load_checkpoint from Reward3D.models.BLIP.blip_pretrain import BLIP_Pretrain from torchvision.transforms import Compose, Resize, CenterCrop, ToTensor, Normalize try: from torchvision.transforms import InterpolationMode BICUBIC = InterpolationMode.BICUBIC except ImportError: BICUBIC = Image.BICUBIC def _convert_image_to_rgb(image): return image.convert("RGB") def _transform(n_px): return Compose([ Resize(n_px, interpolation=BICUBIC), CenterCrop(n_px), _convert_image_to_rgb, ToTensor(), Normalize((0.48145466, 0.4578275, 0.40821073), (0.26862954, 0.26130258, 0.27577711)), ]) class BLIPScore(nn.Module): def __init__(self, med_config, device='cpu'): super().__init__() self.device = device self.preprocess = _transform(224) self.blip = BLIP_Pretrain(image_size=224, vit='large', med_config=med_config) def score(self, prompt, image_path): if (type(image_path).__name__=='list'): _, rewards = self.inference_rank(prompt, image_path) return rewards # text encode text_input = self.blip.tokenizer(prompt, padding='max_length', truncation=True, max_length=35, return_tensors="pt").to(self.device) text_output = self.blip.text_encoder(text_input.input_ids, attention_mask = text_input.attention_mask, mode='text') txt_feature = F.normalize(self.blip.text_proj(text_output.last_hidden_state[:,0,:])) # image encode pil_image = Image.open(image_path) image = self.preprocess(pil_image).unsqueeze(0).to(self.device) image_embeds = self.blip.visual_encoder(image) image_features = F.normalize(self.blip.vision_proj(image_embeds[:,0,:]), dim=-1) # score rewards = torch.sum(torch.mul(txt_feature, image_features), dim=1, keepdim=True) return rewards.detach().cpu().numpy().item() def inference_rank(self, prompt, generations_list): text_input = self.blip.tokenizer(prompt, padding='max_length', truncation=True, max_length=35, return_tensors="pt").to(self.device) 
text_output = self.blip.text_encoder(text_input.input_ids, attention_mask = text_input.attention_mask, mode='text') txt_feature = F.normalize(self.blip.text_proj(text_output.last_hidden_state[:,0,:])) txt_set = [] img_set = [] for generations in generations_list: # image encode img_path = generations pil_image = Image.open(img_path) image = self.preprocess(pil_image).unsqueeze(0).to(self.device) image_embeds = self.blip.visual_encoder(image) image_features = F.normalize(self.blip.vision_proj(image_embeds[:,0,:]), dim=-1) img_set.append(image_features) txt_set.append(txt_feature) txt_features = torch.cat(txt_set, 0).float() # [image_num, feature_dim] img_features = torch.cat(img_set, 0).float() # [image_num, feature_dim] rewards = torch.sum(torch.mul(txt_features, img_features), dim=1, keepdim=True) rewards = torch.squeeze(rewards) _, rank = torch.sort(rewards, dim=0, descending=True) _, indices = torch.sort(rank, dim=0) indices = indices + 1 return indices.detach().cpu().numpy().tolist(), rewards.detach().cpu().numpy().tolist()
python
MIT
eeb5c648e6c2a25c8f6f8038edfe75d73c811614
2026-01-05T07:14:33.752935Z
false
liuff19/DreamReward
https://github.com/liuff19/DreamReward/blob/eeb5c648e6c2a25c8f6f8038edfe75d73c811614/Reward3D/models/__init__.py
Reward3D/models/__init__.py
from .BLIPScore import * from .BLIP import *
python
MIT
eeb5c648e6c2a25c8f6f8038edfe75d73c811614
2026-01-05T07:14:33.752935Z
false
liuff19/DreamReward
https://github.com/liuff19/DreamReward/blob/eeb5c648e6c2a25c8f6f8038edfe75d73c811614/Reward3D/models/BLIP/vit.py
Reward3D/models/BLIP/vit.py
'''
 * Adapted from BLIP (https://github.com/salesforce/BLIP)
 * Based on timm code base
 * https://github.com/rwightman/pytorch-image-models/tree/master/timm
'''

import torch
import torch.nn as nn
import torch.nn.functional as F
from functools import partial

from timm.models.vision_transformer import _cfg, PatchEmbed
from timm.models.registry import register_model
from timm.models.layers import trunc_normal_, DropPath
from timm.models.helpers import named_apply, adapt_input_conv

from fairscale.nn.checkpoint.checkpoint_activations import checkpoint_wrapper


class Mlp(nn.Module):
    """ MLP as used in Vision Transformer, MLP-Mixer and related networks """
    def __init__(self, in_features, hidden_features=None, out_features=None, act_layer=nn.GELU, drop=0.):
        super().__init__()
        # Default: square MLP (hidden == out == in) unless widths are given.
        out_features = out_features or in_features
        hidden_features = hidden_features or in_features
        self.fc1 = nn.Linear(in_features, hidden_features)
        self.act = act_layer()
        self.fc2 = nn.Linear(hidden_features, out_features)
        self.drop = nn.Dropout(drop)

    def forward(self, x):
        """Two-layer MLP with activation and dropout after each linear layer."""
        x = self.fc1(x)
        x = self.act(x)
        x = self.drop(x)
        x = self.fc2(x)
        x = self.drop(x)
        return x


class Attention(nn.Module):
    """Multi-head self-attention with optional hooks to capture attention maps
    and their gradients (used for attention visualization / Grad-CAM style analysis)."""
    def __init__(self, dim, num_heads=8, qkv_bias=False, qk_scale=None, attn_drop=0., proj_drop=0.):
        super().__init__()
        self.num_heads = num_heads
        head_dim = dim // num_heads
        # NOTE scale factor was wrong in my original version, can set manually to be compat with prev weights
        self.scale = qk_scale or head_dim ** -0.5
        # Fused projection producing Q, K and V in a single matmul.
        self.qkv = nn.Linear(dim, dim * 3, bias=qkv_bias)
        self.attn_drop = nn.Dropout(attn_drop)
        self.proj = nn.Linear(dim, dim)
        self.proj_drop = nn.Dropout(proj_drop)
        # Populated only when forward(..., register_hook=True) is used.
        self.attn_gradients = None
        self.attention_map = None

    def save_attn_gradients(self, attn_gradients):
        # Backward hook target: stores d(loss)/d(attention map).
        self.attn_gradients = attn_gradients

    def get_attn_gradients(self):
        return self.attn_gradients

    def save_attention_map(self, attention_map):
        self.attention_map = attention_map

    def get_attention_map(self):
        return self.attention_map

    def forward(self, x, register_hook=False):
        """Standard scaled dot-product attention over tokens.

        Args:
            x: token tensor of shape [B, N, C].
            register_hook: if True, cache the attention map and register a
                backward hook to capture its gradients.
        """
        B, N, C = x.shape
        # [B, N, 3C] -> [3, B, heads, N, head_dim]
        qkv = self.qkv(x).reshape(B, N, 3, self.num_heads, C // self.num_heads).permute(2, 0, 3, 1, 4)
        q, k, v = qkv[0], qkv[1], qkv[2]   # make torchscript happy (cannot use tensor as tuple)

        attn = (q @ k.transpose(-2, -1)) * self.scale
        attn = attn.softmax(dim=-1)
        attn = self.attn_drop(attn)

        if register_hook:
            self.save_attention_map(attn)
            attn.register_hook(self.save_attn_gradients)

        x = (attn @ v).transpose(1, 2).reshape(B, N, C)
        x = self.proj(x)
        x = self.proj_drop(x)
        return x


class Block(nn.Module):
    """Pre-norm transformer block: x + DropPath(Attn(LN(x))), then x + DropPath(MLP(LN(x)))."""

    def __init__(self, dim, num_heads, mlp_ratio=4., qkv_bias=False, qk_scale=None, drop=0., attn_drop=0.,
                 drop_path=0., act_layer=nn.GELU, norm_layer=nn.LayerNorm, use_grad_checkpointing=False):
        super().__init__()
        self.norm1 = norm_layer(dim)
        self.attn = Attention(
            dim, num_heads=num_heads, qkv_bias=qkv_bias, qk_scale=qk_scale, attn_drop=attn_drop, proj_drop=drop)
        # NOTE: drop path for stochastic depth, we shall see if this is better than dropout here
        self.drop_path = DropPath(drop_path) if drop_path > 0. else nn.Identity()
        self.norm2 = norm_layer(dim)
        mlp_hidden_dim = int(dim * mlp_ratio)
        self.mlp = Mlp(in_features=dim, hidden_features=mlp_hidden_dim, act_layer=act_layer, drop=drop)

        if use_grad_checkpointing:
            # Trade compute for memory: re-run attn/mlp forward during backward.
            self.attn = checkpoint_wrapper(self.attn)
            self.mlp = checkpoint_wrapper(self.mlp)

    def forward(self, x, register_hook=False):
        x = x + self.drop_path(self.attn(self.norm1(x), register_hook=register_hook))
        x = x + self.drop_path(self.mlp(self.norm2(x)))
        return x


class VisionTransformer(nn.Module):
    """ Vision Transformer
    A PyTorch impl of : `An Image is Worth 16x16 Words: Transformers for Image Recognition at Scale`  -
        https://arxiv.org/abs/2010.11929
    """
    def __init__(self, img_size=224, patch_size=16, in_chans=3, num_classes=1000, embed_dim=768, depth=12,
                 num_heads=12, mlp_ratio=4., qkv_bias=True, qk_scale=None, representation_size=None,
                 drop_rate=0., attn_drop_rate=0., drop_path_rate=0., norm_layer=None,
                 use_grad_checkpointing=False, ckpt_layer=0):
        """
        Args:
            img_size (int, tuple): input image size
            patch_size (int, tuple): patch size
            in_chans (int): number of input channels
            num_classes (int): number of classes for classification head
            embed_dim (int): embedding dimension
            depth (int): depth of transformer
            num_heads (int): number of attention heads
            mlp_ratio (int): ratio of mlp hidden dim to embedding dim
            qkv_bias (bool): enable bias for qkv if True
            qk_scale (float): override default qk scale of head_dim ** -0.5 if set
            representation_size (Optional[int]): enable and set representation layer (pre-logits) to this value if set
            drop_rate (float): dropout rate
            attn_drop_rate (float): attention dropout rate
            drop_path_rate (float): stochastic depth rate
            norm_layer: (nn.Module): normalization layer
        """
        super().__init__()
        self.num_features = self.embed_dim = embed_dim  # num_features for consistency with other models
        norm_layer = norm_layer or partial(nn.LayerNorm, eps=1e-6)

        self.patch_embed = PatchEmbed(
            img_size=img_size, patch_size=patch_size, in_chans=in_chans, embed_dim=embed_dim)

        num_patches = self.patch_embed.num_patches

        # Learned [CLS] token and learned absolute position embeddings (+1 for CLS).
        self.cls_token = nn.Parameter(torch.zeros(1, 1, embed_dim))
        self.pos_embed = nn.Parameter(torch.zeros(1, num_patches + 1, embed_dim))
        self.pos_drop = nn.Dropout(p=drop_rate)

        dpr = [x.item() for x in torch.linspace(0, drop_path_rate, depth)]  # stochastic depth decay rule
        self.blocks = nn.ModuleList([
            Block(
                dim=embed_dim, num_heads=num_heads, mlp_ratio=mlp_ratio, qkv_bias=qkv_bias, qk_scale=qk_scale,
                drop=drop_rate, attn_drop=attn_drop_rate, drop_path=dpr[i], norm_layer=norm_layer,
                # Only the last `ckpt_layer` blocks are gradient-checkpointed.
                use_grad_checkpointing=(use_grad_checkpointing and i>=depth-ckpt_layer)
            )
            for i in range(depth)])
        self.norm = norm_layer(embed_dim)

        trunc_normal_(self.pos_embed, std=.02)
        trunc_normal_(self.cls_token, std=.02)
        self.apply(self._init_weights)

    def _init_weights(self, m):
        # Truncated-normal init for linear weights, zeros/ones for LayerNorm.
        if isinstance(m, nn.Linear):
            trunc_normal_(m.weight, std=.02)
            if isinstance(m, nn.Linear) and m.bias is not None:
                nn.init.constant_(m.bias, 0)
        elif isinstance(m, nn.LayerNorm):
            nn.init.constant_(m.bias, 0)
            nn.init.constant_(m.weight, 1.0)

    @torch.jit.ignore
    def no_weight_decay(self):
        # Parameters that optimizers should exclude from weight decay.
        return {'pos_embed', 'cls_token'}

    def forward(self, x, register_blk=-1):
        """Encode images to token features.

        Args:
            x: image batch [B, C, H, W].
            register_blk: index of the single block whose attention map/gradients
                should be captured (-1 disables capture).

        Returns:
            token features [B, num_patches + 1, embed_dim] (CLS token first).
        """
        B = x.shape[0]
        x = self.patch_embed(x)

        cls_tokens = self.cls_token.expand(B, -1, -1)  # stole cls_tokens impl from Phil Wang, thanks
        x = torch.cat((cls_tokens, x), dim=1)

        x = x + self.pos_embed[:,:x.size(1),:]
        x = self.pos_drop(x)

        for i,blk in enumerate(self.blocks):
            x = blk(x, register_blk==i)
        x = self.norm(x)

        return x

    @torch.jit.ignore()
    def load_pretrained(self, checkpoint_path, prefix=''):
        # Load official Google Brain Flax .npz weights (see _load_weights below).
        _load_weights(self, checkpoint_path, prefix)


@torch.no_grad()
def _load_weights(model: VisionTransformer, checkpoint_path: str, prefix: str = ''):
    """ Load weights from .npz checkpoints for official Google Brain Flax implementation
    """
    import numpy as np

    def _n2p(w, t=True):
        # numpy -> torch, optionally transposing Flax layouts to PyTorch layouts.
        if w.ndim == 4 and w.shape[0] == w.shape[1] == w.shape[2] == 1:
            w = w.flatten()
        if t:
            if w.ndim == 4:
                w = w.transpose([3, 2, 0, 1])
            elif w.ndim == 3:
                w = w.transpose([2, 0, 1])
            elif w.ndim == 2:
                w = w.transpose([1, 0])
        return torch.from_numpy(w)

    w = np.load(checkpoint_path)
    if not prefix and 'opt/target/embedding/kernel' in w:
        prefix = 'opt/target/'

    if hasattr(model.patch_embed, 'backbone'):
        # hybrid
        backbone = model.patch_embed.backbone
        stem_only = not hasattr(backbone, 'stem')
        stem = backbone if stem_only else backbone.stem
        stem.conv.weight.copy_(adapt_input_conv(stem.conv.weight.shape[1], _n2p(w[f'{prefix}conv_root/kernel'])))
        stem.norm.weight.copy_(_n2p(w[f'{prefix}gn_root/scale']))
        stem.norm.bias.copy_(_n2p(w[f'{prefix}gn_root/bias']))
        if not stem_only:
            for i, stage in enumerate(backbone.stages):
                for j, block in enumerate(stage.blocks):
                    bp = f'{prefix}block{i + 1}/unit{j + 1}/'
                    for r in range(3):
                        getattr(block, f'conv{r + 1}').weight.copy_(_n2p(w[f'{bp}conv{r + 1}/kernel']))
                        getattr(block, f'norm{r + 1}').weight.copy_(_n2p(w[f'{bp}gn{r + 1}/scale']))
                        getattr(block, f'norm{r + 1}').bias.copy_(_n2p(w[f'{bp}gn{r + 1}/bias']))
                    if block.downsample is not None:
                        block.downsample.conv.weight.copy_(_n2p(w[f'{bp}conv_proj/kernel']))
                        block.downsample.norm.weight.copy_(_n2p(w[f'{bp}gn_proj/scale']))
                        block.downsample.norm.bias.copy_(_n2p(w[f'{bp}gn_proj/bias']))
        embed_conv_w = _n2p(w[f'{prefix}embedding/kernel'])
    else:
        embed_conv_w = adapt_input_conv(
            model.patch_embed.proj.weight.shape[1], _n2p(w[f'{prefix}embedding/kernel']))
    model.patch_embed.proj.weight.copy_(embed_conv_w)
    model.patch_embed.proj.bias.copy_(_n2p(w[f'{prefix}embedding/bias']))
    model.cls_token.copy_(_n2p(w[f'{prefix}cls'], t=False))
    pos_embed_w = _n2p(w[f'{prefix}Transformer/posembed_input/pos_embedding'], t=False)
    if pos_embed_w.shape != model.pos_embed.shape:
        pos_embed_w = resize_pos_embed(  # resize pos embedding when different size from pretrained weights
            pos_embed_w, model.pos_embed, getattr(model, 'num_tokens', 1), model.patch_embed.grid_size)
    model.pos_embed.copy_(pos_embed_w)
    model.norm.weight.copy_(_n2p(w[f'{prefix}Transformer/encoder_norm/scale']))
    model.norm.bias.copy_(_n2p(w[f'{prefix}Transformer/encoder_norm/bias']))
#     if isinstance(model.head, nn.Linear) and model.head.bias.shape[0] == w[f'{prefix}head/bias'].shape[-1]:
#         model.head.weight.copy_(_n2p(w[f'{prefix}head/kernel']))
#         model.head.bias.copy_(_n2p(w[f'{prefix}head/bias']))
#     if isinstance(getattr(model.pre_logits, 'fc', None), nn.Linear) and f'{prefix}pre_logits/bias' in w:
#         model.pre_logits.fc.weight.copy_(_n2p(w[f'{prefix}pre_logits/kernel']))
#         model.pre_logits.fc.bias.copy_(_n2p(w[f'{prefix}pre_logits/bias']))
    for i, block in enumerate(model.blocks.children()):
        block_prefix = f'{prefix}Transformer/encoderblock_{i}/'
        mha_prefix = block_prefix + 'MultiHeadDotProductAttention_1/'
        block.norm1.weight.copy_(_n2p(w[f'{block_prefix}LayerNorm_0/scale']))
        block.norm1.bias.copy_(_n2p(w[f'{block_prefix}LayerNorm_0/bias']))
        # Flax keeps separate Q/K/V matrices; PyTorch side uses one fused qkv.
        block.attn.qkv.weight.copy_(torch.cat([
            _n2p(w[f'{mha_prefix}{n}/kernel'], t=False).flatten(1).T for n in ('query', 'key', 'value')]))
        block.attn.qkv.bias.copy_(torch.cat([
            _n2p(w[f'{mha_prefix}{n}/bias'], t=False).reshape(-1) for n in ('query', 'key', 'value')]))
        block.attn.proj.weight.copy_(_n2p(w[f'{mha_prefix}out/kernel']).flatten(1))
        block.attn.proj.bias.copy_(_n2p(w[f'{mha_prefix}out/bias']))
        for r in range(2):
            getattr(block.mlp, f'fc{r + 1}').weight.copy_(_n2p(w[f'{block_prefix}MlpBlock_3/Dense_{r}/kernel']))
            getattr(block.mlp, f'fc{r + 1}').bias.copy_(_n2p(w[f'{block_prefix}MlpBlock_3/Dense_{r}/bias']))
        block.norm2.weight.copy_(_n2p(w[f'{block_prefix}LayerNorm_2/scale']))
        block.norm2.bias.copy_(_n2p(w[f'{block_prefix}LayerNorm_2/bias']))


def interpolate_pos_embed(pos_embed_checkpoint, visual_encoder):
    """Resize a checkpoint's position embedding to this encoder's patch grid
    via bicubic interpolation; extra (e.g. CLS) tokens are passed through."""
    # interpolate position embedding
    embedding_size = pos_embed_checkpoint.shape[-1]
    num_patches = visual_encoder.patch_embed.num_patches
    num_extra_tokens = visual_encoder.pos_embed.shape[-2] - num_patches
    # height (== width) for the checkpoint position embedding
    orig_size = int((pos_embed_checkpoint.shape[-2] - num_extra_tokens) ** 0.5)
    # height (== width) for the new position embedding
    new_size = int(num_patches ** 0.5)

    if orig_size!=new_size:
        # class_token and dist_token are kept unchanged
        extra_tokens = pos_embed_checkpoint[:, :num_extra_tokens]
        # only the position tokens are interpolated
        pos_tokens = pos_embed_checkpoint[:, num_extra_tokens:]
        pos_tokens = pos_tokens.reshape(-1, orig_size, orig_size, embedding_size).permute(0, 3, 1, 2)
        pos_tokens = torch.nn.functional.interpolate(
            pos_tokens, size=(new_size, new_size), mode='bicubic', align_corners=False)
        pos_tokens = pos_tokens.permute(0, 2, 3, 1).flatten(1, 2)
        new_pos_embed = torch.cat((extra_tokens, pos_tokens), dim=1)
        print('reshape position embedding from %d to %d'%(orig_size ** 2,new_size ** 2))

        return new_pos_embed
    else:
        return pos_embed_checkpoint
python
MIT
eeb5c648e6c2a25c8f6f8038edfe75d73c811614
2026-01-05T07:14:33.752935Z
false
liuff19/DreamReward
https://github.com/liuff19/DreamReward/blob/eeb5c648e6c2a25c8f6f8038edfe75d73c811614/Reward3D/models/BLIP/blip_pretrain.py
Reward3D/models/BLIP/blip_pretrain.py
''' * Adapted from BLIP (https://github.com/salesforce/BLIP) ''' import transformers transformers.logging.set_verbosity_error() from torch import nn import os from .med import BertConfig, BertModel from .blip import create_vit, init_tokenizer class BLIP_Pretrain(nn.Module): def __init__(self, med_config = "med_config.json", image_size = 224, vit = 'base', vit_grad_ckpt = False, vit_ckpt_layer = 0, embed_dim = 256, queue_size = 57600, momentum = 0.995, ): """ Args: med_config (str): path for the mixture of encoder-decoder model's configuration file image_size (int): input image size vit (str): model size of vision transformer """ super().__init__() self.visual_encoder, vision_width = create_vit(vit,image_size, vit_grad_ckpt, vit_ckpt_layer, 0) self.tokenizer = init_tokenizer() encoder_config = BertConfig.from_json_file(med_config) encoder_config.encoder_width = vision_width self.text_encoder = BertModel(config=encoder_config, add_pooling_layer=False) text_width = self.text_encoder.config.hidden_size self.vision_proj = nn.Linear(vision_width, embed_dim) self.text_proj = nn.Linear(text_width, embed_dim)
python
MIT
eeb5c648e6c2a25c8f6f8038edfe75d73c811614
2026-01-05T07:14:33.752935Z
false
liuff19/DreamReward
https://github.com/liuff19/DreamReward/blob/eeb5c648e6c2a25c8f6f8038edfe75d73c811614/Reward3D/models/BLIP/med.py
Reward3D/models/BLIP/med.py
''' * Adapted from BLIP (https://github.com/salesforce/BLIP) * Based on huggingface code base * https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/bert ''' import math from typing import Tuple import torch from torch import Tensor, device, nn import torch.utils.checkpoint from torch import nn from torch.nn import CrossEntropyLoss from transformers.activations import ACT2FN from transformers.file_utils import ( ModelOutput, ) from transformers.modeling_outputs import ( BaseModelOutputWithPastAndCrossAttentions, BaseModelOutputWithPoolingAndCrossAttentions, CausalLMOutputWithCrossAttentions, MaskedLMOutput, MultipleChoiceModelOutput, NextSentencePredictorOutput, QuestionAnsweringModelOutput, SequenceClassifierOutput, TokenClassifierOutput, ) from transformers.modeling_utils import ( PreTrainedModel, apply_chunking_to_forward, find_pruneable_heads_and_indices, prune_linear_layer, ) from transformers.utils import logging from transformers.models.bert.configuration_bert import BertConfig logger = logging.get_logger(__name__) class BertEmbeddings(nn.Module): """Construct the embeddings from word and position embeddings.""" def __init__(self, config): super().__init__() self.word_embeddings = nn.Embedding(config.vocab_size, config.hidden_size, padding_idx=config.pad_token_id) self.position_embeddings = nn.Embedding(config.max_position_embeddings, config.hidden_size) # self.LayerNorm is not snake-cased to stick with TensorFlow model variable name and be able to load # any TensorFlow checkpoint file self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps) self.dropout = nn.Dropout(config.hidden_dropout_prob) # position_ids (1, len position emb) is contiguous in memory and exported when serialized self.register_buffer("position_ids", torch.arange(config.max_position_embeddings).expand((1, -1))) self.position_embedding_type = getattr(config, "position_embedding_type", "absolute") self.config = config def forward( self, 
input_ids=None, position_ids=None, inputs_embeds=None, past_key_values_length=0 ): if input_ids is not None: input_shape = input_ids.size() else: input_shape = inputs_embeds.size()[:-1] seq_length = input_shape[1] if position_ids is None: position_ids = self.position_ids[:, past_key_values_length : seq_length + past_key_values_length] if inputs_embeds is None: inputs_embeds = self.word_embeddings(input_ids) embeddings = inputs_embeds if self.position_embedding_type == "absolute": position_embeddings = self.position_embeddings(position_ids) embeddings += position_embeddings embeddings = self.LayerNorm(embeddings) embeddings = self.dropout(embeddings) return embeddings class BertSelfAttention(nn.Module): def __init__(self, config, is_cross_attention): super().__init__() self.config = config if config.hidden_size % config.num_attention_heads != 0 and not hasattr(config, "embedding_size"): raise ValueError( "The hidden size (%d) is not a multiple of the number of attention " "heads (%d)" % (config.hidden_size, config.num_attention_heads) ) self.num_attention_heads = config.num_attention_heads self.attention_head_size = int(config.hidden_size / config.num_attention_heads) self.all_head_size = self.num_attention_heads * self.attention_head_size self.query = nn.Linear(config.hidden_size, self.all_head_size) if is_cross_attention: self.key = nn.Linear(config.encoder_width, self.all_head_size) self.value = nn.Linear(config.encoder_width, self.all_head_size) else: self.key = nn.Linear(config.hidden_size, self.all_head_size) self.value = nn.Linear(config.hidden_size, self.all_head_size) self.dropout = nn.Dropout(config.attention_probs_dropout_prob) self.position_embedding_type = getattr(config, "position_embedding_type", "absolute") if self.position_embedding_type == "relative_key" or self.position_embedding_type == "relative_key_query": self.max_position_embeddings = config.max_position_embeddings self.distance_embedding = nn.Embedding(2 * config.max_position_embeddings - 1, 
self.attention_head_size) self.save_attention = False def save_attn_gradients(self, attn_gradients): self.attn_gradients = attn_gradients def get_attn_gradients(self): return self.attn_gradients def save_attention_map(self, attention_map): self.attention_map = attention_map def get_attention_map(self): return self.attention_map def transpose_for_scores(self, x): new_x_shape = x.size()[:-1] + (self.num_attention_heads, self.attention_head_size) x = x.view(*new_x_shape) return x.permute(0, 2, 1, 3) def forward( self, hidden_states, attention_mask=None, head_mask=None, encoder_hidden_states=None, encoder_attention_mask=None, past_key_value=None, output_attentions=False, ): mixed_query_layer = self.query(hidden_states) # If this is instantiated as a cross-attention module, the keys # and values come from an encoder; the attention mask needs to be # such that the encoder's padding tokens are not attended to. is_cross_attention = encoder_hidden_states is not None if is_cross_attention: key_layer = self.transpose_for_scores(self.key(encoder_hidden_states)) value_layer = self.transpose_for_scores(self.value(encoder_hidden_states)) attention_mask = encoder_attention_mask elif past_key_value is not None: key_layer = self.transpose_for_scores(self.key(hidden_states)) value_layer = self.transpose_for_scores(self.value(hidden_states)) key_layer = torch.cat([past_key_value[0], key_layer], dim=2) value_layer = torch.cat([past_key_value[1], value_layer], dim=2) else: key_layer = self.transpose_for_scores(self.key(hidden_states)) value_layer = self.transpose_for_scores(self.value(hidden_states)) query_layer = self.transpose_for_scores(mixed_query_layer) past_key_value = (key_layer, value_layer) # Take the dot product between "query" and "key" to get the raw attention scores. 
attention_scores = torch.matmul(query_layer, key_layer.transpose(-1, -2)) if self.position_embedding_type == "relative_key" or self.position_embedding_type == "relative_key_query": seq_length = hidden_states.size()[1] position_ids_l = torch.arange(seq_length, dtype=torch.long, device=hidden_states.device).view(-1, 1) position_ids_r = torch.arange(seq_length, dtype=torch.long, device=hidden_states.device).view(1, -1) distance = position_ids_l - position_ids_r positional_embedding = self.distance_embedding(distance + self.max_position_embeddings - 1) positional_embedding = positional_embedding.to(dtype=query_layer.dtype) # fp16 compatibility if self.position_embedding_type == "relative_key": relative_position_scores = torch.einsum("bhld,lrd->bhlr", query_layer, positional_embedding) attention_scores = attention_scores + relative_position_scores elif self.position_embedding_type == "relative_key_query": relative_position_scores_query = torch.einsum("bhld,lrd->bhlr", query_layer, positional_embedding) relative_position_scores_key = torch.einsum("bhrd,lrd->bhlr", key_layer, positional_embedding) attention_scores = attention_scores + relative_position_scores_query + relative_position_scores_key attention_scores = attention_scores / math.sqrt(self.attention_head_size) if attention_mask is not None: # Apply the attention mask is (precomputed for all layers in BertModel forward() function) attention_scores = attention_scores + attention_mask # Normalize the attention scores to probabilities. attention_probs = nn.Softmax(dim=-1)(attention_scores) if is_cross_attention and self.save_attention: self.save_attention_map(attention_probs) attention_probs.register_hook(self.save_attn_gradients) # This is actually dropping out entire tokens to attend to, which might # seem a bit unusual, but is taken from the original Transformer paper. 
attention_probs_dropped = self.dropout(attention_probs) # Mask heads if we want to if head_mask is not None: attention_probs_dropped = attention_probs_dropped * head_mask context_layer = torch.matmul(attention_probs_dropped, value_layer) context_layer = context_layer.permute(0, 2, 1, 3).contiguous() new_context_layer_shape = context_layer.size()[:-2] + (self.all_head_size,) context_layer = context_layer.view(*new_context_layer_shape) outputs = (context_layer, attention_probs) if output_attentions else (context_layer,) outputs = outputs + (past_key_value,) return outputs class BertSelfOutput(nn.Module): def __init__(self, config): super().__init__() self.dense = nn.Linear(config.hidden_size, config.hidden_size) self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps) self.dropout = nn.Dropout(config.hidden_dropout_prob) def forward(self, hidden_states, input_tensor): hidden_states = self.dense(hidden_states) hidden_states = self.dropout(hidden_states) hidden_states = self.LayerNorm(hidden_states + input_tensor) return hidden_states class BertAttention(nn.Module): def __init__(self, config, is_cross_attention=False): super().__init__() self.self = BertSelfAttention(config, is_cross_attention) self.output = BertSelfOutput(config) self.pruned_heads = set() def prune_heads(self, heads): if len(heads) == 0: return heads, index = find_pruneable_heads_and_indices( heads, self.self.num_attention_heads, self.self.attention_head_size, self.pruned_heads ) # Prune linear layers self.self.query = prune_linear_layer(self.self.query, index) self.self.key = prune_linear_layer(self.self.key, index) self.self.value = prune_linear_layer(self.self.value, index) self.output.dense = prune_linear_layer(self.output.dense, index, dim=1) # Update hyper params and store pruned heads self.self.num_attention_heads = self.self.num_attention_heads - len(heads) self.self.all_head_size = self.self.attention_head_size * self.self.num_attention_heads self.pruned_heads = 
self.pruned_heads.union(heads) def forward( self, hidden_states, attention_mask=None, head_mask=None, encoder_hidden_states=None, encoder_attention_mask=None, past_key_value=None, output_attentions=False, ): self_outputs = self.self( hidden_states, attention_mask, head_mask, encoder_hidden_states, encoder_attention_mask, past_key_value, output_attentions, ) attention_output = self.output(self_outputs[0], hidden_states) outputs = (attention_output,) + self_outputs[1:] # add attentions if we output them return outputs class BertIntermediate(nn.Module): def __init__(self, config): super().__init__() self.dense = nn.Linear(config.hidden_size, config.intermediate_size) if isinstance(config.hidden_act, str): self.intermediate_act_fn = ACT2FN[config.hidden_act] else: self.intermediate_act_fn = config.hidden_act def forward(self, hidden_states): hidden_states = self.dense(hidden_states) hidden_states = self.intermediate_act_fn(hidden_states) return hidden_states class BertOutput(nn.Module): def __init__(self, config): super().__init__() self.dense = nn.Linear(config.intermediate_size, config.hidden_size) self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps) self.dropout = nn.Dropout(config.hidden_dropout_prob) def forward(self, hidden_states, input_tensor): hidden_states = self.dense(hidden_states) hidden_states = self.dropout(hidden_states) hidden_states = self.LayerNorm(hidden_states + input_tensor) return hidden_states class BertLayer(nn.Module): def __init__(self, config, layer_num): super().__init__() self.config = config self.chunk_size_feed_forward = config.chunk_size_feed_forward self.seq_len_dim = 1 self.attention = BertAttention(config) self.layer_num = layer_num if self.config.add_cross_attention: self.crossattention = BertAttention(config, is_cross_attention=self.config.add_cross_attention) self.intermediate = BertIntermediate(config) self.output = BertOutput(config) def forward( self, hidden_states, attention_mask=None, head_mask=None, 
encoder_hidden_states=None, encoder_attention_mask=None, past_key_value=None, output_attentions=False, mode=None, ): # decoder uni-directional self-attention cached key/values tuple is at positions 1,2 self_attn_past_key_value = past_key_value[:2] if past_key_value is not None else None self_attention_outputs = self.attention( hidden_states, attention_mask, head_mask, output_attentions=output_attentions, past_key_value=self_attn_past_key_value, ) attention_output = self_attention_outputs[0] outputs = self_attention_outputs[1:-1] present_key_value = self_attention_outputs[-1] if mode=='multimodal': assert encoder_hidden_states is not None, "encoder_hidden_states must be given for cross-attention layers" cross_attention_outputs = self.crossattention( attention_output, attention_mask, head_mask, encoder_hidden_states, encoder_attention_mask, output_attentions=output_attentions, ) attention_output = cross_attention_outputs[0] outputs = outputs + cross_attention_outputs[1:-1] # add cross attentions if we output attention weights layer_output = apply_chunking_to_forward( self.feed_forward_chunk, self.chunk_size_feed_forward, self.seq_len_dim, attention_output ) outputs = (layer_output,) + outputs outputs = outputs + (present_key_value,) return outputs def feed_forward_chunk(self, attention_output): intermediate_output = self.intermediate(attention_output) layer_output = self.output(intermediate_output, attention_output) return layer_output class BertEncoder(nn.Module): def __init__(self, config): super().__init__() self.config = config self.layer = nn.ModuleList([BertLayer(config,i) for i in range(config.num_hidden_layers)]) self.gradient_checkpointing = False def forward( self, hidden_states, attention_mask=None, head_mask=None, encoder_hidden_states=None, encoder_attention_mask=None, past_key_values=None, use_cache=None, output_attentions=False, output_hidden_states=False, return_dict=True, mode='multimodal', ): all_hidden_states = () if output_hidden_states else None 
all_self_attentions = () if output_attentions else None all_cross_attentions = () if output_attentions and self.config.add_cross_attention else None next_decoder_cache = () if use_cache else None for i in range(self.config.num_hidden_layers): layer_module = self.layer[i] if output_hidden_states: all_hidden_states = all_hidden_states + (hidden_states,) layer_head_mask = head_mask[i] if head_mask is not None else None past_key_value = past_key_values[i] if past_key_values is not None else None if self.gradient_checkpointing and self.training: if use_cache: logger.warn( "`use_cache=True` is incompatible with gradient checkpointing. Setting `use_cache=False`..." ) use_cache = False def create_custom_forward(module): def custom_forward(*inputs): return module(*inputs, past_key_value, output_attentions) return custom_forward layer_outputs = torch.utils.checkpoint.checkpoint( create_custom_forward(layer_module), hidden_states, attention_mask, layer_head_mask, encoder_hidden_states, encoder_attention_mask, mode=mode, ) else: layer_outputs = layer_module( hidden_states, attention_mask, layer_head_mask, encoder_hidden_states, encoder_attention_mask, past_key_value, output_attentions, mode=mode, ) hidden_states = layer_outputs[0] if use_cache: next_decoder_cache += (layer_outputs[-1],) if output_attentions: all_self_attentions = all_self_attentions + (layer_outputs[1],) if output_hidden_states: all_hidden_states = all_hidden_states + (hidden_states,) if not return_dict: return tuple( v for v in [ hidden_states, next_decoder_cache, all_hidden_states, all_self_attentions, all_cross_attentions, ] if v is not None ) return BaseModelOutputWithPastAndCrossAttentions( last_hidden_state=hidden_states, past_key_values=next_decoder_cache, hidden_states=all_hidden_states, attentions=all_self_attentions, cross_attentions=all_cross_attentions, ) class BertPooler(nn.Module): def __init__(self, config): super().__init__() self.dense = nn.Linear(config.hidden_size, config.hidden_size) 
self.activation = nn.Tanh() def forward(self, hidden_states): # We "pool" the model by simply taking the hidden state corresponding # to the first token. first_token_tensor = hidden_states[:, 0] pooled_output = self.dense(first_token_tensor) pooled_output = self.activation(pooled_output) return pooled_output class BertPredictionHeadTransform(nn.Module): def __init__(self, config): super().__init__() self.dense = nn.Linear(config.hidden_size, config.hidden_size) if isinstance(config.hidden_act, str): self.transform_act_fn = ACT2FN[config.hidden_act] else: self.transform_act_fn = config.hidden_act self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps) def forward(self, hidden_states): hidden_states = self.dense(hidden_states) hidden_states = self.transform_act_fn(hidden_states) hidden_states = self.LayerNorm(hidden_states) return hidden_states class BertLMPredictionHead(nn.Module): def __init__(self, config): super().__init__() self.transform = BertPredictionHeadTransform(config) # The output weights are the same as the input embeddings, but there is # an output-only bias for each token. self.decoder = nn.Linear(config.hidden_size, config.vocab_size, bias=False) self.bias = nn.Parameter(torch.zeros(config.vocab_size)) # Need a link between the two variables so that the bias is correctly resized with `resize_token_embeddings` self.decoder.bias = self.bias def forward(self, hidden_states): hidden_states = self.transform(hidden_states) hidden_states = self.decoder(hidden_states) return hidden_states class BertOnlyMLMHead(nn.Module): def __init__(self, config): super().__init__() self.predictions = BertLMPredictionHead(config) def forward(self, sequence_output): prediction_scores = self.predictions(sequence_output) return prediction_scores class BertPreTrainedModel(PreTrainedModel): """ An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained models. 
""" config_class = BertConfig base_model_prefix = "bert" _keys_to_ignore_on_load_missing = [r"position_ids"] def _init_weights(self, module): """ Initialize the weights """ if isinstance(module, (nn.Linear, nn.Embedding)): # Slightly different from the TF version which uses truncated_normal for initialization # cf https://github.com/pytorch/pytorch/pull/5617 module.weight.data.normal_(mean=0.0, std=self.config.initializer_range) elif isinstance(module, nn.LayerNorm): module.bias.data.zero_() module.weight.data.fill_(1.0) if isinstance(module, nn.Linear) and module.bias is not None: module.bias.data.zero_() class BertModel(BertPreTrainedModel): """ The model can behave as an encoder (with only self-attention) as well as a decoder, in which case a layer of cross-attention is added between the self-attention layers, following the architecture described in `Attention is all you need <https://arxiv.org/abs/1706.03762>`__ by Ashish Vaswani, Noam Shazeer, Niki Parmar, Jakob Uszkoreit, Llion Jones, Aidan N. Gomez, Lukasz Kaiser and Illia Polosukhin. argument and :obj:`add_cross_attention` set to :obj:`True`; an :obj:`encoder_hidden_states` is then expected as an input to the forward pass. """ def __init__(self, config, add_pooling_layer=True): super().__init__(config) self.config = config self.embeddings = BertEmbeddings(config) self.encoder = BertEncoder(config) self.pooler = BertPooler(config) if add_pooling_layer else None self.init_weights() def get_input_embeddings(self): return self.embeddings.word_embeddings def set_input_embeddings(self, value): self.embeddings.word_embeddings = value def _prune_heads(self, heads_to_prune): """ Prunes heads of the model. 
heads_to_prune: dict of {layer_num: list of heads to prune in this layer} See base class PreTrainedModel """ for layer, heads in heads_to_prune.items(): self.encoder.layer[layer].attention.prune_heads(heads) def get_extended_attention_mask(self, attention_mask: Tensor, input_shape: Tuple[int], device: device, is_decoder: bool) -> Tensor: """ Makes broadcastable attention and causal masks so that future and masked tokens are ignored. Arguments: attention_mask (:obj:`torch.Tensor`): Mask with ones indicating tokens to attend to, zeros for tokens to ignore. input_shape (:obj:`Tuple[int]`): The shape of the input to the model. device: (:obj:`torch.device`): The device of the input to the model. Returns: :obj:`torch.Tensor` The extended attention mask, with a the same dtype as :obj:`attention_mask.dtype`. """ # We can provide a self-attention mask of dimensions [batch_size, from_seq_length, to_seq_length] # ourselves in which case we just need to make it broadcastable to all heads. if attention_mask.dim() == 3: extended_attention_mask = attention_mask[:, None, :, :] elif attention_mask.dim() == 2: # Provided a padding mask of dimensions [batch_size, seq_length] # - if the model is a decoder, apply a causal mask in addition to the padding mask # - if the model is an encoder, make the mask broadcastable to [batch_size, num_heads, seq_length, seq_length] if is_decoder: batch_size, seq_length = input_shape seq_ids = torch.arange(seq_length, device=device) causal_mask = seq_ids[None, None, :].repeat(batch_size, seq_length, 1) <= seq_ids[None, :, None] # in case past_key_values are used we need to add a prefix ones mask to the causal mask # causal and attention masks must have same type with pytorch version < 1.3 causal_mask = causal_mask.to(attention_mask.dtype) if causal_mask.shape[1] < attention_mask.shape[1]: prefix_seq_len = attention_mask.shape[1] - causal_mask.shape[1] causal_mask = torch.cat( [ torch.ones((batch_size, seq_length, prefix_seq_len), device=device, 
dtype=causal_mask.dtype), causal_mask, ], axis=-1, ) extended_attention_mask = causal_mask[:, None, :, :] * attention_mask[:, None, None, :] else: extended_attention_mask = attention_mask[:, None, None, :] else: raise ValueError( "Wrong shape for input_ids (shape {}) or attention_mask (shape {})".format( input_shape, attention_mask.shape ) ) # Since attention_mask is 1.0 for positions we want to attend and 0.0 for # masked positions, this operation will create a tensor which is 0.0 for # positions we want to attend and -10000.0 for masked positions. # Since we are adding it to the raw scores before the softmax, this is # effectively the same as removing these entirely. extended_attention_mask = extended_attention_mask.to(dtype=self.dtype) # fp16 compatibility extended_attention_mask = (1.0 - extended_attention_mask) * -10000.0 return extended_attention_mask def forward( self, input_ids=None, attention_mask=None, position_ids=None, head_mask=None, inputs_embeds=None, encoder_embeds=None, encoder_hidden_states=None, encoder_attention_mask=None, past_key_values=None, use_cache=None, output_attentions=None, output_hidden_states=None, return_dict=None, is_decoder=False, mode='multimodal', ): r""" encoder_hidden_states (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, sequence_length, hidden_size)`, `optional`): Sequence of hidden-states at the output of the last layer of the encoder. Used in the cross-attention if the model is configured as a decoder. encoder_attention_mask (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, sequence_length)`, `optional`): Mask to avoid performing attention on the padding token indices of the encoder input. This mask is used in the cross-attention if the model is configured as a decoder. Mask values selected in ``[0, 1]``: - 1 for tokens that are **not masked**, - 0 for tokens that are **masked**. 
past_key_values (:obj:`tuple(tuple(torch.FloatTensor))` of length :obj:`config.n_layers` with each tuple having 4 tensors of shape :obj:`(batch_size, num_heads, sequence_length - 1, embed_size_per_head)`): Contains precomputed key and value hidden states of the attention blocks. Can be used to speed up decoding. If :obj:`past_key_values` are used, the user can optionally input only the last :obj:`decoder_input_ids` (those that don't have their past key value states given to this model) of shape :obj:`(batch_size, 1)` instead of all :obj:`decoder_input_ids` of shape :obj:`(batch_size, sequence_length)`. use_cache (:obj:`bool`, `optional`): If set to :obj:`True`, :obj:`past_key_values` key value states are returned and can be used to speed up decoding (see :obj:`past_key_values`). """ output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions output_hidden_states = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) return_dict = return_dict if return_dict is not None else self.config.use_return_dict if is_decoder: use_cache = use_cache if use_cache is not None else self.config.use_cache else: use_cache = False if input_ids is not None and inputs_embeds is not None: raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time") elif input_ids is not None: input_shape = input_ids.size() batch_size, seq_length = input_shape device = input_ids.device elif inputs_embeds is not None: input_shape = inputs_embeds.size()[:-1] batch_size, seq_length = input_shape device = inputs_embeds.device elif encoder_embeds is not None: input_shape = encoder_embeds.size()[:-1] batch_size, seq_length = input_shape device = encoder_embeds.device else: raise ValueError("You have to specify either input_ids or inputs_embeds or encoder_embeds") # past_key_values_length past_key_values_length = past_key_values[0][0].shape[2] if past_key_values is not None else 0 if 
attention_mask is None: attention_mask = torch.ones(((batch_size, seq_length + past_key_values_length)), device=device) # We can provide a self-attention mask of dimensions [batch_size, from_seq_length, to_seq_length] # ourselves in which case we just need to make it broadcastable to all heads. extended_attention_mask: torch.Tensor = self.get_extended_attention_mask(attention_mask, input_shape, device, is_decoder) # If a 2D or 3D attention mask is provided for the cross-attention # we need to make broadcastable to [batch_size, num_heads, seq_length, seq_length] if encoder_hidden_states is not None: if type(encoder_hidden_states) == list: encoder_batch_size, encoder_sequence_length, _ = encoder_hidden_states[0].size() else: encoder_batch_size, encoder_sequence_length, _ = encoder_hidden_states.size() encoder_hidden_shape = (encoder_batch_size, encoder_sequence_length) if type(encoder_attention_mask) == list: encoder_extended_attention_mask = [self.invert_attention_mask(mask) for mask in encoder_attention_mask] elif encoder_attention_mask is None: encoder_attention_mask = torch.ones(encoder_hidden_shape, device=device) encoder_extended_attention_mask = self.invert_attention_mask(encoder_attention_mask) else: encoder_extended_attention_mask = self.invert_attention_mask(encoder_attention_mask) else: encoder_extended_attention_mask = None # Prepare head mask if needed # 1.0 in head_mask indicate we keep the head
python
MIT
eeb5c648e6c2a25c8f6f8038edfe75d73c811614
2026-01-05T07:14:33.752935Z
true
liuff19/DreamReward
https://github.com/liuff19/DreamReward/blob/eeb5c648e6c2a25c8f6f8038edfe75d73c811614/Reward3D/models/BLIP/blip.py
Reward3D/models/BLIP/blip.py
''' * Adapted from BLIP (https://github.com/salesforce/BLIP) ''' import warnings warnings.filterwarnings("ignore") import torch import os from urllib.parse import urlparse from timm.models.hub import download_cached_file from transformers import BertTokenizer from .vit import VisionTransformer, interpolate_pos_embed def init_tokenizer(): tokenizer = BertTokenizer.from_pretrained('bert-base-uncased') tokenizer.add_special_tokens({'bos_token':'[DEC]'}) tokenizer.add_special_tokens({'additional_special_tokens':['[ENC]']}) tokenizer.enc_token_id = tokenizer.additional_special_tokens_ids[0] return tokenizer def create_vit(vit, image_size, use_grad_checkpointing=False, ckpt_layer=0, drop_path_rate=0): assert vit in ['base', 'large'], "vit parameter must be base or large" if vit=='base': vision_width = 768 visual_encoder = VisionTransformer(img_size=image_size, patch_size=16, embed_dim=vision_width, depth=12, num_heads=12, use_grad_checkpointing=use_grad_checkpointing, ckpt_layer=ckpt_layer, drop_path_rate=0 or drop_path_rate ) elif vit=='large': vision_width = 1024 visual_encoder = VisionTransformer(img_size=image_size, patch_size=16, embed_dim=vision_width, depth=24, num_heads=16, use_grad_checkpointing=use_grad_checkpointing, ckpt_layer=ckpt_layer, drop_path_rate=0.1 or drop_path_rate ) return visual_encoder, vision_width def is_url(url_or_filename): parsed = urlparse(url_or_filename) return parsed.scheme in ("http", "https") def load_checkpoint(model,url_or_filename): if is_url(url_or_filename): cached_file = download_cached_file(url_or_filename, check_hash=False, progress=True) checkpoint = torch.load(cached_file, map_location='cpu') elif os.path.isfile(url_or_filename): checkpoint = torch.load(url_or_filename, map_location='cpu') else: raise RuntimeError('checkpoint url or path is invalid') state_dict = checkpoint['model'] state_dict['visual_encoder.pos_embed'] = interpolate_pos_embed(state_dict['visual_encoder.pos_embed'],model.visual_encoder) if 
'visual_encoder_m.pos_embed' in model.state_dict().keys(): state_dict['visual_encoder_m.pos_embed'] = interpolate_pos_embed(state_dict['visual_encoder_m.pos_embed'], model.visual_encoder_m) for key in model.state_dict().keys(): if key in state_dict.keys(): if state_dict[key].shape!=model.state_dict()[key].shape: print(key, ": ", state_dict[key].shape, ', ', model.state_dict()[key].shape) del state_dict[key] msg = model.load_state_dict(state_dict,strict=False) print('load checkpoint from %s'%url_or_filename) return model,msg
python
MIT
eeb5c648e6c2a25c8f6f8038edfe75d73c811614
2026-01-05T07:14:33.752935Z
false
liuff19/DreamReward
https://github.com/liuff19/DreamReward/blob/eeb5c648e6c2a25c8f6f8038edfe75d73c811614/Reward3D/models/BLIP/__init__.py
Reward3D/models/BLIP/__init__.py
#from .blip_pretrain import * from .blip import load_checkpoint from .blip_pretrain import BLIP_Pretrain
python
MIT
eeb5c648e6c2a25c8f6f8038edfe75d73c811614
2026-01-05T07:14:33.752935Z
false
liuff19/DreamReward
https://github.com/liuff19/DreamReward/blob/eeb5c648e6c2a25c8f6f8038edfe75d73c811614/extern/zero123.py
extern/zero123.py
# Copyright 2023 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import inspect import math import warnings from typing import Any, Callable, Dict, List, Optional, Union import PIL import torch import torchvision.transforms.functional as TF from diffusers.configuration_utils import ConfigMixin, FrozenDict, register_to_config from diffusers.image_processor import VaeImageProcessor from diffusers.models import AutoencoderKL, UNet2DConditionModel from diffusers.models.modeling_utils import ModelMixin from diffusers.pipelines.pipeline_utils import DiffusionPipeline from diffusers.pipelines.stable_diffusion import StableDiffusionPipelineOutput from diffusers.pipelines.stable_diffusion.safety_checker import ( StableDiffusionSafetyChecker, ) from diffusers.schedulers import KarrasDiffusionSchedulers from diffusers.utils import deprecate, is_accelerate_available, logging from diffusers.utils.torch_utils import randn_tensor from packaging import version from transformers import CLIPImageProcessor, CLIPVisionModelWithProjection logger = logging.get_logger(__name__) # pylint: disable=invalid-name class CLIPCameraProjection(ModelMixin, ConfigMixin): """ A Projection layer for CLIP embedding and camera embedding. Parameters: embedding_dim (`int`, *optional*, defaults to 768): The dimension of the model input `clip_embed` additional_embeddings (`int`, *optional*, defaults to 4): The number of additional tokens appended to the projected `hidden_states`. 
The actual length of the used `hidden_states` is `num_embeddings + additional_embeddings`. """ @register_to_config def __init__(self, embedding_dim: int = 768, additional_embeddings: int = 4): super().__init__() self.embedding_dim = embedding_dim self.additional_embeddings = additional_embeddings self.input_dim = self.embedding_dim + self.additional_embeddings self.output_dim = self.embedding_dim self.proj = torch.nn.Linear(self.input_dim, self.output_dim) def forward( self, embedding: torch.FloatTensor, ): """ The [`PriorTransformer`] forward method. Args: hidden_states (`torch.FloatTensor` of shape `(batch_size, input_dim)`): The currently input embeddings. Returns: The output embedding projection (`torch.FloatTensor` of shape `(batch_size, output_dim)`). """ proj_embedding = self.proj(embedding) return proj_embedding class Zero123Pipeline(DiffusionPipeline): r""" Pipeline to generate variations from an input image using Stable Diffusion. This model inherits from [`DiffusionPipeline`]. Check the superclass documentation for the generic methods the library implements for all the pipelines (such as downloading or saving, running on a particular device, etc.) Args: vae ([`AutoencoderKL`]): Variational Auto-Encoder (VAE) Model to encode and decode images to and from latent representations. image_encoder ([`CLIPVisionModelWithProjection`]): Frozen CLIP image-encoder. Stable Diffusion Image Variation uses the vision portion of [CLIP](https://huggingface.co/docs/transformers/model_doc/clip#transformers.CLIPVisionModelWithProjection), specifically the [clip-vit-large-patch14](https://huggingface.co/openai/clip-vit-large-patch14) variant. unet ([`UNet2DConditionModel`]): Conditional U-Net architecture to denoise the encoded image latents. scheduler ([`SchedulerMixin`]): A scheduler to be used in combination with `unet` to denoise the encoded image latents. Can be one of [`DDIMScheduler`], [`LMSDiscreteScheduler`], or [`PNDMScheduler`]. 
safety_checker ([`StableDiffusionSafetyChecker`]): Classification module that estimates whether generated images could be considered offensive or harmful. Please, refer to the [model card](https://huggingface.co/runwayml/stable-diffusion-v1-5) for details. feature_extractor ([`CLIPImageProcessor`]): Model that extracts features from generated images to be used as inputs for the `safety_checker`. """ # TODO: feature_extractor is required to encode images (if they are in PIL format), # we should give a descriptive message if the pipeline doesn't have one. _optional_components = ["safety_checker"] def __init__( self, vae: AutoencoderKL, image_encoder: CLIPVisionModelWithProjection, unet: UNet2DConditionModel, scheduler: KarrasDiffusionSchedulers, safety_checker: StableDiffusionSafetyChecker, feature_extractor: CLIPImageProcessor, clip_camera_projection: CLIPCameraProjection, requires_safety_checker: bool = True, ): super().__init__() if safety_checker is None and requires_safety_checker: logger.warn( f"You have disabled the safety checker for {self.__class__} by passing `safety_checker=None`. Ensure" " that you abide to the conditions of the Stable Diffusion license and do not expose unfiltered" " results in services or applications open to the public. Both the diffusers team and Hugging Face" " strongly recommend to keep the safety filter enabled in all public facing circumstances, disabling" " it only for use-cases that involve analyzing network behavior or auditing its results. For more" " information, please have a look at https://github.com/huggingface/diffusers/pull/254 ." ) if safety_checker is not None and feature_extractor is None: raise ValueError( "Make sure to define a feature extractor when loading {self.__class__} if you want to use the safety" " checker. If you do not want to use the safety checker, you can pass `'safety_checker=None'` instead." 
) is_unet_version_less_0_9_0 = hasattr( unet.config, "_diffusers_version" ) and version.parse( version.parse(unet.config._diffusers_version).base_version ) < version.parse( "0.9.0.dev0" ) is_unet_sample_size_less_64 = ( hasattr(unet.config, "sample_size") and unet.config.sample_size < 64 ) if is_unet_version_less_0_9_0 and is_unet_sample_size_less_64: deprecation_message = ( "The configuration file of the unet has set the default `sample_size` to smaller than" " 64 which seems highly unlikely .If you're checkpoint is a fine-tuned version of any of the" " following: \n- CompVis/stable-diffusion-v1-4 \n- CompVis/stable-diffusion-v1-3 \n-" " CompVis/stable-diffusion-v1-2 \n- CompVis/stable-diffusion-v1-1 \n- runwayml/stable-diffusion-v1-5" " \n- runwayml/stable-diffusion-inpainting \n you should change 'sample_size' to 64 in the" " configuration file. Please make sure to update the config accordingly as leaving `sample_size=32`" " in the config might lead to incorrect results in future versions. If you have downloaded this" " checkpoint from the Hugging Face Hub, it would be very nice if you could open a Pull request for" " the `unet/config.json` file" ) deprecate( "sample_size<64", "1.0.0", deprecation_message, standard_warn=False ) new_config = dict(unet.config) new_config["sample_size"] = 64 unet._internal_dict = FrozenDict(new_config) self.register_modules( vae=vae, image_encoder=image_encoder, unet=unet, scheduler=scheduler, safety_checker=safety_checker, feature_extractor=feature_extractor, clip_camera_projection=clip_camera_projection, ) self.vae_scale_factor = 2 ** (len(self.vae.config.block_out_channels) - 1) self.image_processor = VaeImageProcessor(vae_scale_factor=self.vae_scale_factor) self.register_to_config(requires_safety_checker=requires_safety_checker) def enable_sequential_cpu_offload(self, gpu_id=0): r""" Offloads all models to CPU using accelerate, significantly reducing memory usage. 
When called, unet, text_encoder, vae and safety checker have their state dicts saved to CPU and then are moved to a `torch.device('meta') and loaded to GPU only when their specific submodule has its `forward` method called. """ if is_accelerate_available(): from accelerate import cpu_offload else: raise ImportError("Please install accelerate via `pip install accelerate`") device = torch.device(f"cuda:{gpu_id}") for cpu_offloaded_model in [ self.unet, self.image_encoder, self.vae, self.safety_checker, ]: if cpu_offloaded_model is not None: cpu_offload(cpu_offloaded_model, device) @property # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline._execution_device def _execution_device(self): r""" Returns the device on which the pipeline's models will be executed. After calling `pipeline.enable_sequential_cpu_offload()` the execution device can only be inferred from Accelerate's module hooks. """ if not hasattr(self.unet, "_hf_hook"): return self.device for module in self.unet.modules(): if ( hasattr(module, "_hf_hook") and hasattr(module._hf_hook, "execution_device") and module._hf_hook.execution_device is not None ): return torch.device(module._hf_hook.execution_device) return self.device def _encode_image( self, image, elevation, azimuth, distance, device, num_images_per_prompt, do_classifier_free_guidance, clip_image_embeddings=None, image_camera_embeddings=None, ): dtype = next(self.image_encoder.parameters()).dtype if image_camera_embeddings is None: if image is None: assert clip_image_embeddings is not None image_embeddings = clip_image_embeddings.to(device=device, dtype=dtype) else: if not isinstance(image, torch.Tensor): image = self.feature_extractor( images=image, return_tensors="pt" ).pixel_values image = image.to(device=device, dtype=dtype) image_embeddings = self.image_encoder(image).image_embeds image_embeddings = image_embeddings.unsqueeze(1) bs_embed, seq_len, _ = image_embeddings.shape if 
isinstance(elevation, float): elevation = torch.as_tensor( [elevation] * bs_embed, dtype=dtype, device=device ) if isinstance(azimuth, float): azimuth = torch.as_tensor( [azimuth] * bs_embed, dtype=dtype, device=device ) if isinstance(distance, float): distance = torch.as_tensor( [distance] * bs_embed, dtype=dtype, device=device ) camera_embeddings = torch.stack( [ torch.deg2rad(elevation), torch.sin(torch.deg2rad(azimuth)), torch.cos(torch.deg2rad(azimuth)), distance, ], dim=-1, )[:, None, :] image_embeddings = torch.cat([image_embeddings, camera_embeddings], dim=-1) # project (image, camera) embeddings to the same dimension as clip embeddings image_embeddings = self.clip_camera_projection(image_embeddings) else: image_embeddings = image_camera_embeddings.to(device=device, dtype=dtype) bs_embed, seq_len, _ = image_embeddings.shape # duplicate image embeddings for each generation per prompt, using mps friendly method image_embeddings = image_embeddings.repeat(1, num_images_per_prompt, 1) image_embeddings = image_embeddings.view( bs_embed * num_images_per_prompt, seq_len, -1 ) if do_classifier_free_guidance: negative_prompt_embeds = torch.zeros_like(image_embeddings) # For classifier free guidance, we need to do two forward passes. 
# Here we concatenate the unconditional and text embeddings into a single batch # to avoid doing two forward passes image_embeddings = torch.cat([negative_prompt_embeds, image_embeddings]) return image_embeddings # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.run_safety_checker def run_safety_checker(self, image, device, dtype): if self.safety_checker is None: has_nsfw_concept = None else: if torch.is_tensor(image): feature_extractor_input = self.image_processor.postprocess( image, output_type="pil" ) else: feature_extractor_input = self.image_processor.numpy_to_pil(image) safety_checker_input = self.feature_extractor( feature_extractor_input, return_tensors="pt" ).to(device) image, has_nsfw_concept = self.safety_checker( images=image, clip_input=safety_checker_input.pixel_values.to(dtype) ) return image, has_nsfw_concept # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.decode_latents def decode_latents(self, latents): warnings.warn( "The decode_latents method is deprecated and will be removed in a future version. Please" " use VaeImageProcessor instead", FutureWarning, ) latents = 1 / self.vae.config.scaling_factor * latents image = self.vae.decode(latents, return_dict=False)[0] image = (image / 2 + 0.5).clamp(0, 1) # we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16 image = image.cpu().permute(0, 2, 3, 1).float().numpy() return image # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.prepare_extra_step_kwargs def prepare_extra_step_kwargs(self, generator, eta): # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature # eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers. 
# eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502 # and should be between [0, 1] accepts_eta = "eta" in set( inspect.signature(self.scheduler.step).parameters.keys() ) extra_step_kwargs = {} if accepts_eta: extra_step_kwargs["eta"] = eta # check if the scheduler accepts generator accepts_generator = "generator" in set( inspect.signature(self.scheduler.step).parameters.keys() ) if accepts_generator: extra_step_kwargs["generator"] = generator return extra_step_kwargs def check_inputs(self, image, height, width, callback_steps): # TODO: check image size or adjust image size to (height, width) if height % 8 != 0 or width % 8 != 0: raise ValueError( f"`height` and `width` have to be divisible by 8 but are {height} and {width}." ) if (callback_steps is None) or ( callback_steps is not None and (not isinstance(callback_steps, int) or callback_steps <= 0) ): raise ValueError( f"`callback_steps` has to be a positive integer but is {callback_steps} of type" f" {type(callback_steps)}." ) # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.prepare_latents def prepare_latents( self, batch_size, num_channels_latents, height, width, dtype, device, generator, latents=None, ): shape = ( batch_size, num_channels_latents, height // self.vae_scale_factor, width // self.vae_scale_factor, ) if isinstance(generator, list) and len(generator) != batch_size: raise ValueError( f"You have passed a list of generators of length {len(generator)}, but requested an effective batch" f" size of {batch_size}. Make sure the batch size matches the length of the generators." 
) if latents is None: latents = randn_tensor( shape, generator=generator, device=device, dtype=dtype ) else: latents = latents.to(device) # scale the initial noise by the standard deviation required by the scheduler latents = latents * self.scheduler.init_noise_sigma return latents def _get_latent_model_input( self, latents: torch.FloatTensor, image: Optional[ Union[PIL.Image.Image, List[PIL.Image.Image], torch.FloatTensor] ], num_images_per_prompt: int, do_classifier_free_guidance: bool, image_latents: Optional[torch.FloatTensor] = None, ): if isinstance(image, PIL.Image.Image): image_pt = TF.to_tensor(image).unsqueeze(0).to(latents) elif isinstance(image, list): image_pt = torch.stack([TF.to_tensor(img) for img in image], dim=0).to( latents ) elif isinstance(image, torch.Tensor): image_pt = image else: image_pt = None if image_pt is None: assert image_latents is not None image_pt = image_latents.repeat_interleave(num_images_per_prompt, dim=0) else: image_pt = image_pt * 2.0 - 1.0 # scale to [-1, 1] # FIXME: encoded latents should be multiplied with self.vae.config.scaling_factor # but zero123 was not trained this way image_pt = self.vae.encode(image_pt).latent_dist.mode() image_pt = image_pt.repeat_interleave(num_images_per_prompt, dim=0) if do_classifier_free_guidance: latent_model_input = torch.cat( [ torch.cat([latents, latents], dim=0), torch.cat([torch.zeros_like(image_pt), image_pt], dim=0), ], dim=1, ) else: latent_model_input = torch.cat([latents, image_pt], dim=1) return latent_model_input @torch.no_grad() def __call__( self, image: Optional[ Union[PIL.Image.Image, List[PIL.Image.Image], torch.FloatTensor] ] = None, elevation: Optional[Union[float, torch.FloatTensor]] = None, azimuth: Optional[Union[float, torch.FloatTensor]] = None, distance: Optional[Union[float, torch.FloatTensor]] = None, height: Optional[int] = None, width: Optional[int] = None, num_inference_steps: int = 50, guidance_scale: float = 3.0, num_images_per_prompt: int = 1, eta: float = 
0.0, generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None, latents: Optional[torch.FloatTensor] = None, clip_image_embeddings: Optional[torch.FloatTensor] = None, image_camera_embeddings: Optional[torch.FloatTensor] = None, image_latents: Optional[torch.FloatTensor] = None, output_type: Optional[str] = "pil", return_dict: bool = True, callback: Optional[Callable[[int, int, torch.FloatTensor], None]] = None, callback_steps: int = 1, cross_attention_kwargs: Optional[Dict[str, Any]] = None, ): r""" Function invoked when calling the pipeline for generation. Args: image (`PIL.Image.Image` or `List[PIL.Image.Image]` or `torch.FloatTensor`): The image or images to guide the image generation. If you provide a tensor, it needs to comply with the configuration of [this](https://huggingface.co/lambdalabs/sd-image-variations-diffusers/blob/main/feature_extractor/preprocessor_config.json) `CLIPImageProcessor` height (`int`, *optional*, defaults to self.unet.config.sample_size * self.vae_scale_factor): The height in pixels of the generated image. width (`int`, *optional*, defaults to self.unet.config.sample_size * self.vae_scale_factor): The width in pixels of the generated image. num_inference_steps (`int`, *optional*, defaults to 50): The number of denoising steps. More denoising steps usually lead to a higher quality image at the expense of slower inference. guidance_scale (`float`, *optional*, defaults to 7.5): Guidance scale as defined in [Classifier-Free Diffusion Guidance](https://arxiv.org/abs/2207.12598). `guidance_scale` is defined as `w` of equation 2. of [Imagen Paper](https://arxiv.org/pdf/2205.11487.pdf). Guidance scale is enabled by setting `guidance_scale > 1`. Higher guidance scale encourages to generate images that are closely linked to the text `prompt`, usually at the expense of lower image quality. num_images_per_prompt (`int`, *optional*, defaults to 1): The number of images to generate per prompt. 
eta (`float`, *optional*, defaults to 0.0): Corresponds to parameter eta (η) in the DDIM paper: https://arxiv.org/abs/2010.02502. Only applies to [`schedulers.DDIMScheduler`], will be ignored for others. generator (`torch.Generator`, *optional*): One or a list of [torch generator(s)](https://pytorch.org/docs/stable/generated/torch.Generator.html) to make generation deterministic. latents (`torch.FloatTensor`, *optional*): Pre-generated noisy latents, sampled from a Gaussian distribution, to be used as inputs for image generation. Can be used to tweak the same generation with different prompts. If not provided, a latents tensor will ge generated by sampling using the supplied random `generator`. output_type (`str`, *optional*, defaults to `"pil"`): The output format of the generate image. Choose between [PIL](https://pillow.readthedocs.io/en/stable/): `PIL.Image.Image` or `np.array`. return_dict (`bool`, *optional*, defaults to `True`): Whether or not to return a [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] instead of a plain tuple. callback (`Callable`, *optional*): A function that will be called every `callback_steps` steps during inference. The function will be called with the following arguments: `callback(step: int, timestep: int, latents: torch.FloatTensor)`. callback_steps (`int`, *optional*, defaults to 1): The frequency at which the `callback` function will be called. If not specified, the callback will be called at every step. Returns: [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] or `tuple`: [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] if `return_dict` is True, otherwise a `tuple. When returning a tuple, the first element is a list with the generated images, and the second element is a list of `bool`s denoting whether the corresponding generated image likely represents "not-safe-for-work" (nsfw) content, according to the `safety_checker`. """ # 0. 
Default height and width to unet height = height or self.unet.config.sample_size * self.vae_scale_factor width = width or self.unet.config.sample_size * self.vae_scale_factor # 1. Check inputs. Raise error if not correct # TODO: check input elevation, azimuth, and distance # TODO: check image, clip_image_embeddings, image_latents self.check_inputs(image, height, width, callback_steps) # 2. Define call parameters if isinstance(image, PIL.Image.Image): batch_size = 1 elif isinstance(image, list): batch_size = len(image) elif isinstance(image, torch.Tensor): batch_size = image.shape[0] else: assert image_latents is not None assert ( clip_image_embeddings is not None or image_camera_embeddings is not None ) batch_size = image_latents.shape[0] device = self._execution_device # here `guidance_scale` is defined analog to the guidance weight `w` of equation (2) # of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1` # corresponds to doing no classifier free guidance. do_classifier_free_guidance = guidance_scale > 1.0 # 3. Encode input image if isinstance(image, PIL.Image.Image) or isinstance(image, list): pil_image = image elif isinstance(image, torch.Tensor): pil_image = [TF.to_pil_image(image[i]) for i in range(image.shape[0])] else: pil_image = None image_embeddings = self._encode_image( pil_image, elevation, azimuth, distance, device, num_images_per_prompt, do_classifier_free_guidance, clip_image_embeddings, image_camera_embeddings, ) # 4. Prepare timesteps self.scheduler.set_timesteps(num_inference_steps, device=device) timesteps = self.scheduler.timesteps # 5. Prepare latent variables # num_channels_latents = self.unet.config.in_channels num_channels_latents = 4 # FIXME: hard-coded latents = self.prepare_latents( batch_size * num_images_per_prompt, num_channels_latents, height, width, image_embeddings.dtype, device, generator, latents, ) # 6. Prepare extra step kwargs. 
TODO: Logic should ideally just be moved out of the pipeline extra_step_kwargs = self.prepare_extra_step_kwargs(generator, eta) # 7. Denoising loop num_warmup_steps = len(timesteps) - num_inference_steps * self.scheduler.order with self.progress_bar(total=num_inference_steps) as progress_bar: for i, t in enumerate(timesteps): # expand the latents if we are doing classifier free guidance latent_model_input = self._get_latent_model_input( latents, image, num_images_per_prompt, do_classifier_free_guidance, image_latents, ) latent_model_input = self.scheduler.scale_model_input( latent_model_input, t ) # predict the noise residual noise_pred = self.unet( latent_model_input, t, encoder_hidden_states=image_embeddings, cross_attention_kwargs=cross_attention_kwargs, ).sample # perform guidance if do_classifier_free_guidance: noise_pred_uncond, noise_pred_text = noise_pred.chunk(2) noise_pred = noise_pred_uncond + guidance_scale * ( noise_pred_text - noise_pred_uncond ) # compute the previous noisy sample x_t -> x_t-1 latents = self.scheduler.step( noise_pred, t, latents, **extra_step_kwargs ).prev_sample # call the callback, if provided if i == len(timesteps) - 1 or ( (i + 1) > num_warmup_steps and (i + 1) % self.scheduler.order == 0 ): progress_bar.update() if callback is not None and i % callback_steps == 0: callback(i, t, latents) if not output_type == "latent": image = self.vae.decode( latents / self.vae.config.scaling_factor, return_dict=False )[0] image, has_nsfw_concept = self.run_safety_checker( image, device, image_embeddings.dtype ) else: image = latents has_nsfw_concept = None if has_nsfw_concept is None: do_denormalize = [True] * image.shape[0] else: do_denormalize = [not has_nsfw for has_nsfw in has_nsfw_concept] image = self.image_processor.postprocess( image, output_type=output_type, do_denormalize=do_denormalize ) if not return_dict: return (image, has_nsfw_concept) return StableDiffusionPipelineOutput( images=image, nsfw_content_detected=has_nsfw_concept )
python
MIT
eeb5c648e6c2a25c8f6f8038edfe75d73c811614
2026-01-05T07:14:33.752935Z
false
liuff19/DreamReward
https://github.com/liuff19/DreamReward/blob/eeb5c648e6c2a25c8f6f8038edfe75d73c811614/extern/__init__.py
extern/__init__.py
python
MIT
eeb5c648e6c2a25c8f6f8038edfe75d73c811614
2026-01-05T07:14:33.752935Z
false
liuff19/DreamReward
https://github.com/liuff19/DreamReward/blob/eeb5c648e6c2a25c8f6f8038edfe75d73c811614/extern/ldm_zero123/extras.py
extern/ldm_zero123/extras.py
import logging from contextlib import contextmanager from pathlib import Path import torch from omegaconf import OmegaConf from extern.ldm_zero123.util import instantiate_from_config @contextmanager def all_logging_disabled(highest_level=logging.CRITICAL): """ A context manager that will prevent any logging messages triggered during the body from being processed. :param highest_level: the maximum logging level in use. This would only need to be changed if a custom level greater than CRITICAL is defined. https://gist.github.com/simon-weber/7853144 """ # two kind-of hacks here: # * can't get the highest logging level in effect => delegate to the user # * can't get the current module-level override => use an undocumented # (but non-private!) interface previous_level = logging.root.manager.disable logging.disable(highest_level) try: yield finally: logging.disable(previous_level) def load_training_dir(train_dir, device, epoch="last"): """Load a checkpoint and config from training directory""" train_dir = Path(train_dir) ckpt = list(train_dir.rglob(f"*{epoch}.ckpt")) assert len(ckpt) == 1, f"found {len(ckpt)} matching ckpt files" config = list(train_dir.rglob(f"*-project.yaml")) assert len(ckpt) > 0, f"didn't find any config in {train_dir}" if len(config) > 1: print(f"found {len(config)} matching config files") config = sorted(config)[-1] print(f"selecting {config}") else: config = config[0] config = OmegaConf.load(config) return load_model_from_config(config, ckpt[0], device) def load_model_from_config(config, ckpt, device="cpu", verbose=False): """Loads a model from config and a ckpt if config is a path will use omegaconf to load """ if isinstance(config, (str, Path)): config = OmegaConf.load(config) with all_logging_disabled(): print(f"Loading model from {ckpt}") pl_sd = torch.load(ckpt, map_location="cpu") global_step = pl_sd["global_step"] sd = pl_sd["state_dict"] model = instantiate_from_config(config.model) m, u = model.load_state_dict(sd, strict=False) if len(m) 
> 0 and verbose: print("missing keys:") print(m) if len(u) > 0 and verbose: print("unexpected keys:") model.to(device) model.eval() model.cond_stage_model.device = device return model
python
MIT
eeb5c648e6c2a25c8f6f8038edfe75d73c811614
2026-01-05T07:14:33.752935Z
false
liuff19/DreamReward
https://github.com/liuff19/DreamReward/blob/eeb5c648e6c2a25c8f6f8038edfe75d73c811614/extern/ldm_zero123/guidance.py
extern/ldm_zero123/guidance.py
import abc from typing import List, Tuple import matplotlib.pyplot as plt import numpy as np import torch from IPython.display import clear_output from scipy import interpolate class GuideModel(torch.nn.Module, abc.ABC): def __init__(self) -> None: super().__init__() @abc.abstractmethod def preprocess(self, x_img): pass @abc.abstractmethod def compute_loss(self, inp): pass class Guider(torch.nn.Module): def __init__(self, sampler, guide_model, scale=1.0, verbose=False): """Apply classifier guidance Specify a guidance scale as either a scalar Or a schedule as a list of tuples t = 0->1 and scale, e.g. [(0, 10), (0.5, 20), (1, 50)] """ super().__init__() self.sampler = sampler self.index = 0 self.show = verbose self.guide_model = guide_model self.history = [] if isinstance(scale, (Tuple, List)): times = np.array([x[0] for x in scale]) values = np.array([x[1] for x in scale]) self.scale_schedule = {"times": times, "values": values} else: self.scale_schedule = float(scale) self.ddim_timesteps = sampler.ddim_timesteps self.ddpm_num_timesteps = sampler.ddpm_num_timesteps def get_scales(self): if isinstance(self.scale_schedule, float): return len(self.ddim_timesteps) * [self.scale_schedule] interpolater = interpolate.interp1d( self.scale_schedule["times"], self.scale_schedule["values"] ) fractional_steps = np.array(self.ddim_timesteps) / self.ddpm_num_timesteps return interpolater(fractional_steps) def modify_score(self, model, e_t, x, t, c): # TODO look up index by t scale = self.get_scales()[self.index] if scale == 0: return e_t sqrt_1ma = self.sampler.ddim_sqrt_one_minus_alphas[self.index].to(x.device) with torch.enable_grad(): x_in = x.detach().requires_grad_(True) pred_x0 = model.predict_start_from_noise(x_in, t=t, noise=e_t) x_img = model.first_stage_model.decode((1 / 0.18215) * pred_x0) inp = self.guide_model.preprocess(x_img) loss = self.guide_model.compute_loss(inp) grads = torch.autograd.grad(loss.sum(), x_in)[0] correction = grads * scale if self.show: 
clear_output(wait=True) print( loss.item(), scale, correction.abs().max().item(), e_t.abs().max().item(), ) self.history.append( [ loss.item(), scale, correction.min().item(), correction.max().item(), ] ) plt.imshow( (inp[0].detach().permute(1, 2, 0).clamp(-1, 1).cpu() + 1) / 2 ) plt.axis("off") plt.show() plt.imshow(correction[0][0].detach().cpu()) plt.axis("off") plt.show() e_t_mod = e_t - sqrt_1ma * correction if self.show: fig, axs = plt.subplots(1, 3) axs[0].imshow(e_t[0][0].detach().cpu(), vmin=-2, vmax=+2) axs[1].imshow(e_t_mod[0][0].detach().cpu(), vmin=-2, vmax=+2) axs[2].imshow(correction[0][0].detach().cpu(), vmin=-2, vmax=+2) plt.show() self.index += 1 return e_t_mod
python
MIT
eeb5c648e6c2a25c8f6f8038edfe75d73c811614
2026-01-05T07:14:33.752935Z
false
liuff19/DreamReward
https://github.com/liuff19/DreamReward/blob/eeb5c648e6c2a25c8f6f8038edfe75d73c811614/extern/ldm_zero123/util.py
extern/ldm_zero123/util.py
import importlib import os import time from inspect import isfunction import cv2 import matplotlib.pyplot as plt import numpy as np import PIL import torch import torchvision from PIL import Image, ImageDraw, ImageFont from torch import optim def pil_rectangle_crop(im): width, height = im.size # Get dimensions if width <= height: left = 0 right = width top = (height - width) / 2 bottom = (height + width) / 2 else: top = 0 bottom = height left = (width - height) / 2 bottom = (width + height) / 2 # Crop the center of the image im = im.crop((left, top, right, bottom)) return im def log_txt_as_img(wh, xc, size=10): # wh a tuple of (width, height) # xc a list of captions to plot b = len(xc) txts = list() for bi in range(b): txt = Image.new("RGB", wh, color="white") draw = ImageDraw.Draw(txt) font = ImageFont.truetype("data/DejaVuSans.ttf", size=size) nc = int(40 * (wh[0] / 256)) lines = "\n".join( xc[bi][start : start + nc] for start in range(0, len(xc[bi]), nc) ) try: draw.text((0, 0), lines, fill="black", font=font) except UnicodeEncodeError: print("Cant encode string for logging. Skipping.") txt = np.array(txt).transpose(2, 0, 1) / 127.5 - 1.0 txts.append(txt) txts = np.stack(txts) txts = torch.tensor(txts) return txts def ismap(x): if not isinstance(x, torch.Tensor): return False return (len(x.shape) == 4) and (x.shape[1] > 3) def isimage(x): if not isinstance(x, torch.Tensor): return False return (len(x.shape) == 4) and (x.shape[1] == 3 or x.shape[1] == 1) def exists(x): return x is not None def default(val, d): if exists(val): return val return d() if isfunction(d) else d def mean_flat(tensor): """ https://github.com/openai/guided-diffusion/blob/27c20a8fab9cb472df5d6bdd6c8d11c8f430b924/guided_diffusion/nn.py#L86 Take the mean over all non-batch dimensions. 
""" return tensor.mean(dim=list(range(1, len(tensor.shape)))) def count_params(model, verbose=False): total_params = sum(p.numel() for p in model.parameters()) if verbose: print(f"{model.__class__.__name__} has {total_params*1.e-6:.2f} M params.") return total_params def instantiate_from_config(config): if not "target" in config: if config == "__is_first_stage__": return None elif config == "__is_unconditional__": return None raise KeyError("Expected key `target` to instantiate.") return get_obj_from_str(config["target"])(**config.get("params", dict())) def get_obj_from_str(string, reload=False): module, cls = string.rsplit(".", 1) if reload: module_imp = importlib.import_module(module) importlib.reload(module_imp) return getattr(importlib.import_module(module, package=None), cls) class AdamWwithEMAandWings(optim.Optimizer): # credit to https://gist.github.com/crowsonkb/65f7265353f403714fce3b2595e0b298 def __init__( self, params, lr=1.0e-3, betas=(0.9, 0.999), eps=1.0e-8, # TODO: check hyperparameters before using weight_decay=1.0e-2, amsgrad=False, ema_decay=0.9999, # ema decay to match previous code ema_power=1.0, param_names=(), ): """AdamW that saves EMA versions of the parameters.""" if not 0.0 <= lr: raise ValueError("Invalid learning rate: {}".format(lr)) if not 0.0 <= eps: raise ValueError("Invalid epsilon value: {}".format(eps)) if not 0.0 <= betas[0] < 1.0: raise ValueError("Invalid beta parameter at index 0: {}".format(betas[0])) if not 0.0 <= betas[1] < 1.0: raise ValueError("Invalid beta parameter at index 1: {}".format(betas[1])) if not 0.0 <= weight_decay: raise ValueError("Invalid weight_decay value: {}".format(weight_decay)) if not 0.0 <= ema_decay <= 1.0: raise ValueError("Invalid ema_decay value: {}".format(ema_decay)) defaults = dict( lr=lr, betas=betas, eps=eps, weight_decay=weight_decay, amsgrad=amsgrad, ema_decay=ema_decay, ema_power=ema_power, param_names=param_names, ) super().__init__(params, defaults) def __setstate__(self, state): 
super().__setstate__(state) for group in self.param_groups: group.setdefault("amsgrad", False) @torch.no_grad() def step(self, closure=None): """Performs a single optimization step. Args: closure (callable, optional): A closure that reevaluates the model and returns the loss. """ loss = None if closure is not None: with torch.enable_grad(): loss = closure() for group in self.param_groups: params_with_grad = [] grads = [] exp_avgs = [] exp_avg_sqs = [] ema_params_with_grad = [] state_sums = [] max_exp_avg_sqs = [] state_steps = [] amsgrad = group["amsgrad"] beta1, beta2 = group["betas"] ema_decay = group["ema_decay"] ema_power = group["ema_power"] for p in group["params"]: if p.grad is None: continue params_with_grad.append(p) if p.grad.is_sparse: raise RuntimeError("AdamW does not support sparse gradients") grads.append(p.grad) state = self.state[p] # State initialization if len(state) == 0: state["step"] = 0 # Exponential moving average of gradient values state["exp_avg"] = torch.zeros_like( p, memory_format=torch.preserve_format ) # Exponential moving average of squared gradient values state["exp_avg_sq"] = torch.zeros_like( p, memory_format=torch.preserve_format ) if amsgrad: # Maintains max of all exp. moving avg. of sq. grad. 
values state["max_exp_avg_sq"] = torch.zeros_like( p, memory_format=torch.preserve_format ) # Exponential moving average of parameter values state["param_exp_avg"] = p.detach().float().clone() exp_avgs.append(state["exp_avg"]) exp_avg_sqs.append(state["exp_avg_sq"]) ema_params_with_grad.append(state["param_exp_avg"]) if amsgrad: max_exp_avg_sqs.append(state["max_exp_avg_sq"]) # update the steps for each param group update state["step"] += 1 # record the step after step update state_steps.append(state["step"]) optim._functional.adamw( params_with_grad, grads, exp_avgs, exp_avg_sqs, max_exp_avg_sqs, state_steps, amsgrad=amsgrad, beta1=beta1, beta2=beta2, lr=group["lr"], weight_decay=group["weight_decay"], eps=group["eps"], maximize=False, ) cur_ema_decay = min(ema_decay, 1 - state["step"] ** -ema_power) for param, ema_param in zip(params_with_grad, ema_params_with_grad): ema_param.mul_(cur_ema_decay).add_( param.float(), alpha=1 - cur_ema_decay ) return loss
python
MIT
eeb5c648e6c2a25c8f6f8038edfe75d73c811614
2026-01-05T07:14:33.752935Z
false
liuff19/DreamReward
https://github.com/liuff19/DreamReward/blob/eeb5c648e6c2a25c8f6f8038edfe75d73c811614/extern/ldm_zero123/lr_scheduler.py
extern/ldm_zero123/lr_scheduler.py
import numpy as np class LambdaWarmUpCosineScheduler: """ note: use with a base_lr of 1.0 """ def __init__( self, warm_up_steps, lr_min, lr_max, lr_start, max_decay_steps, verbosity_interval=0, ): self.lr_warm_up_steps = warm_up_steps self.lr_start = lr_start self.lr_min = lr_min self.lr_max = lr_max self.lr_max_decay_steps = max_decay_steps self.last_lr = 0.0 self.verbosity_interval = verbosity_interval def schedule(self, n, **kwargs): if self.verbosity_interval > 0: if n % self.verbosity_interval == 0: print(f"current step: {n}, recent lr-multiplier: {self.last_lr}") if n < self.lr_warm_up_steps: lr = ( self.lr_max - self.lr_start ) / self.lr_warm_up_steps * n + self.lr_start self.last_lr = lr return lr else: t = (n - self.lr_warm_up_steps) / ( self.lr_max_decay_steps - self.lr_warm_up_steps ) t = min(t, 1.0) lr = self.lr_min + 0.5 * (self.lr_max - self.lr_min) * ( 1 + np.cos(t * np.pi) ) self.last_lr = lr return lr def __call__(self, n, **kwargs): return self.schedule(n, **kwargs) class LambdaWarmUpCosineScheduler2: """ supports repeated iterations, configurable via lists note: use with a base_lr of 1.0. 
""" def __init__( self, warm_up_steps, f_min, f_max, f_start, cycle_lengths, verbosity_interval=0 ): assert ( len(warm_up_steps) == len(f_min) == len(f_max) == len(f_start) == len(cycle_lengths) ) self.lr_warm_up_steps = warm_up_steps self.f_start = f_start self.f_min = f_min self.f_max = f_max self.cycle_lengths = cycle_lengths self.cum_cycles = np.cumsum([0] + list(self.cycle_lengths)) self.last_f = 0.0 self.verbosity_interval = verbosity_interval def find_in_interval(self, n): interval = 0 for cl in self.cum_cycles[1:]: if n <= cl: return interval interval += 1 def schedule(self, n, **kwargs): cycle = self.find_in_interval(n) n = n - self.cum_cycles[cycle] if self.verbosity_interval > 0: if n % self.verbosity_interval == 0: print( f"current step: {n}, recent lr-multiplier: {self.last_f}, " f"current cycle {cycle}" ) if n < self.lr_warm_up_steps[cycle]: f = (self.f_max[cycle] - self.f_start[cycle]) / self.lr_warm_up_steps[ cycle ] * n + self.f_start[cycle] self.last_f = f return f else: t = (n - self.lr_warm_up_steps[cycle]) / ( self.cycle_lengths[cycle] - self.lr_warm_up_steps[cycle] ) t = min(t, 1.0) f = self.f_min[cycle] + 0.5 * (self.f_max[cycle] - self.f_min[cycle]) * ( 1 + np.cos(t * np.pi) ) self.last_f = f return f def __call__(self, n, **kwargs): return self.schedule(n, **kwargs) class LambdaLinearScheduler(LambdaWarmUpCosineScheduler2): def schedule(self, n, **kwargs): cycle = self.find_in_interval(n) n = n - self.cum_cycles[cycle] if self.verbosity_interval > 0: if n % self.verbosity_interval == 0: print( f"current step: {n}, recent lr-multiplier: {self.last_f}, " f"current cycle {cycle}" ) if n < self.lr_warm_up_steps[cycle]: f = (self.f_max[cycle] - self.f_start[cycle]) / self.lr_warm_up_steps[ cycle ] * n + self.f_start[cycle] self.last_f = f return f else: f = self.f_min[cycle] + (self.f_max[cycle] - self.f_min[cycle]) * ( self.cycle_lengths[cycle] - n ) / (self.cycle_lengths[cycle]) self.last_f = f return f
python
MIT
eeb5c648e6c2a25c8f6f8038edfe75d73c811614
2026-01-05T07:14:33.752935Z
false
liuff19/DreamReward
https://github.com/liuff19/DreamReward/blob/eeb5c648e6c2a25c8f6f8038edfe75d73c811614/extern/ldm_zero123/thirdp/psp/id_loss.py
extern/ldm_zero123/thirdp/psp/id_loss.py
# https://github.com/eladrich/pixel2style2pixel import torch from torch import nn from extern.ldm_zero123.thirdp.psp.model_irse import Backbone class IDFeatures(nn.Module): def __init__(self, model_path): super(IDFeatures, self).__init__() print("Loading ResNet ArcFace") self.facenet = Backbone( input_size=112, num_layers=50, drop_ratio=0.6, mode="ir_se" ) self.facenet.load_state_dict(torch.load(model_path, map_location="cpu")) self.face_pool = torch.nn.AdaptiveAvgPool2d((112, 112)) self.facenet.eval() def forward(self, x, crop=False): # Not sure of the image range here if crop: x = torch.nn.functional.interpolate(x, (256, 256), mode="area") x = x[:, :, 35:223, 32:220] x = self.face_pool(x) x_feats = self.facenet(x) return x_feats
python
MIT
eeb5c648e6c2a25c8f6f8038edfe75d73c811614
2026-01-05T07:14:33.752935Z
false
liuff19/DreamReward
https://github.com/liuff19/DreamReward/blob/eeb5c648e6c2a25c8f6f8038edfe75d73c811614/extern/ldm_zero123/thirdp/psp/model_irse.py
extern/ldm_zero123/thirdp/psp/model_irse.py
# https://github.com/eladrich/pixel2style2pixel from torch.nn import ( BatchNorm1d, BatchNorm2d, Conv2d, Dropout, Linear, Module, PReLU, Sequential, ) from extern.ldm_zero123.thirdp.psp.helpers import ( Flatten, bottleneck_IR, bottleneck_IR_SE, get_blocks, l2_norm, ) """ Modified Backbone implementation from [TreB1eN](https://github.com/TreB1eN/InsightFace_Pytorch) """ class Backbone(Module): def __init__(self, input_size, num_layers, mode="ir", drop_ratio=0.4, affine=True): super(Backbone, self).__init__() assert input_size in [112, 224], "input_size should be 112 or 224" assert num_layers in [50, 100, 152], "num_layers should be 50, 100 or 152" assert mode in ["ir", "ir_se"], "mode should be ir or ir_se" blocks = get_blocks(num_layers) if mode == "ir": unit_module = bottleneck_IR elif mode == "ir_se": unit_module = bottleneck_IR_SE self.input_layer = Sequential( Conv2d(3, 64, (3, 3), 1, 1, bias=False), BatchNorm2d(64), PReLU(64) ) if input_size == 112: self.output_layer = Sequential( BatchNorm2d(512), Dropout(drop_ratio), Flatten(), Linear(512 * 7 * 7, 512), BatchNorm1d(512, affine=affine), ) else: self.output_layer = Sequential( BatchNorm2d(512), Dropout(drop_ratio), Flatten(), Linear(512 * 14 * 14, 512), BatchNorm1d(512, affine=affine), ) modules = [] for block in blocks: for bottleneck in block: modules.append( unit_module( bottleneck.in_channel, bottleneck.depth, bottleneck.stride ) ) self.body = Sequential(*modules) def forward(self, x): x = self.input_layer(x) x = self.body(x) x = self.output_layer(x) return l2_norm(x) def IR_50(input_size): """Constructs a ir-50 model.""" model = Backbone(input_size, num_layers=50, mode="ir", drop_ratio=0.4, affine=False) return model def IR_101(input_size): """Constructs a ir-101 model.""" model = Backbone( input_size, num_layers=100, mode="ir", drop_ratio=0.4, affine=False ) return model def IR_152(input_size): """Constructs a ir-152 model.""" model = Backbone( input_size, num_layers=152, mode="ir", drop_ratio=0.4, 
affine=False ) return model def IR_SE_50(input_size): """Constructs a ir_se-50 model.""" model = Backbone( input_size, num_layers=50, mode="ir_se", drop_ratio=0.4, affine=False ) return model def IR_SE_101(input_size): """Constructs a ir_se-101 model.""" model = Backbone( input_size, num_layers=100, mode="ir_se", drop_ratio=0.4, affine=False ) return model def IR_SE_152(input_size): """Constructs a ir_se-152 model.""" model = Backbone( input_size, num_layers=152, mode="ir_se", drop_ratio=0.4, affine=False ) return model
python
MIT
eeb5c648e6c2a25c8f6f8038edfe75d73c811614
2026-01-05T07:14:33.752935Z
false
liuff19/DreamReward
https://github.com/liuff19/DreamReward/blob/eeb5c648e6c2a25c8f6f8038edfe75d73c811614/extern/ldm_zero123/thirdp/psp/helpers.py
extern/ldm_zero123/thirdp/psp/helpers.py
# https://github.com/eladrich/pixel2style2pixel from collections import namedtuple import torch from torch.nn import ( AdaptiveAvgPool2d, BatchNorm2d, Conv2d, MaxPool2d, Module, PReLU, ReLU, Sequential, Sigmoid, ) """ ArcFace implementation from [TreB1eN](https://github.com/TreB1eN/InsightFace_Pytorch) """ class Flatten(Module): def forward(self, input): return input.view(input.size(0), -1) def l2_norm(input, axis=1): norm = torch.norm(input, 2, axis, True) output = torch.div(input, norm) return output class Bottleneck(namedtuple("Block", ["in_channel", "depth", "stride"])): """A named tuple describing a ResNet block.""" def get_block(in_channel, depth, num_units, stride=2): return [Bottleneck(in_channel, depth, stride)] + [ Bottleneck(depth, depth, 1) for i in range(num_units - 1) ] def get_blocks(num_layers): if num_layers == 50: blocks = [ get_block(in_channel=64, depth=64, num_units=3), get_block(in_channel=64, depth=128, num_units=4), get_block(in_channel=128, depth=256, num_units=14), get_block(in_channel=256, depth=512, num_units=3), ] elif num_layers == 100: blocks = [ get_block(in_channel=64, depth=64, num_units=3), get_block(in_channel=64, depth=128, num_units=13), get_block(in_channel=128, depth=256, num_units=30), get_block(in_channel=256, depth=512, num_units=3), ] elif num_layers == 152: blocks = [ get_block(in_channel=64, depth=64, num_units=3), get_block(in_channel=64, depth=128, num_units=8), get_block(in_channel=128, depth=256, num_units=36), get_block(in_channel=256, depth=512, num_units=3), ] else: raise ValueError( "Invalid number of layers: {}. 
Must be one of [50, 100, 152]".format( num_layers ) ) return blocks class SEModule(Module): def __init__(self, channels, reduction): super(SEModule, self).__init__() self.avg_pool = AdaptiveAvgPool2d(1) self.fc1 = Conv2d( channels, channels // reduction, kernel_size=1, padding=0, bias=False ) self.relu = ReLU(inplace=True) self.fc2 = Conv2d( channels // reduction, channels, kernel_size=1, padding=0, bias=False ) self.sigmoid = Sigmoid() def forward(self, x): module_input = x x = self.avg_pool(x) x = self.fc1(x) x = self.relu(x) x = self.fc2(x) x = self.sigmoid(x) return module_input * x class bottleneck_IR(Module): def __init__(self, in_channel, depth, stride): super(bottleneck_IR, self).__init__() if in_channel == depth: self.shortcut_layer = MaxPool2d(1, stride) else: self.shortcut_layer = Sequential( Conv2d(in_channel, depth, (1, 1), stride, bias=False), BatchNorm2d(depth), ) self.res_layer = Sequential( BatchNorm2d(in_channel), Conv2d(in_channel, depth, (3, 3), (1, 1), 1, bias=False), PReLU(depth), Conv2d(depth, depth, (3, 3), stride, 1, bias=False), BatchNorm2d(depth), ) def forward(self, x): shortcut = self.shortcut_layer(x) res = self.res_layer(x) return res + shortcut class bottleneck_IR_SE(Module): def __init__(self, in_channel, depth, stride): super(bottleneck_IR_SE, self).__init__() if in_channel == depth: self.shortcut_layer = MaxPool2d(1, stride) else: self.shortcut_layer = Sequential( Conv2d(in_channel, depth, (1, 1), stride, bias=False), BatchNorm2d(depth), ) self.res_layer = Sequential( BatchNorm2d(in_channel), Conv2d(in_channel, depth, (3, 3), (1, 1), 1, bias=False), PReLU(depth), Conv2d(depth, depth, (3, 3), stride, 1, bias=False), BatchNorm2d(depth), SEModule(depth, 16), ) def forward(self, x): shortcut = self.shortcut_layer(x) res = self.res_layer(x) return res + shortcut
python
MIT
eeb5c648e6c2a25c8f6f8038edfe75d73c811614
2026-01-05T07:14:33.752935Z
false
liuff19/DreamReward
https://github.com/liuff19/DreamReward/blob/eeb5c648e6c2a25c8f6f8038edfe75d73c811614/extern/ldm_zero123/models/autoencoder.py
extern/ldm_zero123/models/autoencoder.py
from contextlib import contextmanager import pytorch_lightning as pl import torch import torch.nn.functional as F from taming.modules.vqvae.quantize import VectorQuantizer2 as VectorQuantizer from extern.ldm_zero123.modules.diffusionmodules.model import Decoder, Encoder from extern.ldm_zero123.modules.distributions.distributions import ( DiagonalGaussianDistribution, ) from extern.ldm_zero123.util import instantiate_from_config class VQModel(pl.LightningModule): def __init__( self, ddconfig, lossconfig, n_embed, embed_dim, ckpt_path=None, ignore_keys=[], image_key="image", colorize_nlabels=None, monitor=None, batch_resize_range=None, scheduler_config=None, lr_g_factor=1.0, remap=None, sane_index_shape=False, # tell vector quantizer to return indices as bhw use_ema=False, ): super().__init__() self.embed_dim = embed_dim self.n_embed = n_embed self.image_key = image_key self.encoder = Encoder(**ddconfig) self.decoder = Decoder(**ddconfig) self.loss = instantiate_from_config(lossconfig) self.quantize = VectorQuantizer( n_embed, embed_dim, beta=0.25, remap=remap, sane_index_shape=sane_index_shape, ) self.quant_conv = torch.nn.Conv2d(ddconfig["z_channels"], embed_dim, 1) self.post_quant_conv = torch.nn.Conv2d(embed_dim, ddconfig["z_channels"], 1) if colorize_nlabels is not None: assert type(colorize_nlabels) == int self.register_buffer("colorize", torch.randn(3, colorize_nlabels, 1, 1)) if monitor is not None: self.monitor = monitor self.batch_resize_range = batch_resize_range if self.batch_resize_range is not None: print( f"{self.__class__.__name__}: Using per-batch resizing in range {batch_resize_range}." 
) self.use_ema = use_ema if self.use_ema: self.model_ema = LitEma(self) print(f"Keeping EMAs of {len(list(self.model_ema.buffers()))}.") if ckpt_path is not None: self.init_from_ckpt(ckpt_path, ignore_keys=ignore_keys) self.scheduler_config = scheduler_config self.lr_g_factor = lr_g_factor @contextmanager def ema_scope(self, context=None): if self.use_ema: self.model_ema.store(self.parameters()) self.model_ema.copy_to(self) if context is not None: print(f"{context}: Switched to EMA weights") try: yield None finally: if self.use_ema: self.model_ema.restore(self.parameters()) if context is not None: print(f"{context}: Restored training weights") def init_from_ckpt(self, path, ignore_keys=list()): sd = torch.load(path, map_location="cpu")["state_dict"] keys = list(sd.keys()) for k in keys: for ik in ignore_keys: if k.startswith(ik): print("Deleting key {} from state_dict.".format(k)) del sd[k] missing, unexpected = self.load_state_dict(sd, strict=False) print( f"Restored from {path} with {len(missing)} missing and {len(unexpected)} unexpected keys" ) if len(missing) > 0: print(f"Missing Keys: {missing}") print(f"Unexpected Keys: {unexpected}") def on_train_batch_end(self, *args, **kwargs): if self.use_ema: self.model_ema(self) def encode(self, x): h = self.encoder(x) h = self.quant_conv(h) quant, emb_loss, info = self.quantize(h) return quant, emb_loss, info def encode_to_prequant(self, x): h = self.encoder(x) h = self.quant_conv(h) return h def decode(self, quant): quant = self.post_quant_conv(quant) dec = self.decoder(quant) return dec def decode_code(self, code_b): quant_b = self.quantize.embed_code(code_b) dec = self.decode(quant_b) return dec def forward(self, input, return_pred_indices=False): quant, diff, (_, _, ind) = self.encode(input) dec = self.decode(quant) if return_pred_indices: return dec, diff, ind return dec, diff def get_input(self, batch, k): x = batch[k] if len(x.shape) == 3: x = x[..., None] x = x.permute(0, 3, 1, 
2).to(memory_format=torch.contiguous_format).float() if self.batch_resize_range is not None: lower_size = self.batch_resize_range[0] upper_size = self.batch_resize_range[1] if self.global_step <= 4: # do the first few batches with max size to avoid later oom new_resize = upper_size else: new_resize = np.random.choice( np.arange(lower_size, upper_size + 16, 16) ) if new_resize != x.shape[2]: x = F.interpolate(x, size=new_resize, mode="bicubic") x = x.detach() return x def training_step(self, batch, batch_idx, optimizer_idx): # https://github.com/pytorch/pytorch/issues/37142 # try not to fool the heuristics x = self.get_input(batch, self.image_key) xrec, qloss, ind = self(x, return_pred_indices=True) if optimizer_idx == 0: # autoencode aeloss, log_dict_ae = self.loss( qloss, x, xrec, optimizer_idx, self.global_step, last_layer=self.get_last_layer(), split="train", predicted_indices=ind, ) self.log_dict( log_dict_ae, prog_bar=False, logger=True, on_step=True, on_epoch=True ) return aeloss if optimizer_idx == 1: # discriminator discloss, log_dict_disc = self.loss( qloss, x, xrec, optimizer_idx, self.global_step, last_layer=self.get_last_layer(), split="train", ) self.log_dict( log_dict_disc, prog_bar=False, logger=True, on_step=True, on_epoch=True ) return discloss def validation_step(self, batch, batch_idx): log_dict = self._validation_step(batch, batch_idx) with self.ema_scope(): log_dict_ema = self._validation_step(batch, batch_idx, suffix="_ema") return log_dict def _validation_step(self, batch, batch_idx, suffix=""): x = self.get_input(batch, self.image_key) xrec, qloss, ind = self(x, return_pred_indices=True) aeloss, log_dict_ae = self.loss( qloss, x, xrec, 0, self.global_step, last_layer=self.get_last_layer(), split="val" + suffix, predicted_indices=ind, ) discloss, log_dict_disc = self.loss( qloss, x, xrec, 1, self.global_step, last_layer=self.get_last_layer(), split="val" + suffix, predicted_indices=ind, ) rec_loss = log_dict_ae[f"val{suffix}/rec_loss"] 
self.log( f"val{suffix}/rec_loss", rec_loss, prog_bar=True, logger=True, on_step=False, on_epoch=True, sync_dist=True, ) self.log( f"val{suffix}/aeloss", aeloss, prog_bar=True, logger=True, on_step=False, on_epoch=True, sync_dist=True, ) if version.parse(pl.__version__) >= version.parse("1.4.0"): del log_dict_ae[f"val{suffix}/rec_loss"] self.log_dict(log_dict_ae) self.log_dict(log_dict_disc) return self.log_dict def configure_optimizers(self): lr_d = self.learning_rate lr_g = self.lr_g_factor * self.learning_rate print("lr_d", lr_d) print("lr_g", lr_g) opt_ae = torch.optim.Adam( list(self.encoder.parameters()) + list(self.decoder.parameters()) + list(self.quantize.parameters()) + list(self.quant_conv.parameters()) + list(self.post_quant_conv.parameters()), lr=lr_g, betas=(0.5, 0.9), ) opt_disc = torch.optim.Adam( self.loss.discriminator.parameters(), lr=lr_d, betas=(0.5, 0.9) ) if self.scheduler_config is not None: scheduler = instantiate_from_config(self.scheduler_config) print("Setting up LambdaLR scheduler...") scheduler = [ { "scheduler": LambdaLR(opt_ae, lr_lambda=scheduler.schedule), "interval": "step", "frequency": 1, }, { "scheduler": LambdaLR(opt_disc, lr_lambda=scheduler.schedule), "interval": "step", "frequency": 1, }, ] return [opt_ae, opt_disc], scheduler return [opt_ae, opt_disc], [] def get_last_layer(self): return self.decoder.conv_out.weight def log_images(self, batch, only_inputs=False, plot_ema=False, **kwargs): log = dict() x = self.get_input(batch, self.image_key) x = x.to(self.device) if only_inputs: log["inputs"] = x return log xrec, _ = self(x) if x.shape[1] > 3: # colorize with random projection assert xrec.shape[1] > 3 x = self.to_rgb(x) xrec = self.to_rgb(xrec) log["inputs"] = x log["reconstructions"] = xrec if plot_ema: with self.ema_scope(): xrec_ema, _ = self(x) if x.shape[1] > 3: xrec_ema = self.to_rgb(xrec_ema) log["reconstructions_ema"] = xrec_ema return log def to_rgb(self, x): assert self.image_key == "segmentation" if not 
hasattr(self, "colorize"): self.register_buffer("colorize", torch.randn(3, x.shape[1], 1, 1).to(x)) x = F.conv2d(x, weight=self.colorize) x = 2.0 * (x - x.min()) / (x.max() - x.min()) - 1.0 return x class VQModelInterface(VQModel): def __init__(self, embed_dim, *args, **kwargs): super().__init__(embed_dim=embed_dim, *args, **kwargs) self.embed_dim = embed_dim def encode(self, x): h = self.encoder(x) h = self.quant_conv(h) return h def decode(self, h, force_not_quantize=False): # also go through quantization layer if not force_not_quantize: quant, emb_loss, info = self.quantize(h) else: quant = h quant = self.post_quant_conv(quant) dec = self.decoder(quant) return dec class AutoencoderKL(pl.LightningModule): def __init__( self, ddconfig, lossconfig, embed_dim, ckpt_path=None, ignore_keys=[], image_key="image", colorize_nlabels=None, monitor=None, ): super().__init__() self.image_key = image_key self.encoder = Encoder(**ddconfig) self.decoder = Decoder(**ddconfig) self.loss = instantiate_from_config(lossconfig) assert ddconfig["double_z"] self.quant_conv = torch.nn.Conv2d(2 * ddconfig["z_channels"], 2 * embed_dim, 1) self.post_quant_conv = torch.nn.Conv2d(embed_dim, ddconfig["z_channels"], 1) self.embed_dim = embed_dim if colorize_nlabels is not None: assert type(colorize_nlabels) == int self.register_buffer("colorize", torch.randn(3, colorize_nlabels, 1, 1)) if monitor is not None: self.monitor = monitor if ckpt_path is not None: self.init_from_ckpt(ckpt_path, ignore_keys=ignore_keys) def init_from_ckpt(self, path, ignore_keys=list()): sd = torch.load(path, map_location="cpu")["state_dict"] keys = list(sd.keys()) for k in keys: for ik in ignore_keys: if k.startswith(ik): print("Deleting key {} from state_dict.".format(k)) del sd[k] self.load_state_dict(sd, strict=False) print(f"Restored from {path}") def encode(self, x): h = self.encoder(x) moments = self.quant_conv(h) posterior = DiagonalGaussianDistribution(moments) return posterior def decode(self, z): z = 
self.post_quant_conv(z) dec = self.decoder(z) return dec def forward(self, input, sample_posterior=True): posterior = self.encode(input) if sample_posterior: z = posterior.sample() else: z = posterior.mode() dec = self.decode(z) return dec, posterior def get_input(self, batch, k): x = batch[k] if len(x.shape) == 3: x = x[..., None] x = x.permute(0, 3, 1, 2).to(memory_format=torch.contiguous_format).float() return x def training_step(self, batch, batch_idx, optimizer_idx): inputs = self.get_input(batch, self.image_key) reconstructions, posterior = self(inputs) if optimizer_idx == 0: # train encoder+decoder+logvar aeloss, log_dict_ae = self.loss( inputs, reconstructions, posterior, optimizer_idx, self.global_step, last_layer=self.get_last_layer(), split="train", ) self.log( "aeloss", aeloss, prog_bar=True, logger=True, on_step=True, on_epoch=True, ) self.log_dict( log_dict_ae, prog_bar=False, logger=True, on_step=True, on_epoch=False ) return aeloss if optimizer_idx == 1: # train the discriminator discloss, log_dict_disc = self.loss( inputs, reconstructions, posterior, optimizer_idx, self.global_step, last_layer=self.get_last_layer(), split="train", ) self.log( "discloss", discloss, prog_bar=True, logger=True, on_step=True, on_epoch=True, ) self.log_dict( log_dict_disc, prog_bar=False, logger=True, on_step=True, on_epoch=False ) return discloss def validation_step(self, batch, batch_idx): inputs = self.get_input(batch, self.image_key) reconstructions, posterior = self(inputs) aeloss, log_dict_ae = self.loss( inputs, reconstructions, posterior, 0, self.global_step, last_layer=self.get_last_layer(), split="val", ) discloss, log_dict_disc = self.loss( inputs, reconstructions, posterior, 1, self.global_step, last_layer=self.get_last_layer(), split="val", ) self.log("val/rec_loss", log_dict_ae["val/rec_loss"]) self.log_dict(log_dict_ae) self.log_dict(log_dict_disc) return self.log_dict def configure_optimizers(self): lr = self.learning_rate opt_ae = torch.optim.Adam( 
list(self.encoder.parameters()) + list(self.decoder.parameters()) + list(self.quant_conv.parameters()) + list(self.post_quant_conv.parameters()), lr=lr, betas=(0.5, 0.9), ) opt_disc = torch.optim.Adam( self.loss.discriminator.parameters(), lr=lr, betas=(0.5, 0.9) ) return [opt_ae, opt_disc], [] def get_last_layer(self): return self.decoder.conv_out.weight @torch.no_grad() def log_images(self, batch, only_inputs=False, **kwargs): log = dict() x = self.get_input(batch, self.image_key) x = x.to(self.device) if not only_inputs: xrec, posterior = self(x) if x.shape[1] > 3: # colorize with random projection assert xrec.shape[1] > 3 x = self.to_rgb(x) xrec = self.to_rgb(xrec) log["samples"] = self.decode(torch.randn_like(posterior.sample())) log["reconstructions"] = xrec log["inputs"] = x return log def to_rgb(self, x): assert self.image_key == "segmentation" if not hasattr(self, "colorize"): self.register_buffer("colorize", torch.randn(3, x.shape[1], 1, 1).to(x)) x = F.conv2d(x, weight=self.colorize) x = 2.0 * (x - x.min()) / (x.max() - x.min()) - 1.0 return x class IdentityFirstStage(torch.nn.Module): def __init__(self, *args, vq_interface=False, **kwargs): self.vq_interface = vq_interface # TODO: Should be true by default but check to not break older stuff super().__init__() def encode(self, x, *args, **kwargs): return x def decode(self, x, *args, **kwargs): return x def quantize(self, x, *args, **kwargs): if self.vq_interface: return x, None, [None, None, None] return x def forward(self, x, *args, **kwargs): return x
python
MIT
eeb5c648e6c2a25c8f6f8038edfe75d73c811614
2026-01-05T07:14:33.752935Z
false
liuff19/DreamReward
https://github.com/liuff19/DreamReward/blob/eeb5c648e6c2a25c8f6f8038edfe75d73c811614/extern/ldm_zero123/models/diffusion/ddim.py
extern/ldm_zero123/models/diffusion/ddim.py
"""SAMPLING ONLY.""" from functools import partial import numpy as np import torch from einops import rearrange from tqdm import tqdm from extern.ldm_zero123.models.diffusion.sampling_util import ( norm_thresholding, renorm_thresholding, spatial_norm_thresholding, ) from extern.ldm_zero123.modules.diffusionmodules.util import ( extract_into_tensor, make_ddim_sampling_parameters, make_ddim_timesteps, noise_like, ) class DDIMSampler(object): def __init__(self, model, schedule="linear", **kwargs): super().__init__() self.model = model self.ddpm_num_timesteps = model.num_timesteps self.schedule = schedule def to(self, device): """Same as to in torch module Don't really underestand why this isn't a module in the first place""" for k, v in self.__dict__.items(): if isinstance(v, torch.Tensor): new_v = getattr(self, k).to(device) setattr(self, k, new_v) def register_buffer(self, name, attr): if type(attr) == torch.Tensor: if attr.device != torch.device("cuda"): attr = attr.to(torch.device("cuda")) setattr(self, name, attr) def make_schedule( self, ddim_num_steps, ddim_discretize="uniform", ddim_eta=0.0, verbose=True ): self.ddim_timesteps = make_ddim_timesteps( ddim_discr_method=ddim_discretize, num_ddim_timesteps=ddim_num_steps, num_ddpm_timesteps=self.ddpm_num_timesteps, verbose=verbose, ) alphas_cumprod = self.model.alphas_cumprod assert ( alphas_cumprod.shape[0] == self.ddpm_num_timesteps ), "alphas have to be defined for each timestep" to_torch = lambda x: x.clone().detach().to(torch.float32).to(self.model.device) self.register_buffer("betas", to_torch(self.model.betas)) self.register_buffer("alphas_cumprod", to_torch(alphas_cumprod)) self.register_buffer( "alphas_cumprod_prev", to_torch(self.model.alphas_cumprod_prev) ) # calculations for diffusion q(x_t | x_{t-1}) and others self.register_buffer( "sqrt_alphas_cumprod", to_torch(np.sqrt(alphas_cumprod.cpu())) ) self.register_buffer( "sqrt_one_minus_alphas_cumprod", to_torch(np.sqrt(1.0 - alphas_cumprod.cpu())), ) 
self.register_buffer( "log_one_minus_alphas_cumprod", to_torch(np.log(1.0 - alphas_cumprod.cpu())) ) self.register_buffer( "sqrt_recip_alphas_cumprod", to_torch(np.sqrt(1.0 / alphas_cumprod.cpu())) ) self.register_buffer( "sqrt_recipm1_alphas_cumprod", to_torch(np.sqrt(1.0 / alphas_cumprod.cpu() - 1)), ) # ddim sampling parameters ddim_sigmas, ddim_alphas, ddim_alphas_prev = make_ddim_sampling_parameters( alphacums=alphas_cumprod.cpu(), ddim_timesteps=self.ddim_timesteps, eta=ddim_eta, verbose=verbose, ) self.register_buffer("ddim_sigmas", ddim_sigmas) self.register_buffer("ddim_alphas", ddim_alphas) self.register_buffer("ddim_alphas_prev", ddim_alphas_prev) self.register_buffer("ddim_sqrt_one_minus_alphas", np.sqrt(1.0 - ddim_alphas)) sigmas_for_original_sampling_steps = ddim_eta * torch.sqrt( (1 - self.alphas_cumprod_prev) / (1 - self.alphas_cumprod) * (1 - self.alphas_cumprod / self.alphas_cumprod_prev) ) self.register_buffer( "ddim_sigmas_for_original_num_steps", sigmas_for_original_sampling_steps ) @torch.no_grad() def sample( self, S, batch_size, shape, conditioning=None, callback=None, normals_sequence=None, img_callback=None, quantize_x0=False, eta=0.0, mask=None, x0=None, temperature=1.0, noise_dropout=0.0, score_corrector=None, corrector_kwargs=None, verbose=True, x_T=None, log_every_t=100, unconditional_guidance_scale=1.0, unconditional_conditioning=None, # this has to come in the same format as the conditioning, # e.g. as encoded tokens, ... 
dynamic_threshold=None, **kwargs, ): if conditioning is not None: if isinstance(conditioning, dict): ctmp = conditioning[list(conditioning.keys())[0]] while isinstance(ctmp, list): ctmp = ctmp[0] cbs = ctmp.shape[0] if cbs != batch_size: print( f"Warning: Got {cbs} conditionings but batch-size is {batch_size}" ) else: if conditioning.shape[0] != batch_size: print( f"Warning: Got {conditioning.shape[0]} conditionings but batch-size is {batch_size}" ) self.make_schedule(ddim_num_steps=S, ddim_eta=eta, verbose=verbose) # sampling C, H, W = shape size = (batch_size, C, H, W) # print(f'Data shape for DDIM sampling is {size}, eta {eta}') samples, intermediates = self.ddim_sampling( conditioning, size, callback=callback, img_callback=img_callback, quantize_denoised=quantize_x0, mask=mask, x0=x0, ddim_use_original_steps=False, noise_dropout=noise_dropout, temperature=temperature, score_corrector=score_corrector, corrector_kwargs=corrector_kwargs, x_T=x_T, log_every_t=log_every_t, unconditional_guidance_scale=unconditional_guidance_scale, unconditional_conditioning=unconditional_conditioning, dynamic_threshold=dynamic_threshold, ) return samples, intermediates @torch.no_grad() def ddim_sampling( self, cond, shape, x_T=None, ddim_use_original_steps=False, callback=None, timesteps=None, quantize_denoised=False, mask=None, x0=None, img_callback=None, log_every_t=100, temperature=1.0, noise_dropout=0.0, score_corrector=None, corrector_kwargs=None, unconditional_guidance_scale=1.0, unconditional_conditioning=None, dynamic_threshold=None, t_start=-1, ): device = self.model.betas.device b = shape[0] if x_T is None: img = torch.randn(shape, device=device) else: img = x_T if timesteps is None: timesteps = ( self.ddpm_num_timesteps if ddim_use_original_steps else self.ddim_timesteps ) elif timesteps is not None and not ddim_use_original_steps: subset_end = ( int( min(timesteps / self.ddim_timesteps.shape[0], 1) * self.ddim_timesteps.shape[0] ) - 1 ) timesteps = 
self.ddim_timesteps[:subset_end] timesteps = timesteps[:t_start] intermediates = {"x_inter": [img], "pred_x0": [img]} time_range = ( reversed(range(0, timesteps)) if ddim_use_original_steps else np.flip(timesteps) ) total_steps = timesteps if ddim_use_original_steps else timesteps.shape[0] # print(f"Running DDIM Sampling with {total_steps} timesteps") iterator = tqdm(time_range, desc="DDIM Sampler", total=total_steps) for i, step in enumerate(iterator): index = total_steps - i - 1 ts = torch.full((b,), step, device=device, dtype=torch.long) if mask is not None: assert x0 is not None img_orig = self.model.q_sample( x0, ts ) # TODO: deterministic forward pass? img = img_orig * mask + (1.0 - mask) * img outs = self.p_sample_ddim( img, cond, ts, index=index, use_original_steps=ddim_use_original_steps, quantize_denoised=quantize_denoised, temperature=temperature, noise_dropout=noise_dropout, score_corrector=score_corrector, corrector_kwargs=corrector_kwargs, unconditional_guidance_scale=unconditional_guidance_scale, unconditional_conditioning=unconditional_conditioning, dynamic_threshold=dynamic_threshold, ) img, pred_x0 = outs if callback: img = callback(i, img, pred_x0) if img_callback: img_callback(pred_x0, i) if index % log_every_t == 0 or index == total_steps - 1: intermediates["x_inter"].append(img) intermediates["pred_x0"].append(pred_x0) return img, intermediates @torch.no_grad() def p_sample_ddim( self, x, c, t, index, repeat_noise=False, use_original_steps=False, quantize_denoised=False, temperature=1.0, noise_dropout=0.0, score_corrector=None, corrector_kwargs=None, unconditional_guidance_scale=1.0, unconditional_conditioning=None, dynamic_threshold=None, ): b, *_, device = *x.shape, x.device if unconditional_conditioning is None or unconditional_guidance_scale == 1.0: e_t = self.model.apply_model(x, t, c) else: x_in = torch.cat([x] * 2) t_in = torch.cat([t] * 2) if isinstance(c, dict): assert isinstance(unconditional_conditioning, dict) c_in = dict() for k 
in c: if isinstance(c[k], list): c_in[k] = [ torch.cat([unconditional_conditioning[k][i], c[k][i]]) for i in range(len(c[k])) ] else: c_in[k] = torch.cat([unconditional_conditioning[k], c[k]]) else: c_in = torch.cat([unconditional_conditioning, c]) e_t_uncond, e_t = self.model.apply_model(x_in, t_in, c_in).chunk(2) e_t = e_t_uncond + unconditional_guidance_scale * (e_t - e_t_uncond) if score_corrector is not None: assert self.model.parameterization == "eps" e_t = score_corrector.modify_score( self.model, e_t, x, t, c, **corrector_kwargs ) alphas = self.model.alphas_cumprod if use_original_steps else self.ddim_alphas alphas_prev = ( self.model.alphas_cumprod_prev if use_original_steps else self.ddim_alphas_prev ) sqrt_one_minus_alphas = ( self.model.sqrt_one_minus_alphas_cumprod if use_original_steps else self.ddim_sqrt_one_minus_alphas ) sigmas = ( self.model.ddim_sigmas_for_original_num_steps if use_original_steps else self.ddim_sigmas ) # select parameters corresponding to the currently considered timestep a_t = torch.full((b, 1, 1, 1), alphas[index], device=device) a_prev = torch.full((b, 1, 1, 1), alphas_prev[index], device=device) sigma_t = torch.full((b, 1, 1, 1), sigmas[index], device=device) sqrt_one_minus_at = torch.full( (b, 1, 1, 1), sqrt_one_minus_alphas[index], device=device ) # current prediction for x_0 pred_x0 = (x - sqrt_one_minus_at * e_t) / a_t.sqrt() print(t, sqrt_one_minus_at, a_t) if quantize_denoised: pred_x0, _, *_ = self.model.first_stage_model.quantize(pred_x0) if dynamic_threshold is not None: pred_x0 = norm_thresholding(pred_x0, dynamic_threshold) # direction pointing to x_t dir_xt = (1.0 - a_prev - sigma_t**2).sqrt() * e_t noise = sigma_t * noise_like(x.shape, device, repeat_noise) * temperature if noise_dropout > 0.0: noise = torch.nn.functional.dropout(noise, p=noise_dropout) x_prev = a_prev.sqrt() * pred_x0 + dir_xt + noise return x_prev, pred_x0 @torch.no_grad() def encode( self, x0, c, t_enc, use_original_steps=False, 
return_intermediates=None, unconditional_guidance_scale=1.0, unconditional_conditioning=None, ): num_reference_steps = ( self.ddpm_num_timesteps if use_original_steps else self.ddim_timesteps.shape[0] ) assert t_enc <= num_reference_steps num_steps = t_enc if use_original_steps: alphas_next = self.alphas_cumprod[:num_steps] alphas = self.alphas_cumprod_prev[:num_steps] else: alphas_next = self.ddim_alphas[:num_steps] alphas = torch.tensor(self.ddim_alphas_prev[:num_steps]) x_next = x0 intermediates = [] inter_steps = [] for i in tqdm(range(num_steps), desc="Encoding Image"): t = torch.full( (x0.shape[0],), i, device=self.model.device, dtype=torch.long ) if unconditional_guidance_scale == 1.0: noise_pred = self.model.apply_model(x_next, t, c) else: assert unconditional_conditioning is not None e_t_uncond, noise_pred = torch.chunk( self.model.apply_model( torch.cat((x_next, x_next)), torch.cat((t, t)), torch.cat((unconditional_conditioning, c)), ), 2, ) noise_pred = e_t_uncond + unconditional_guidance_scale * ( noise_pred - e_t_uncond ) xt_weighted = (alphas_next[i] / alphas[i]).sqrt() * x_next weighted_noise_pred = ( alphas_next[i].sqrt() * ((1 / alphas_next[i] - 1).sqrt() - (1 / alphas[i] - 1).sqrt()) * noise_pred ) x_next = xt_weighted + weighted_noise_pred if ( return_intermediates and i % (num_steps // return_intermediates) == 0 and i < num_steps - 1 ): intermediates.append(x_next) inter_steps.append(i) elif return_intermediates and i >= num_steps - 2: intermediates.append(x_next) inter_steps.append(i) out = {"x_encoded": x_next, "intermediate_steps": inter_steps} if return_intermediates: out.update({"intermediates": intermediates}) return x_next, out @torch.no_grad() def stochastic_encode(self, x0, t, use_original_steps=False, noise=None): # fast, but does not allow for exact reconstruction # t serves as an index to gather the correct alphas if use_original_steps: sqrt_alphas_cumprod = self.sqrt_alphas_cumprod sqrt_one_minus_alphas_cumprod = 
self.sqrt_one_minus_alphas_cumprod else: sqrt_alphas_cumprod = torch.sqrt(self.ddim_alphas) sqrt_one_minus_alphas_cumprod = self.ddim_sqrt_one_minus_alphas if noise is None: noise = torch.randn_like(x0) return ( extract_into_tensor(sqrt_alphas_cumprod, t, x0.shape) * x0 + extract_into_tensor(sqrt_one_minus_alphas_cumprod, t, x0.shape) * noise ) @torch.no_grad() def decode( self, x_latent, cond, t_start, unconditional_guidance_scale=1.0, unconditional_conditioning=None, use_original_steps=False, ): timesteps = ( np.arange(self.ddpm_num_timesteps) if use_original_steps else self.ddim_timesteps ) timesteps = timesteps[:t_start] time_range = np.flip(timesteps) total_steps = timesteps.shape[0] # print(f"Running DDIM Sampling with {total_steps} timesteps") iterator = tqdm(time_range, desc="Decoding image", total=total_steps) x_dec = x_latent for i, step in enumerate(iterator): index = total_steps - i - 1 ts = torch.full( (x_latent.shape[0],), step, device=x_latent.device, dtype=torch.long ) x_dec, _ = self.p_sample_ddim( x_dec, cond, ts, index=index, use_original_steps=use_original_steps, unconditional_guidance_scale=unconditional_guidance_scale, unconditional_conditioning=unconditional_conditioning, ) return x_dec
python
MIT
eeb5c648e6c2a25c8f6f8038edfe75d73c811614
2026-01-05T07:14:33.752935Z
false
liuff19/DreamReward
https://github.com/liuff19/DreamReward/blob/eeb5c648e6c2a25c8f6f8038edfe75d73c811614/extern/ldm_zero123/models/diffusion/ddpm.py
extern/ldm_zero123/models/diffusion/ddpm.py
""" wild mixture of https://github.com/lucidrains/denoising-diffusion-pytorch/blob/7706bdfc6f527f58d33f84b7b522e61e6e3164b3/denoising_diffusion_pytorch/denoising_diffusion_pytorch.py https://github.com/openai/improved-diffusion/blob/e94489283bb876ac1477d5dd7709bbbd2d9902ce/improved_diffusion/gaussian_diffusion.py https://github.com/CompVis/taming-transformers -- merci """ import itertools from contextlib import contextmanager, nullcontext from functools import partial import numpy as np import pytorch_lightning as pl import torch import torch.nn as nn from einops import rearrange, repeat from omegaconf import ListConfig from pytorch_lightning.utilities.rank_zero import rank_zero_only from torch.optim.lr_scheduler import LambdaLR from torchvision.utils import make_grid from tqdm import tqdm from extern.ldm_zero123.models.autoencoder import ( AutoencoderKL, IdentityFirstStage, VQModelInterface, ) from extern.ldm_zero123.models.diffusion.ddim import DDIMSampler from extern.ldm_zero123.modules.attention import CrossAttention from extern.ldm_zero123.modules.diffusionmodules.util import ( extract_into_tensor, make_beta_schedule, noise_like, ) from extern.ldm_zero123.modules.distributions.distributions import ( DiagonalGaussianDistribution, normal_kl, ) from extern.ldm_zero123.modules.ema import LitEma from extern.ldm_zero123.util import ( count_params, default, exists, instantiate_from_config, isimage, ismap, log_txt_as_img, mean_flat, ) __conditioning_keys__ = {"concat": "c_concat", "crossattn": "c_crossattn", "adm": "y"} def disabled_train(self, mode=True): """Overwrite model.train with this function to make sure train/eval mode does not change anymore.""" return self def uniform_on_device(r1, r2, shape, device): return (r1 - r2) * torch.rand(*shape, device=device) + r2 class DDPM(pl.LightningModule): # classic DDPM with Gaussian diffusion, in image space def __init__( self, unet_config, timesteps=1000, beta_schedule="linear", loss_type="l2", ckpt_path=None, 
ignore_keys=[], load_only_unet=False, monitor="val/loss", use_ema=True, first_stage_key="image", image_size=256, channels=3, log_every_t=100, clip_denoised=True, linear_start=1e-4, linear_end=2e-2, cosine_s=8e-3, given_betas=None, original_elbo_weight=0.0, v_posterior=0.0, # weight for choosing posterior variance as sigma = (1-v) * beta_tilde + v * beta l_simple_weight=1.0, conditioning_key=None, parameterization="eps", # all assuming fixed variance schedules scheduler_config=None, use_positional_encodings=False, learn_logvar=False, logvar_init=0.0, make_it_fit=False, ucg_training=None, ): super().__init__() assert parameterization in [ "eps", "x0", ], 'currently only supporting "eps" and "x0"' self.parameterization = parameterization print( f"{self.__class__.__name__}: Running in {self.parameterization}-prediction mode" ) self.cond_stage_model = None self.clip_denoised = clip_denoised self.log_every_t = log_every_t self.first_stage_key = first_stage_key self.image_size = image_size # try conv? 
self.channels = channels self.use_positional_encodings = use_positional_encodings self.model = DiffusionWrapper(unet_config, conditioning_key) count_params(self.model, verbose=True) self.use_ema = use_ema if self.use_ema: self.model_ema = LitEma(self.model) print(f"Keeping EMAs of {len(list(self.model_ema.buffers()))}.") self.use_scheduler = scheduler_config is not None if self.use_scheduler: self.scheduler_config = scheduler_config self.v_posterior = v_posterior self.original_elbo_weight = original_elbo_weight self.l_simple_weight = l_simple_weight if monitor is not None: self.monitor = monitor self.make_it_fit = make_it_fit if ckpt_path is not None: self.init_from_ckpt( ckpt_path, ignore_keys=ignore_keys, only_model=load_only_unet ) self.register_schedule( given_betas=given_betas, beta_schedule=beta_schedule, timesteps=timesteps, linear_start=linear_start, linear_end=linear_end, cosine_s=cosine_s, ) self.loss_type = loss_type self.learn_logvar = learn_logvar self.logvar = torch.full(fill_value=logvar_init, size=(self.num_timesteps,)) if self.learn_logvar: self.logvar = nn.Parameter(self.logvar, requires_grad=True) self.ucg_training = ucg_training or dict() if self.ucg_training: self.ucg_prng = np.random.RandomState() def register_schedule( self, given_betas=None, beta_schedule="linear", timesteps=1000, linear_start=1e-4, linear_end=2e-2, cosine_s=8e-3, ): if exists(given_betas): betas = given_betas else: betas = make_beta_schedule( beta_schedule, timesteps, linear_start=linear_start, linear_end=linear_end, cosine_s=cosine_s, ) alphas = 1.0 - betas alphas_cumprod = np.cumprod(alphas, axis=0) alphas_cumprod_prev = np.append(1.0, alphas_cumprod[:-1]) (timesteps,) = betas.shape self.num_timesteps = int(timesteps) self.linear_start = linear_start self.linear_end = linear_end assert ( alphas_cumprod.shape[0] == self.num_timesteps ), "alphas have to be defined for each timestep" to_torch = partial(torch.tensor, dtype=torch.float32) self.register_buffer("betas", 
to_torch(betas)) self.register_buffer("alphas_cumprod", to_torch(alphas_cumprod)) self.register_buffer("alphas_cumprod_prev", to_torch(alphas_cumprod_prev)) # calculations for diffusion q(x_t | x_{t-1}) and others self.register_buffer("sqrt_alphas_cumprod", to_torch(np.sqrt(alphas_cumprod))) self.register_buffer( "sqrt_one_minus_alphas_cumprod", to_torch(np.sqrt(1.0 - alphas_cumprod)) ) self.register_buffer( "log_one_minus_alphas_cumprod", to_torch(np.log(1.0 - alphas_cumprod)) ) self.register_buffer( "sqrt_recip_alphas_cumprod", to_torch(np.sqrt(1.0 / alphas_cumprod)) ) self.register_buffer( "sqrt_recipm1_alphas_cumprod", to_torch(np.sqrt(1.0 / alphas_cumprod - 1)) ) # calculations for posterior q(x_{t-1} | x_t, x_0) posterior_variance = (1 - self.v_posterior) * betas * ( 1.0 - alphas_cumprod_prev ) / (1.0 - alphas_cumprod) + self.v_posterior * betas # above: equal to 1. / (1. / (1. - alpha_cumprod_tm1) + alpha_t / beta_t) self.register_buffer("posterior_variance", to_torch(posterior_variance)) # below: log calculation clipped because the posterior variance is 0 at the beginning of the diffusion chain self.register_buffer( "posterior_log_variance_clipped", to_torch(np.log(np.maximum(posterior_variance, 1e-20))), ) self.register_buffer( "posterior_mean_coef1", to_torch(betas * np.sqrt(alphas_cumprod_prev) / (1.0 - alphas_cumprod)), ) self.register_buffer( "posterior_mean_coef2", to_torch( (1.0 - alphas_cumprod_prev) * np.sqrt(alphas) / (1.0 - alphas_cumprod) ), ) if self.parameterization == "eps": lvlb_weights = self.betas**2 / ( 2 * self.posterior_variance * to_torch(alphas) * (1 - self.alphas_cumprod) ) elif self.parameterization == "x0": lvlb_weights = ( 0.5 * np.sqrt(torch.Tensor(alphas_cumprod)) / (2.0 * 1 - torch.Tensor(alphas_cumprod)) ) else: raise NotImplementedError("mu not supported") # TODO how to choose this term lvlb_weights[0] = lvlb_weights[1] self.register_buffer("lvlb_weights", lvlb_weights, persistent=False) assert not 
torch.isnan(self.lvlb_weights).all() @contextmanager def ema_scope(self, context=None): if self.use_ema: self.model_ema.store(self.model.parameters()) self.model_ema.copy_to(self.model) if context is not None: print(f"{context}: Switched to EMA weights") try: yield None finally: if self.use_ema: self.model_ema.restore(self.model.parameters()) if context is not None: print(f"{context}: Restored training weights") @torch.no_grad() def init_from_ckpt(self, path, ignore_keys=list(), only_model=False): sd = torch.load(path, map_location="cpu") if "state_dict" in list(sd.keys()): sd = sd["state_dict"] keys = list(sd.keys()) if self.make_it_fit: n_params = len( [ name for name, _ in itertools.chain( self.named_parameters(), self.named_buffers() ) ] ) for name, param in tqdm( itertools.chain(self.named_parameters(), self.named_buffers()), desc="Fitting old weights to new weights", total=n_params, ): if not name in sd: continue old_shape = sd[name].shape new_shape = param.shape assert len(old_shape) == len(new_shape) if len(new_shape) > 2: # we only modify first two axes assert new_shape[2:] == old_shape[2:] # assumes first axis corresponds to output dim if not new_shape == old_shape: new_param = param.clone() old_param = sd[name] if len(new_shape) == 1: for i in range(new_param.shape[0]): new_param[i] = old_param[i % old_shape[0]] elif len(new_shape) >= 2: for i in range(new_param.shape[0]): for j in range(new_param.shape[1]): new_param[i, j] = old_param[ i % old_shape[0], j % old_shape[1] ] n_used_old = torch.ones(old_shape[1]) for j in range(new_param.shape[1]): n_used_old[j % old_shape[1]] += 1 n_used_new = torch.zeros(new_shape[1]) for j in range(new_param.shape[1]): n_used_new[j] = n_used_old[j % old_shape[1]] n_used_new = n_used_new[None, :] while len(n_used_new.shape) < len(new_shape): n_used_new = n_used_new.unsqueeze(-1) new_param /= n_used_new sd[name] = new_param missing, unexpected = ( self.load_state_dict(sd, strict=False) if not only_model else 
self.model.load_state_dict(sd, strict=False) ) print( f"Restored from {path} with {len(missing)} missing and {len(unexpected)} unexpected keys" ) if len(missing) > 0: print(f"Missing Keys: {missing}") if len(unexpected) > 0: print(f"Unexpected Keys: {unexpected}") def q_mean_variance(self, x_start, t): """ Get the distribution q(x_t | x_0). :param x_start: the [N x C x ...] tensor of noiseless inputs. :param t: the number of diffusion steps (minus 1). Here, 0 means one step. :return: A tuple (mean, variance, log_variance), all of x_start's shape. """ mean = extract_into_tensor(self.sqrt_alphas_cumprod, t, x_start.shape) * x_start variance = extract_into_tensor(1.0 - self.alphas_cumprod, t, x_start.shape) log_variance = extract_into_tensor( self.log_one_minus_alphas_cumprod, t, x_start.shape ) return mean, variance, log_variance def predict_start_from_noise(self, x_t, t, noise): return ( extract_into_tensor(self.sqrt_recip_alphas_cumprod, t, x_t.shape) * x_t - extract_into_tensor(self.sqrt_recipm1_alphas_cumprod, t, x_t.shape) * noise ) def q_posterior(self, x_start, x_t, t): posterior_mean = ( extract_into_tensor(self.posterior_mean_coef1, t, x_t.shape) * x_start + extract_into_tensor(self.posterior_mean_coef2, t, x_t.shape) * x_t ) posterior_variance = extract_into_tensor(self.posterior_variance, t, x_t.shape) posterior_log_variance_clipped = extract_into_tensor( self.posterior_log_variance_clipped, t, x_t.shape ) return posterior_mean, posterior_variance, posterior_log_variance_clipped def p_mean_variance(self, x, t, clip_denoised: bool): model_out = self.model(x, t) if self.parameterization == "eps": x_recon = self.predict_start_from_noise(x, t=t, noise=model_out) elif self.parameterization == "x0": x_recon = model_out if clip_denoised: x_recon.clamp_(-1.0, 1.0) model_mean, posterior_variance, posterior_log_variance = self.q_posterior( x_start=x_recon, x_t=x, t=t ) return model_mean, posterior_variance, posterior_log_variance @torch.no_grad() def p_sample(self, 
x, t, clip_denoised=True, repeat_noise=False): b, *_, device = *x.shape, x.device model_mean, _, model_log_variance = self.p_mean_variance( x=x, t=t, clip_denoised=clip_denoised ) noise = noise_like(x.shape, device, repeat_noise) # no noise when t == 0 nonzero_mask = (1 - (t == 0).float()).reshape(b, *((1,) * (len(x.shape) - 1))) return model_mean + nonzero_mask * (0.5 * model_log_variance).exp() * noise @torch.no_grad() def p_sample_loop(self, shape, return_intermediates=False): device = self.betas.device b = shape[0] img = torch.randn(shape, device=device) intermediates = [img] for i in tqdm( reversed(range(0, self.num_timesteps)), desc="Sampling t", total=self.num_timesteps, ): img = self.p_sample( img, torch.full((b,), i, device=device, dtype=torch.long), clip_denoised=self.clip_denoised, ) if i % self.log_every_t == 0 or i == self.num_timesteps - 1: intermediates.append(img) if return_intermediates: return img, intermediates return img @torch.no_grad() def sample(self, batch_size=16, return_intermediates=False): image_size = self.image_size channels = self.channels return self.p_sample_loop( (batch_size, channels, image_size, image_size), return_intermediates=return_intermediates, ) def q_sample(self, x_start, t, noise=None): noise = default(noise, lambda: torch.randn_like(x_start)) return ( extract_into_tensor(self.sqrt_alphas_cumprod, t, x_start.shape) * x_start + extract_into_tensor(self.sqrt_one_minus_alphas_cumprod, t, x_start.shape) * noise ) def get_loss(self, pred, target, mean=True): if self.loss_type == "l1": loss = (target - pred).abs() if mean: loss = loss.mean() elif self.loss_type == "l2": if mean: loss = torch.nn.functional.mse_loss(target, pred) else: loss = torch.nn.functional.mse_loss(target, pred, reduction="none") else: raise NotImplementedError("unknown loss type '{loss_type}'") return loss def p_losses(self, x_start, t, noise=None): noise = default(noise, lambda: torch.randn_like(x_start)) x_noisy = self.q_sample(x_start=x_start, t=t, 
noise=noise) model_out = self.model(x_noisy, t) loss_dict = {} if self.parameterization == "eps": target = noise elif self.parameterization == "x0": target = x_start else: raise NotImplementedError( f"Paramterization {self.parameterization} not yet supported" ) loss = self.get_loss(model_out, target, mean=False).mean(dim=[1, 2, 3]) log_prefix = "train" if self.training else "val" loss_dict.update({f"{log_prefix}/loss_simple": loss.mean()}) loss_simple = loss.mean() * self.l_simple_weight loss_vlb = (self.lvlb_weights[t] * loss).mean() loss_dict.update({f"{log_prefix}/loss_vlb": loss_vlb}) loss = loss_simple + self.original_elbo_weight * loss_vlb loss_dict.update({f"{log_prefix}/loss": loss}) return loss, loss_dict def forward(self, x, *args, **kwargs): # b, c, h, w, device, img_size, = *x.shape, x.device, self.image_size # assert h == img_size and w == img_size, f'height and width of image must be {img_size}' t = torch.randint( 0, self.num_timesteps, (x.shape[0],), device=self.device ).long() return self.p_losses(x, t, *args, **kwargs) def get_input(self, batch, k): x = batch[k] if len(x.shape) == 3: x = x[..., None] x = rearrange(x, "b h w c -> b c h w") x = x.to(memory_format=torch.contiguous_format).float() return x def shared_step(self, batch): x = self.get_input(batch, self.first_stage_key) loss, loss_dict = self(x) return loss, loss_dict def training_step(self, batch, batch_idx): for k in self.ucg_training: p = self.ucg_training[k]["p"] val = self.ucg_training[k]["val"] if val is None: val = "" for i in range(len(batch[k])): if self.ucg_prng.choice(2, p=[1 - p, p]): batch[k][i] = val loss, loss_dict = self.shared_step(batch) self.log_dict( loss_dict, prog_bar=True, logger=True, on_step=True, on_epoch=True ) self.log( "global_step", self.global_step, prog_bar=True, logger=True, on_step=True, on_epoch=False, ) if self.use_scheduler: lr = self.optimizers().param_groups[0]["lr"] self.log( "lr_abs", lr, prog_bar=True, logger=True, on_step=True, on_epoch=False ) 
return loss @torch.no_grad() def validation_step(self, batch, batch_idx): _, loss_dict_no_ema = self.shared_step(batch) with self.ema_scope(): _, loss_dict_ema = self.shared_step(batch) loss_dict_ema = {key + "_ema": loss_dict_ema[key] for key in loss_dict_ema} self.log_dict( loss_dict_no_ema, prog_bar=False, logger=True, on_step=False, on_epoch=True ) self.log_dict( loss_dict_ema, prog_bar=False, logger=True, on_step=False, on_epoch=True ) def on_train_batch_end(self, *args, **kwargs): if self.use_ema: self.model_ema(self.model) def _get_rows_from_list(self, samples): n_imgs_per_row = len(samples) denoise_grid = rearrange(samples, "n b c h w -> b n c h w") denoise_grid = rearrange(denoise_grid, "b n c h w -> (b n) c h w") denoise_grid = make_grid(denoise_grid, nrow=n_imgs_per_row) return denoise_grid @torch.no_grad() def log_images(self, batch, N=8, n_row=2, sample=True, return_keys=None, **kwargs): log = dict() x = self.get_input(batch, self.first_stage_key) N = min(x.shape[0], N) n_row = min(x.shape[0], n_row) x = x.to(self.device)[:N] log["inputs"] = x # get diffusion row diffusion_row = list() x_start = x[:n_row] for t in range(self.num_timesteps): if t % self.log_every_t == 0 or t == self.num_timesteps - 1: t = repeat(torch.tensor([t]), "1 -> b", b=n_row) t = t.to(self.device).long() noise = torch.randn_like(x_start) x_noisy = self.q_sample(x_start=x_start, t=t, noise=noise) diffusion_row.append(x_noisy) log["diffusion_row"] = self._get_rows_from_list(diffusion_row) if sample: # get denoise row with self.ema_scope("Plotting"): samples, denoise_row = self.sample( batch_size=N, return_intermediates=True ) log["samples"] = samples log["denoise_row"] = self._get_rows_from_list(denoise_row) if return_keys: if np.intersect1d(list(log.keys()), return_keys).shape[0] == 0: return log else: return {key: log[key] for key in return_keys} return log def configure_optimizers(self): lr = self.learning_rate params = list(self.model.parameters()) if self.learn_logvar: params 
= params + [self.logvar] opt = torch.optim.AdamW(params, lr=lr) return opt class LatentDiffusion(DDPM): """main class""" def __init__( self, first_stage_config, cond_stage_config, num_timesteps_cond=None, cond_stage_key="image", cond_stage_trainable=False, concat_mode=True, cond_stage_forward=None, conditioning_key=None, scale_factor=1.0, scale_by_std=False, unet_trainable=True, *args, **kwargs, ): self.num_timesteps_cond = default(num_timesteps_cond, 1) self.scale_by_std = scale_by_std assert self.num_timesteps_cond <= kwargs["timesteps"] # for backwards compatibility after implementation of DiffusionWrapper if conditioning_key is None: conditioning_key = "concat" if concat_mode else "crossattn" if cond_stage_config == "__is_unconditional__": conditioning_key = None ckpt_path = kwargs.pop("ckpt_path", None) ignore_keys = kwargs.pop("ignore_keys", []) super().__init__(conditioning_key=conditioning_key, *args, **kwargs) self.concat_mode = concat_mode self.cond_stage_trainable = cond_stage_trainable self.unet_trainable = unet_trainable self.cond_stage_key = cond_stage_key try: self.num_downs = len(first_stage_config.params.ddconfig.ch_mult) - 1 except: self.num_downs = 0 if not scale_by_std: self.scale_factor = scale_factor else: self.register_buffer("scale_factor", torch.tensor(scale_factor)) self.instantiate_first_stage(first_stage_config) self.instantiate_cond_stage(cond_stage_config) self.cond_stage_forward = cond_stage_forward # construct linear projection layer for concatenating image CLIP embedding and RT self.cc_projection = nn.Linear(772, 768) nn.init.eye_(list(self.cc_projection.parameters())[0][:768, :768]) nn.init.zeros_(list(self.cc_projection.parameters())[1]) self.cc_projection.requires_grad_(True) self.clip_denoised = False self.bbox_tokenizer = None self.restarted_from_ckpt = False if ckpt_path is not None: self.init_from_ckpt(ckpt_path, ignore_keys) self.restarted_from_ckpt = True def make_cond_schedule( self, ): self.cond_ids = torch.full( 
size=(self.num_timesteps,), fill_value=self.num_timesteps - 1, dtype=torch.long, ) ids = torch.round( torch.linspace(0, self.num_timesteps - 1, self.num_timesteps_cond) ).long() self.cond_ids[: self.num_timesteps_cond] = ids @rank_zero_only @torch.no_grad() def on_train_batch_start(self, batch, batch_idx, dataloader_idx): # only for very first batch if ( self.scale_by_std and self.current_epoch == 0 and self.global_step == 0 and batch_idx == 0 and not self.restarted_from_ckpt ): assert ( self.scale_factor == 1.0 ), "rather not use custom rescaling and std-rescaling simultaneously" # set rescale weight to 1./std of encodings print("### USING STD-RESCALING ###") x = super().get_input(batch, self.first_stage_key) x = x.to(self.device) encoder_posterior = self.encode_first_stage(x) z = self.get_first_stage_encoding(encoder_posterior).detach() del self.scale_factor self.register_buffer("scale_factor", 1.0 / z.flatten().std()) print(f"setting self.scale_factor to {self.scale_factor}") print("### USING STD-RESCALING ###") def register_schedule( self, given_betas=None, beta_schedule="linear", timesteps=1000, linear_start=1e-4, linear_end=2e-2, cosine_s=8e-3, ): super().register_schedule( given_betas, beta_schedule, timesteps, linear_start, linear_end, cosine_s ) self.shorten_cond_schedule = self.num_timesteps_cond > 1 if self.shorten_cond_schedule: self.make_cond_schedule() def instantiate_first_stage(self, config): model = instantiate_from_config(config) self.first_stage_model = model.eval() self.first_stage_model.train = disabled_train for param in self.first_stage_model.parameters(): param.requires_grad = False def instantiate_cond_stage(self, config): if not self.cond_stage_trainable: if config == "__is_first_stage__": print("Using first stage also as cond stage.") self.cond_stage_model = self.first_stage_model elif config == "__is_unconditional__": print(f"Training {self.__class__.__name__} as an unconditional model.") self.cond_stage_model = None # 
self.be_unconditional = True else: model = instantiate_from_config(config) self.cond_stage_model = model.eval() self.cond_stage_model.train = disabled_train for param in self.cond_stage_model.parameters(): param.requires_grad = False else: assert config != "__is_first_stage__" assert config != "__is_unconditional__" model = instantiate_from_config(config) self.cond_stage_model = model def _get_denoise_row_from_list( self, samples, desc="", force_no_decoder_quantization=False ): denoise_row = [] for zd in tqdm(samples, desc=desc): denoise_row.append( self.decode_first_stage( zd.to(self.device), force_not_quantize=force_no_decoder_quantization ) ) n_imgs_per_row = len(denoise_row) denoise_row = torch.stack(denoise_row) # n_log_step, n_row, C, H, W denoise_grid = rearrange(denoise_row, "n b c h w -> b n c h w") denoise_grid = rearrange(denoise_grid, "b n c h w -> (b n) c h w") denoise_grid = make_grid(denoise_grid, nrow=n_imgs_per_row) return denoise_grid def get_first_stage_encoding(self, encoder_posterior): if isinstance(encoder_posterior, DiagonalGaussianDistribution): z = encoder_posterior.sample() elif isinstance(encoder_posterior, torch.Tensor): z = encoder_posterior else: raise NotImplementedError( f"encoder_posterior of type '{type(encoder_posterior)}' not yet implemented" ) return self.scale_factor * z def get_learned_conditioning(self, c): if self.cond_stage_forward is None: if hasattr(self.cond_stage_model, "encode") and callable( self.cond_stage_model.encode ): c = self.cond_stage_model.encode(c) if isinstance(c, DiagonalGaussianDistribution): c = c.mode() else: c = self.cond_stage_model(c) else: assert hasattr(self.cond_stage_model, self.cond_stage_forward) c = getattr(self.cond_stage_model, self.cond_stage_forward)(c) return c def meshgrid(self, h, w): y = torch.arange(0, h).view(h, 1, 1).repeat(1, w, 1) x = torch.arange(0, w).view(1, w, 1).repeat(h, 1, 1) arr = torch.cat([y, x], dim=-1) return arr def delta_border(self, h, w): """ :param h: height 
:param w: width :return: normalized distance to image border, wtith min distance = 0 at border and max dist = 0.5 at image center """ lower_right_corner = torch.tensor([h - 1, w - 1]).view(1, 1, 2) arr = self.meshgrid(h, w) / lower_right_corner dist_left_up = torch.min(arr, dim=-1, keepdims=True)[0] dist_right_down = torch.min(1 - arr, dim=-1, keepdims=True)[0] edge_dist = torch.min( torch.cat([dist_left_up, dist_right_down], dim=-1), dim=-1 )[0] return edge_dist def get_weighting(self, h, w, Ly, Lx, device): weighting = self.delta_border(h, w) weighting = torch.clip( weighting, self.split_input_params["clip_min_weight"], self.split_input_params["clip_max_weight"], ) weighting = weighting.view(1, h * w, 1).repeat(1, 1, Ly * Lx).to(device) if self.split_input_params["tie_braker"]: L_weighting = self.delta_border(Ly, Lx) L_weighting = torch.clip( L_weighting, self.split_input_params["clip_min_tie_weight"], self.split_input_params["clip_max_tie_weight"], ) L_weighting = L_weighting.view(1, 1, Ly * Lx).to(device) weighting = weighting * L_weighting return weighting def get_fold_unfold( self, x, kernel_size, stride, uf=1, df=1 ): # todo load once not every time, shorten code """ :param x: img of size (bs, c, h, w) :return: n img crops of size (n, bs, c, kernel_size[0], kernel_size[1]) """ bs, nc, h, w = x.shape # number of crops in image Ly = (h - kernel_size[0]) // stride[0] + 1 Lx = (w - kernel_size[1]) // stride[1] + 1 if uf == 1 and df == 1: fold_params = dict( kernel_size=kernel_size, dilation=1, padding=0, stride=stride ) unfold = torch.nn.Unfold(**fold_params) fold = torch.nn.Fold(output_size=x.shape[2:], **fold_params) weighting = self.get_weighting( kernel_size[0], kernel_size[1], Ly, Lx, x.device ).to(x.dtype) normalization = fold(weighting).view(1, 1, h, w) # normalizes the overlap weighting = weighting.view((1, 1, kernel_size[0], kernel_size[1], Ly * Lx)) elif uf > 1 and df == 1: fold_params = dict( kernel_size=kernel_size, dilation=1, padding=0, 
stride=stride ) unfold = torch.nn.Unfold(**fold_params) fold_params2 = dict( kernel_size=(kernel_size[0] * uf, kernel_size[0] * uf), dilation=1, padding=0, stride=(stride[0] * uf, stride[1] * uf), ) fold = torch.nn.Fold(
python
MIT
eeb5c648e6c2a25c8f6f8038edfe75d73c811614
2026-01-05T07:14:33.752935Z
true
liuff19/DreamReward
https://github.com/liuff19/DreamReward/blob/eeb5c648e6c2a25c8f6f8038edfe75d73c811614/extern/ldm_zero123/models/diffusion/__init__.py
extern/ldm_zero123/models/diffusion/__init__.py
python
MIT
eeb5c648e6c2a25c8f6f8038edfe75d73c811614
2026-01-05T07:14:33.752935Z
false
liuff19/DreamReward
https://github.com/liuff19/DreamReward/blob/eeb5c648e6c2a25c8f6f8038edfe75d73c811614/extern/ldm_zero123/models/diffusion/sampling_util.py
extern/ldm_zero123/models/diffusion/sampling_util.py
import numpy as np import torch def append_dims(x, target_dims): """Appends dimensions to the end of a tensor until it has target_dims dimensions. From https://github.com/crowsonkb/k-diffusion/blob/master/k_diffusion/utils.py""" dims_to_append = target_dims - x.ndim if dims_to_append < 0: raise ValueError( f"input has {x.ndim} dims but target_dims is {target_dims}, which is less" ) return x[(...,) + (None,) * dims_to_append] def renorm_thresholding(x0, value): # renorm pred_max = x0.max() pred_min = x0.min() pred_x0 = (x0 - pred_min) / (pred_max - pred_min) # 0 ... 1 pred_x0 = 2 * pred_x0 - 1.0 # -1 ... 1 s = torch.quantile(rearrange(pred_x0, "b ... -> b (...)").abs(), value, dim=-1) s.clamp_(min=1.0) s = s.view(-1, *((1,) * (pred_x0.ndim - 1))) # clip by threshold # pred_x0 = pred_x0.clamp(-s, s) / s # needs newer pytorch # TODO bring back to pure-gpu with min/max # temporary hack: numpy on cpu pred_x0 = ( np.clip(pred_x0.cpu().numpy(), -s.cpu().numpy(), s.cpu().numpy()) / s.cpu().numpy() ) pred_x0 = torch.tensor(pred_x0).to(self.model.device) # re.renorm pred_x0 = (pred_x0 + 1.0) / 2.0 # 0 ... 1 pred_x0 = (pred_max - pred_min) * pred_x0 + pred_min # orig range return pred_x0 def norm_thresholding(x0, value): s = append_dims(x0.pow(2).flatten(1).mean(1).sqrt().clamp(min=value), x0.ndim) return x0 * (value / s) def spatial_norm_thresholding(x0, value): # b c h w s = x0.pow(2).mean(1, keepdim=True).sqrt().clamp(min=value) return x0 * (value / s)
python
MIT
eeb5c648e6c2a25c8f6f8038edfe75d73c811614
2026-01-05T07:14:33.752935Z
false
liuff19/DreamReward
https://github.com/liuff19/DreamReward/blob/eeb5c648e6c2a25c8f6f8038edfe75d73c811614/extern/ldm_zero123/models/diffusion/classifier.py
extern/ldm_zero123/models/diffusion/classifier.py
import os from copy import deepcopy from glob import glob import pytorch_lightning as pl import torch from einops import rearrange from natsort import natsorted from omegaconf import OmegaConf from torch.nn import functional as F from torch.optim import AdamW from torch.optim.lr_scheduler import LambdaLR from extern.ldm_zero123.modules.diffusionmodules.openaimodel import ( EncoderUNetModel, UNetModel, ) from extern.ldm_zero123.util import ( default, instantiate_from_config, ismap, log_txt_as_img, ) __models__ = {"class_label": EncoderUNetModel, "segmentation": UNetModel} def disabled_train(self, mode=True): """Overwrite model.train with this function to make sure train/eval mode does not change anymore.""" return self class NoisyLatentImageClassifier(pl.LightningModule): def __init__( self, diffusion_path, num_classes, ckpt_path=None, pool="attention", label_key=None, diffusion_ckpt_path=None, scheduler_config=None, weight_decay=1.0e-2, log_steps=10, monitor="val/loss", *args, **kwargs, ): super().__init__(*args, **kwargs) self.num_classes = num_classes # get latest config of diffusion model diffusion_config = natsorted( glob(os.path.join(diffusion_path, "configs", "*-project.yaml")) )[-1] self.diffusion_config = OmegaConf.load(diffusion_config).model self.diffusion_config.params.ckpt_path = diffusion_ckpt_path self.load_diffusion() self.monitor = monitor self.numd = self.diffusion_model.first_stage_model.encoder.num_resolutions - 1 self.log_time_interval = self.diffusion_model.num_timesteps // log_steps self.log_steps = log_steps self.label_key = ( label_key if not hasattr(self.diffusion_model, "cond_stage_key") else self.diffusion_model.cond_stage_key ) assert ( self.label_key is not None ), "label_key neither in diffusion model nor in model.params" if self.label_key not in __models__: raise NotImplementedError() self.load_classifier(ckpt_path, pool) self.scheduler_config = scheduler_config self.use_scheduler = self.scheduler_config is not None self.weight_decay 
= weight_decay def init_from_ckpt(self, path, ignore_keys=list(), only_model=False): sd = torch.load(path, map_location="cpu") if "state_dict" in list(sd.keys()): sd = sd["state_dict"] keys = list(sd.keys()) for k in keys: for ik in ignore_keys: if k.startswith(ik): print("Deleting key {} from state_dict.".format(k)) del sd[k] missing, unexpected = ( self.load_state_dict(sd, strict=False) if not only_model else self.model.load_state_dict(sd, strict=False) ) print( f"Restored from {path} with {len(missing)} missing and {len(unexpected)} unexpected keys" ) if len(missing) > 0: print(f"Missing Keys: {missing}") if len(unexpected) > 0: print(f"Unexpected Keys: {unexpected}") def load_diffusion(self): model = instantiate_from_config(self.diffusion_config) self.diffusion_model = model.eval() self.diffusion_model.train = disabled_train for param in self.diffusion_model.parameters(): param.requires_grad = False def load_classifier(self, ckpt_path, pool): model_config = deepcopy(self.diffusion_config.params.unet_config.params) model_config.in_channels = ( self.diffusion_config.params.unet_config.params.out_channels ) model_config.out_channels = self.num_classes if self.label_key == "class_label": model_config.pool = pool self.model = __models__[self.label_key](**model_config) if ckpt_path is not None: print( "#####################################################################" ) print(f'load from ckpt "{ckpt_path}"') print( "#####################################################################" ) self.init_from_ckpt(ckpt_path) @torch.no_grad() def get_x_noisy(self, x, t, noise=None): noise = default(noise, lambda: torch.randn_like(x)) continuous_sqrt_alpha_cumprod = None if self.diffusion_model.use_continuous_noise: continuous_sqrt_alpha_cumprod = ( self.diffusion_model.sample_continuous_noise_level(x.shape[0], t + 1) ) # todo: make sure t+1 is correct here return self.diffusion_model.q_sample( x_start=x, t=t, noise=noise, 
continuous_sqrt_alpha_cumprod=continuous_sqrt_alpha_cumprod, ) def forward(self, x_noisy, t, *args, **kwargs): return self.model(x_noisy, t) @torch.no_grad() def get_input(self, batch, k): x = batch[k] if len(x.shape) == 3: x = x[..., None] x = rearrange(x, "b h w c -> b c h w") x = x.to(memory_format=torch.contiguous_format).float() return x @torch.no_grad() def get_conditioning(self, batch, k=None): if k is None: k = self.label_key assert k is not None, "Needs to provide label key" targets = batch[k].to(self.device) if self.label_key == "segmentation": targets = rearrange(targets, "b h w c -> b c h w") for down in range(self.numd): h, w = targets.shape[-2:] targets = F.interpolate(targets, size=(h // 2, w // 2), mode="nearest") # targets = rearrange(targets,'b c h w -> b h w c') return targets def compute_top_k(self, logits, labels, k, reduction="mean"): _, top_ks = torch.topk(logits, k, dim=1) if reduction == "mean": return (top_ks == labels[:, None]).float().sum(dim=-1).mean().item() elif reduction == "none": return (top_ks == labels[:, None]).float().sum(dim=-1) def on_train_epoch_start(self): # save some memory self.diffusion_model.model.to("cpu") @torch.no_grad() def write_logs(self, loss, logits, targets): log_prefix = "train" if self.training else "val" log = {} log[f"{log_prefix}/loss"] = loss.mean() log[f"{log_prefix}/acc@1"] = self.compute_top_k( logits, targets, k=1, reduction="mean" ) log[f"{log_prefix}/acc@5"] = self.compute_top_k( logits, targets, k=5, reduction="mean" ) self.log_dict( log, prog_bar=False, logger=True, on_step=self.training, on_epoch=True ) self.log("loss", log[f"{log_prefix}/loss"], prog_bar=True, logger=False) self.log( "global_step", self.global_step, logger=False, on_epoch=False, prog_bar=True ) lr = self.optimizers().param_groups[0]["lr"] self.log("lr_abs", lr, on_step=True, logger=True, on_epoch=False, prog_bar=True) def shared_step(self, batch, t=None): x, *_ = self.diffusion_model.get_input( batch, 
k=self.diffusion_model.first_stage_key ) targets = self.get_conditioning(batch) if targets.dim() == 4: targets = targets.argmax(dim=1) if t is None: t = torch.randint( 0, self.diffusion_model.num_timesteps, (x.shape[0],), device=self.device ).long() else: t = torch.full(size=(x.shape[0],), fill_value=t, device=self.device).long() x_noisy = self.get_x_noisy(x, t) logits = self(x_noisy, t) loss = F.cross_entropy(logits, targets, reduction="none") self.write_logs(loss.detach(), logits.detach(), targets.detach()) loss = loss.mean() return loss, logits, x_noisy, targets def training_step(self, batch, batch_idx): loss, *_ = self.shared_step(batch) return loss def reset_noise_accs(self): self.noisy_acc = { t: {"acc@1": [], "acc@5": []} for t in range( 0, self.diffusion_model.num_timesteps, self.diffusion_model.log_every_t ) } def on_validation_start(self): self.reset_noise_accs() @torch.no_grad() def validation_step(self, batch, batch_idx): loss, *_ = self.shared_step(batch) for t in self.noisy_acc: _, logits, _, targets = self.shared_step(batch, t) self.noisy_acc[t]["acc@1"].append( self.compute_top_k(logits, targets, k=1, reduction="mean") ) self.noisy_acc[t]["acc@5"].append( self.compute_top_k(logits, targets, k=5, reduction="mean") ) return loss def configure_optimizers(self): optimizer = AdamW( self.model.parameters(), lr=self.learning_rate, weight_decay=self.weight_decay, ) if self.use_scheduler: scheduler = instantiate_from_config(self.scheduler_config) print("Setting up LambdaLR scheduler...") scheduler = [ { "scheduler": LambdaLR(optimizer, lr_lambda=scheduler.schedule), "interval": "step", "frequency": 1, } ] return [optimizer], scheduler return optimizer @torch.no_grad() def log_images(self, batch, N=8, *args, **kwargs): log = dict() x = self.get_input(batch, self.diffusion_model.first_stage_key) log["inputs"] = x y = self.get_conditioning(batch) if self.label_key == "class_label": y = log_txt_as_img((x.shape[2], x.shape[3]), batch["human_label"]) log["labels"] 
= y if ismap(y): log["labels"] = self.diffusion_model.to_rgb(y) for step in range(self.log_steps): current_time = step * self.log_time_interval _, logits, x_noisy, _ = self.shared_step(batch, t=current_time) log[f"inputs@t{current_time}"] = x_noisy pred = F.one_hot(logits.argmax(dim=1), num_classes=self.num_classes) pred = rearrange(pred, "b h w c -> b c h w") log[f"pred@t{current_time}"] = self.diffusion_model.to_rgb(pred) for key in log: log[key] = log[key][:N] return log
python
MIT
eeb5c648e6c2a25c8f6f8038edfe75d73c811614
2026-01-05T07:14:33.752935Z
false
liuff19/DreamReward
https://github.com/liuff19/DreamReward/blob/eeb5c648e6c2a25c8f6f8038edfe75d73c811614/extern/ldm_zero123/models/diffusion/plms.py
extern/ldm_zero123/models/diffusion/plms.py
"""SAMPLING ONLY.""" from functools import partial import numpy as np import torch from tqdm import tqdm from extern.ldm_zero123.models.diffusion.sampling_util import norm_thresholding from extern.ldm_zero123.modules.diffusionmodules.util import ( make_ddim_sampling_parameters, make_ddim_timesteps, noise_like, ) class PLMSSampler(object): def __init__(self, model, schedule="linear", **kwargs): super().__init__() self.model = model self.ddpm_num_timesteps = model.num_timesteps self.schedule = schedule def register_buffer(self, name, attr): if type(attr) == torch.Tensor: if attr.device != torch.device("cuda"): attr = attr.to(torch.device("cuda")) setattr(self, name, attr) def make_schedule( self, ddim_num_steps, ddim_discretize="uniform", ddim_eta=0.0, verbose=True ): if ddim_eta != 0: raise ValueError("ddim_eta must be 0 for PLMS") self.ddim_timesteps = make_ddim_timesteps( ddim_discr_method=ddim_discretize, num_ddim_timesteps=ddim_num_steps, num_ddpm_timesteps=self.ddpm_num_timesteps, verbose=verbose, ) alphas_cumprod = self.model.alphas_cumprod assert ( alphas_cumprod.shape[0] == self.ddpm_num_timesteps ), "alphas have to be defined for each timestep" to_torch = lambda x: x.clone().detach().to(torch.float32).to(self.model.device) self.register_buffer("betas", to_torch(self.model.betas)) self.register_buffer("alphas_cumprod", to_torch(alphas_cumprod)) self.register_buffer( "alphas_cumprod_prev", to_torch(self.model.alphas_cumprod_prev) ) # calculations for diffusion q(x_t | x_{t-1}) and others self.register_buffer( "sqrt_alphas_cumprod", to_torch(np.sqrt(alphas_cumprod.cpu())) ) self.register_buffer( "sqrt_one_minus_alphas_cumprod", to_torch(np.sqrt(1.0 - alphas_cumprod.cpu())), ) self.register_buffer( "log_one_minus_alphas_cumprod", to_torch(np.log(1.0 - alphas_cumprod.cpu())) ) self.register_buffer( "sqrt_recip_alphas_cumprod", to_torch(np.sqrt(1.0 / alphas_cumprod.cpu())) ) self.register_buffer( "sqrt_recipm1_alphas_cumprod", to_torch(np.sqrt(1.0 / 
alphas_cumprod.cpu() - 1)), ) # ddim sampling parameters ddim_sigmas, ddim_alphas, ddim_alphas_prev = make_ddim_sampling_parameters( alphacums=alphas_cumprod.cpu(), ddim_timesteps=self.ddim_timesteps, eta=ddim_eta, verbose=verbose, ) self.register_buffer("ddim_sigmas", ddim_sigmas) self.register_buffer("ddim_alphas", ddim_alphas) self.register_buffer("ddim_alphas_prev", ddim_alphas_prev) self.register_buffer("ddim_sqrt_one_minus_alphas", np.sqrt(1.0 - ddim_alphas)) sigmas_for_original_sampling_steps = ddim_eta * torch.sqrt( (1 - self.alphas_cumprod_prev) / (1 - self.alphas_cumprod) * (1 - self.alphas_cumprod / self.alphas_cumprod_prev) ) self.register_buffer( "ddim_sigmas_for_original_num_steps", sigmas_for_original_sampling_steps ) @torch.no_grad() def sample( self, S, batch_size, shape, conditioning=None, callback=None, normals_sequence=None, img_callback=None, quantize_x0=False, eta=0.0, mask=None, x0=None, temperature=1.0, noise_dropout=0.0, score_corrector=None, corrector_kwargs=None, verbose=True, x_T=None, log_every_t=100, unconditional_guidance_scale=1.0, unconditional_conditioning=None, # this has to come in the same format as the conditioning, # e.g. as encoded tokens, ... 
dynamic_threshold=None, **kwargs, ): if conditioning is not None: if isinstance(conditioning, dict): ctmp = conditioning[list(conditioning.keys())[0]] while isinstance(ctmp, list): ctmp = ctmp[0] cbs = ctmp.shape[0] if cbs != batch_size: print( f"Warning: Got {cbs} conditionings but batch-size is {batch_size}" ) else: if conditioning.shape[0] != batch_size: print( f"Warning: Got {conditioning.shape[0]} conditionings but batch-size is {batch_size}" ) self.make_schedule(ddim_num_steps=S, ddim_eta=eta, verbose=verbose) # sampling C, H, W = shape size = (batch_size, C, H, W) print(f"Data shape for PLMS sampling is {size}") samples, intermediates = self.plms_sampling( conditioning, size, callback=callback, img_callback=img_callback, quantize_denoised=quantize_x0, mask=mask, x0=x0, ddim_use_original_steps=False, noise_dropout=noise_dropout, temperature=temperature, score_corrector=score_corrector, corrector_kwargs=corrector_kwargs, x_T=x_T, log_every_t=log_every_t, unconditional_guidance_scale=unconditional_guidance_scale, unconditional_conditioning=unconditional_conditioning, dynamic_threshold=dynamic_threshold, ) return samples, intermediates @torch.no_grad() def plms_sampling( self, cond, shape, x_T=None, ddim_use_original_steps=False, callback=None, timesteps=None, quantize_denoised=False, mask=None, x0=None, img_callback=None, log_every_t=100, temperature=1.0, noise_dropout=0.0, score_corrector=None, corrector_kwargs=None, unconditional_guidance_scale=1.0, unconditional_conditioning=None, dynamic_threshold=None, ): device = self.model.betas.device b = shape[0] if x_T is None: img = torch.randn(shape, device=device) else: img = x_T if timesteps is None: timesteps = ( self.ddpm_num_timesteps if ddim_use_original_steps else self.ddim_timesteps ) elif timesteps is not None and not ddim_use_original_steps: subset_end = ( int( min(timesteps / self.ddim_timesteps.shape[0], 1) * self.ddim_timesteps.shape[0] ) - 1 ) timesteps = self.ddim_timesteps[:subset_end] intermediates 
= {"x_inter": [img], "pred_x0": [img]} time_range = ( list(reversed(range(0, timesteps))) if ddim_use_original_steps else np.flip(timesteps) ) total_steps = timesteps if ddim_use_original_steps else timesteps.shape[0] print(f"Running PLMS Sampling with {total_steps} timesteps") iterator = tqdm(time_range, desc="PLMS Sampler", total=total_steps) old_eps = [] for i, step in enumerate(iterator): index = total_steps - i - 1 ts = torch.full((b,), step, device=device, dtype=torch.long) ts_next = torch.full( (b,), time_range[min(i + 1, len(time_range) - 1)], device=device, dtype=torch.long, ) if mask is not None: assert x0 is not None img_orig = self.model.q_sample( x0, ts ) # TODO: deterministic forward pass? img = img_orig * mask + (1.0 - mask) * img outs = self.p_sample_plms( img, cond, ts, index=index, use_original_steps=ddim_use_original_steps, quantize_denoised=quantize_denoised, temperature=temperature, noise_dropout=noise_dropout, score_corrector=score_corrector, corrector_kwargs=corrector_kwargs, unconditional_guidance_scale=unconditional_guidance_scale, unconditional_conditioning=unconditional_conditioning, old_eps=old_eps, t_next=ts_next, dynamic_threshold=dynamic_threshold, ) img, pred_x0, e_t = outs old_eps.append(e_t) if len(old_eps) >= 4: old_eps.pop(0) if callback: callback(i) if img_callback: img_callback(pred_x0, i) if index % log_every_t == 0 or index == total_steps - 1: intermediates["x_inter"].append(img) intermediates["pred_x0"].append(pred_x0) return img, intermediates @torch.no_grad() def p_sample_plms( self, x, c, t, index, repeat_noise=False, use_original_steps=False, quantize_denoised=False, temperature=1.0, noise_dropout=0.0, score_corrector=None, corrector_kwargs=None, unconditional_guidance_scale=1.0, unconditional_conditioning=None, old_eps=None, t_next=None, dynamic_threshold=None, ): b, *_, device = *x.shape, x.device def get_model_output(x, t): if ( unconditional_conditioning is None or unconditional_guidance_scale == 1.0 ): e_t = 
self.model.apply_model(x, t, c) else: x_in = torch.cat([x] * 2) t_in = torch.cat([t] * 2) if isinstance(c, dict): assert isinstance(unconditional_conditioning, dict) c_in = dict() for k in c: if isinstance(c[k], list): c_in[k] = [ torch.cat([unconditional_conditioning[k][i], c[k][i]]) for i in range(len(c[k])) ] else: c_in[k] = torch.cat([unconditional_conditioning[k], c[k]]) else: c_in = torch.cat([unconditional_conditioning, c]) e_t_uncond, e_t = self.model.apply_model(x_in, t_in, c_in).chunk(2) e_t = e_t_uncond + unconditional_guidance_scale * (e_t - e_t_uncond) if score_corrector is not None: assert self.model.parameterization == "eps" e_t = score_corrector.modify_score( self.model, e_t, x, t, c, **corrector_kwargs ) return e_t alphas = self.model.alphas_cumprod if use_original_steps else self.ddim_alphas alphas_prev = ( self.model.alphas_cumprod_prev if use_original_steps else self.ddim_alphas_prev ) sqrt_one_minus_alphas = ( self.model.sqrt_one_minus_alphas_cumprod if use_original_steps else self.ddim_sqrt_one_minus_alphas ) sigmas = ( self.model.ddim_sigmas_for_original_num_steps if use_original_steps else self.ddim_sigmas ) def get_x_prev_and_pred_x0(e_t, index): # select parameters corresponding to the currently considered timestep a_t = torch.full((b, 1, 1, 1), alphas[index], device=device) a_prev = torch.full((b, 1, 1, 1), alphas_prev[index], device=device) sigma_t = torch.full((b, 1, 1, 1), sigmas[index], device=device) sqrt_one_minus_at = torch.full( (b, 1, 1, 1), sqrt_one_minus_alphas[index], device=device ) # current prediction for x_0 pred_x0 = (x - sqrt_one_minus_at * e_t) / a_t.sqrt() if quantize_denoised: pred_x0, _, *_ = self.model.first_stage_model.quantize(pred_x0) if dynamic_threshold is not None: pred_x0 = norm_thresholding(pred_x0, dynamic_threshold) # direction pointing to x_t dir_xt = (1.0 - a_prev - sigma_t**2).sqrt() * e_t noise = sigma_t * noise_like(x.shape, device, repeat_noise) * temperature if noise_dropout > 0.0: noise = 
torch.nn.functional.dropout(noise, p=noise_dropout) x_prev = a_prev.sqrt() * pred_x0 + dir_xt + noise return x_prev, pred_x0 e_t = get_model_output(x, t) if len(old_eps) == 0: # Pseudo Improved Euler (2nd order) x_prev, pred_x0 = get_x_prev_and_pred_x0(e_t, index) e_t_next = get_model_output(x_prev, t_next) e_t_prime = (e_t + e_t_next) / 2 elif len(old_eps) == 1: # 2nd order Pseudo Linear Multistep (Adams-Bashforth) e_t_prime = (3 * e_t - old_eps[-1]) / 2 elif len(old_eps) == 2: # 3nd order Pseudo Linear Multistep (Adams-Bashforth) e_t_prime = (23 * e_t - 16 * old_eps[-1] + 5 * old_eps[-2]) / 12 elif len(old_eps) >= 3: # 4nd order Pseudo Linear Multistep (Adams-Bashforth) e_t_prime = ( 55 * e_t - 59 * old_eps[-1] + 37 * old_eps[-2] - 9 * old_eps[-3] ) / 24 x_prev, pred_x0 = get_x_prev_and_pred_x0(e_t_prime, index) return x_prev, pred_x0, e_t
python
MIT
eeb5c648e6c2a25c8f6f8038edfe75d73c811614
2026-01-05T07:14:33.752935Z
false
liuff19/DreamReward
https://github.com/liuff19/DreamReward/blob/eeb5c648e6c2a25c8f6f8038edfe75d73c811614/extern/ldm_zero123/modules/ema.py
extern/ldm_zero123/modules/ema.py
import torch from torch import nn class LitEma(nn.Module): def __init__(self, model, decay=0.9999, use_num_upates=True): super().__init__() if decay < 0.0 or decay > 1.0: raise ValueError("Decay must be between 0 and 1") self.m_name2s_name = {} self.register_buffer("decay", torch.tensor(decay, dtype=torch.float32)) self.register_buffer( "num_updates", torch.tensor(0, dtype=torch.int) if use_num_upates else torch.tensor(-1, dtype=torch.int), ) for name, p in model.named_parameters(): if p.requires_grad: # remove as '.'-character is not allowed in buffers s_name = name.replace(".", "") self.m_name2s_name.update({name: s_name}) self.register_buffer(s_name, p.clone().detach().data) self.collected_params = [] def forward(self, model): decay = self.decay if self.num_updates >= 0: self.num_updates += 1 decay = min(self.decay, (1 + self.num_updates) / (10 + self.num_updates)) one_minus_decay = 1.0 - decay with torch.no_grad(): m_param = dict(model.named_parameters()) shadow_params = dict(self.named_buffers()) for key in m_param: if m_param[key].requires_grad: sname = self.m_name2s_name[key] shadow_params[sname] = shadow_params[sname].type_as(m_param[key]) shadow_params[sname].sub_( one_minus_decay * (shadow_params[sname] - m_param[key]) ) else: assert not key in self.m_name2s_name def copy_to(self, model): m_param = dict(model.named_parameters()) shadow_params = dict(self.named_buffers()) for key in m_param: if m_param[key].requires_grad: m_param[key].data.copy_(shadow_params[self.m_name2s_name[key]].data) else: assert not key in self.m_name2s_name def store(self, parameters): """ Save the current parameters for restoring later. Args: parameters: Iterable of `torch.nn.Parameter`; the parameters to be temporarily stored. """ self.collected_params = [param.clone() for param in parameters] def restore(self, parameters): """ Restore the parameters stored with the `store` method. 
Useful to validate the model with EMA parameters without affecting the original optimization process. Store the parameters before the `copy_to` method. After validation (or model saving), use this to restore the former parameters. Args: parameters: Iterable of `torch.nn.Parameter`; the parameters to be updated with the stored parameters. """ for c_param, param in zip(self.collected_params, parameters): param.data.copy_(c_param.data)
python
MIT
eeb5c648e6c2a25c8f6f8038edfe75d73c811614
2026-01-05T07:14:33.752935Z
false
liuff19/DreamReward
https://github.com/liuff19/DreamReward/blob/eeb5c648e6c2a25c8f6f8038edfe75d73c811614/extern/ldm_zero123/modules/attention.py
extern/ldm_zero123/modules/attention.py
import math from inspect import isfunction import torch import torch.nn.functional as F from einops import rearrange, repeat from torch import einsum, nn from extern.ldm_zero123.modules.diffusionmodules.util import checkpoint def exists(val): return val is not None def uniq(arr): return {el: True for el in arr}.keys() def default(val, d): if exists(val): return val return d() if isfunction(d) else d def max_neg_value(t): return -torch.finfo(t.dtype).max def init_(tensor): dim = tensor.shape[-1] std = 1 / math.sqrt(dim) tensor.uniform_(-std, std) return tensor # feedforward class GEGLU(nn.Module): def __init__(self, dim_in, dim_out): super().__init__() self.proj = nn.Linear(dim_in, dim_out * 2) def forward(self, x): x, gate = self.proj(x).chunk(2, dim=-1) return x * F.gelu(gate) class FeedForward(nn.Module): def __init__(self, dim, dim_out=None, mult=4, glu=False, dropout=0.0): super().__init__() inner_dim = int(dim * mult) dim_out = default(dim_out, dim) project_in = ( nn.Sequential(nn.Linear(dim, inner_dim), nn.GELU()) if not glu else GEGLU(dim, inner_dim) ) self.net = nn.Sequential( project_in, nn.Dropout(dropout), nn.Linear(inner_dim, dim_out) ) def forward(self, x): return self.net(x) def zero_module(module): """ Zero out the parameters of a module and return it. 
""" for p in module.parameters(): p.detach().zero_() return module def Normalize(in_channels): return torch.nn.GroupNorm( num_groups=32, num_channels=in_channels, eps=1e-6, affine=True ) class LinearAttention(nn.Module): def __init__(self, dim, heads=4, dim_head=32): super().__init__() self.heads = heads hidden_dim = dim_head * heads self.to_qkv = nn.Conv2d(dim, hidden_dim * 3, 1, bias=False) self.to_out = nn.Conv2d(hidden_dim, dim, 1) def forward(self, x): b, c, h, w = x.shape qkv = self.to_qkv(x) q, k, v = rearrange( qkv, "b (qkv heads c) h w -> qkv b heads c (h w)", heads=self.heads, qkv=3 ) k = k.softmax(dim=-1) context = torch.einsum("bhdn,bhen->bhde", k, v) out = torch.einsum("bhde,bhdn->bhen", context, q) out = rearrange( out, "b heads c (h w) -> b (heads c) h w", heads=self.heads, h=h, w=w ) return self.to_out(out) class SpatialSelfAttention(nn.Module): def __init__(self, in_channels): super().__init__() self.in_channels = in_channels self.norm = Normalize(in_channels) self.q = torch.nn.Conv2d( in_channels, in_channels, kernel_size=1, stride=1, padding=0 ) self.k = torch.nn.Conv2d( in_channels, in_channels, kernel_size=1, stride=1, padding=0 ) self.v = torch.nn.Conv2d( in_channels, in_channels, kernel_size=1, stride=1, padding=0 ) self.proj_out = torch.nn.Conv2d( in_channels, in_channels, kernel_size=1, stride=1, padding=0 ) def forward(self, x): h_ = x h_ = self.norm(h_) q = self.q(h_) k = self.k(h_) v = self.v(h_) # compute attention b, c, h, w = q.shape q = rearrange(q, "b c h w -> b (h w) c") k = rearrange(k, "b c h w -> b c (h w)") w_ = torch.einsum("bij,bjk->bik", q, k) w_ = w_ * (int(c) ** (-0.5)) w_ = torch.nn.functional.softmax(w_, dim=2) # attend to values v = rearrange(v, "b c h w -> b c (h w)") w_ = rearrange(w_, "b i j -> b j i") h_ = torch.einsum("bij,bjk->bik", v, w_) h_ = rearrange(h_, "b c (h w) -> b c h w", h=h) h_ = self.proj_out(h_) return x + h_ class CrossAttention(nn.Module): def __init__(self, query_dim, context_dim=None, heads=8, 
dim_head=64, dropout=0.0): super().__init__() inner_dim = dim_head * heads context_dim = default(context_dim, query_dim) self.scale = dim_head**-0.5 self.heads = heads self.to_q = nn.Linear(query_dim, inner_dim, bias=False) self.to_k = nn.Linear(context_dim, inner_dim, bias=False) self.to_v = nn.Linear(context_dim, inner_dim, bias=False) self.to_out = nn.Sequential( nn.Linear(inner_dim, query_dim), nn.Dropout(dropout) ) def forward(self, x, context=None, mask=None): h = self.heads q = self.to_q(x) context = default(context, x) k = self.to_k(context) v = self.to_v(context) q, k, v = map(lambda t: rearrange(t, "b n (h d) -> (b h) n d", h=h), (q, k, v)) sim = einsum("b i d, b j d -> b i j", q, k) * self.scale if exists(mask): mask = rearrange(mask, "b ... -> b (...)") max_neg_value = -torch.finfo(sim.dtype).max mask = repeat(mask, "b j -> (b h) () j", h=h) sim.masked_fill_(~mask, max_neg_value) # attention, what we cannot get enough of attn = sim.softmax(dim=-1) out = einsum("b i j, b j d -> b i d", attn, v) out = rearrange(out, "(b h) n d -> b n (h d)", h=h) return self.to_out(out) class BasicTransformerBlock(nn.Module): def __init__( self, dim, n_heads, d_head, dropout=0.0, context_dim=None, gated_ff=True, checkpoint=True, disable_self_attn=False, ): super().__init__() self.disable_self_attn = disable_self_attn self.attn1 = CrossAttention( query_dim=dim, heads=n_heads, dim_head=d_head, dropout=dropout, context_dim=context_dim if self.disable_self_attn else None, ) # is a self-attention if not self.disable_self_attn self.ff = FeedForward(dim, dropout=dropout, glu=gated_ff) self.attn2 = CrossAttention( query_dim=dim, context_dim=context_dim, heads=n_heads, dim_head=d_head, dropout=dropout, ) # is self-attn if context is none self.norm1 = nn.LayerNorm(dim) self.norm2 = nn.LayerNorm(dim) self.norm3 = nn.LayerNorm(dim) self.checkpoint = checkpoint def forward(self, x, context=None): return checkpoint( self._forward, (x, context), self.parameters(), self.checkpoint ) def 
_forward(self, x, context=None): x = ( self.attn1( self.norm1(x), context=context if self.disable_self_attn else None ) + x ) x = self.attn2(self.norm2(x), context=context) + x x = self.ff(self.norm3(x)) + x return x class SpatialTransformer(nn.Module): """ Transformer block for image-like data. First, project the input (aka embedding) and reshape to b, t, d. Then apply standard transformer action. Finally, reshape to image """ def __init__( self, in_channels, n_heads, d_head, depth=1, dropout=0.0, context_dim=None, disable_self_attn=False, ): super().__init__() self.in_channels = in_channels inner_dim = n_heads * d_head self.norm = Normalize(in_channels) self.proj_in = nn.Conv2d( in_channels, inner_dim, kernel_size=1, stride=1, padding=0 ) self.transformer_blocks = nn.ModuleList( [ BasicTransformerBlock( inner_dim, n_heads, d_head, dropout=dropout, context_dim=context_dim, disable_self_attn=disable_self_attn, ) for d in range(depth) ] ) self.proj_out = zero_module( nn.Conv2d(inner_dim, in_channels, kernel_size=1, stride=1, padding=0) ) def forward(self, x, context=None): # note: if no context is given, cross-attention defaults to self-attention b, c, h, w = x.shape x_in = x x = self.norm(x) x = self.proj_in(x) x = rearrange(x, "b c h w -> b (h w) c").contiguous() for block in self.transformer_blocks: x = block(x, context=context) x = rearrange(x, "b (h w) c -> b c h w", h=h, w=w).contiguous() x = self.proj_out(x) return x + x_in
python
MIT
eeb5c648e6c2a25c8f6f8038edfe75d73c811614
2026-01-05T07:14:33.752935Z
false
liuff19/DreamReward
https://github.com/liuff19/DreamReward/blob/eeb5c648e6c2a25c8f6f8038edfe75d73c811614/extern/ldm_zero123/modules/x_transformer.py
extern/ldm_zero123/modules/x_transformer.py
"""shout-out to https://github.com/lucidrains/x-transformers/tree/main/x_transformers""" from collections import namedtuple from functools import partial from inspect import isfunction import torch import torch.nn.functional as F from einops import rearrange, reduce, repeat from torch import einsum, nn # constants DEFAULT_DIM_HEAD = 64 Intermediates = namedtuple("Intermediates", ["pre_softmax_attn", "post_softmax_attn"]) LayerIntermediates = namedtuple("Intermediates", ["hiddens", "attn_intermediates"]) class AbsolutePositionalEmbedding(nn.Module): def __init__(self, dim, max_seq_len): super().__init__() self.emb = nn.Embedding(max_seq_len, dim) self.init_() def init_(self): nn.init.normal_(self.emb.weight, std=0.02) def forward(self, x): n = torch.arange(x.shape[1], device=x.device) return self.emb(n)[None, :, :] class FixedPositionalEmbedding(nn.Module): def __init__(self, dim): super().__init__() inv_freq = 1.0 / (10000 ** (torch.arange(0, dim, 2).float() / dim)) self.register_buffer("inv_freq", inv_freq) def forward(self, x, seq_dim=1, offset=0): t = ( torch.arange(x.shape[seq_dim], device=x.device).type_as(self.inv_freq) + offset ) sinusoid_inp = torch.einsum("i , j -> i j", t, self.inv_freq) emb = torch.cat((sinusoid_inp.sin(), sinusoid_inp.cos()), dim=-1) return emb[None, :, :] # helpers def exists(val): return val is not None def default(val, d): if exists(val): return val return d() if isfunction(d) else d def always(val): def inner(*args, **kwargs): return val return inner def not_equals(val): def inner(x): return x != val return inner def equals(val): def inner(x): return x == val return inner def max_neg_value(tensor): return -torch.finfo(tensor.dtype).max # keyword argument helpers def pick_and_pop(keys, d): values = list(map(lambda key: d.pop(key), keys)) return dict(zip(keys, values)) def group_dict_by_key(cond, d): return_val = [dict(), dict()] for key in d.keys(): match = bool(cond(key)) ind = int(not match) return_val[ind][key] = d[key] return 
(*return_val,) def string_begins_with(prefix, str): return str.startswith(prefix) def group_by_key_prefix(prefix, d): return group_dict_by_key(partial(string_begins_with, prefix), d) def groupby_prefix_and_trim(prefix, d): kwargs_with_prefix, kwargs = group_dict_by_key( partial(string_begins_with, prefix), d ) kwargs_without_prefix = dict( map(lambda x: (x[0][len(prefix) :], x[1]), tuple(kwargs_with_prefix.items())) ) return kwargs_without_prefix, kwargs # classes class Scale(nn.Module): def __init__(self, value, fn): super().__init__() self.value = value self.fn = fn def forward(self, x, **kwargs): x, *rest = self.fn(x, **kwargs) return (x * self.value, *rest) class Rezero(nn.Module): def __init__(self, fn): super().__init__() self.fn = fn self.g = nn.Parameter(torch.zeros(1)) def forward(self, x, **kwargs): x, *rest = self.fn(x, **kwargs) return (x * self.g, *rest) class ScaleNorm(nn.Module): def __init__(self, dim, eps=1e-5): super().__init__() self.scale = dim**-0.5 self.eps = eps self.g = nn.Parameter(torch.ones(1)) def forward(self, x): norm = torch.norm(x, dim=-1, keepdim=True) * self.scale return x / norm.clamp(min=self.eps) * self.g class RMSNorm(nn.Module): def __init__(self, dim, eps=1e-8): super().__init__() self.scale = dim**-0.5 self.eps = eps self.g = nn.Parameter(torch.ones(dim)) def forward(self, x): norm = torch.norm(x, dim=-1, keepdim=True) * self.scale return x / norm.clamp(min=self.eps) * self.g class Residual(nn.Module): def forward(self, x, residual): return x + residual class GRUGating(nn.Module): def __init__(self, dim): super().__init__() self.gru = nn.GRUCell(dim, dim) def forward(self, x, residual): gated_output = self.gru( rearrange(x, "b n d -> (b n) d"), rearrange(residual, "b n d -> (b n) d") ) return gated_output.reshape_as(x) # feedforward class GEGLU(nn.Module): def __init__(self, dim_in, dim_out): super().__init__() self.proj = nn.Linear(dim_in, dim_out * 2) def forward(self, x): x, gate = self.proj(x).chunk(2, dim=-1) return x * 
F.gelu(gate) class FeedForward(nn.Module): def __init__(self, dim, dim_out=None, mult=4, glu=False, dropout=0.0): super().__init__() inner_dim = int(dim * mult) dim_out = default(dim_out, dim) project_in = ( nn.Sequential(nn.Linear(dim, inner_dim), nn.GELU()) if not glu else GEGLU(dim, inner_dim) ) self.net = nn.Sequential( project_in, nn.Dropout(dropout), nn.Linear(inner_dim, dim_out) ) def forward(self, x): return self.net(x) # attention. class Attention(nn.Module): def __init__( self, dim, dim_head=DEFAULT_DIM_HEAD, heads=8, causal=False, mask=None, talking_heads=False, sparse_topk=None, use_entmax15=False, num_mem_kv=0, dropout=0.0, on_attn=False, ): super().__init__() if use_entmax15: raise NotImplementedError( "Check out entmax activation instead of softmax activation!" ) self.scale = dim_head**-0.5 self.heads = heads self.causal = causal self.mask = mask inner_dim = dim_head * heads self.to_q = nn.Linear(dim, inner_dim, bias=False) self.to_k = nn.Linear(dim, inner_dim, bias=False) self.to_v = nn.Linear(dim, inner_dim, bias=False) self.dropout = nn.Dropout(dropout) # talking heads self.talking_heads = talking_heads if talking_heads: self.pre_softmax_proj = nn.Parameter(torch.randn(heads, heads)) self.post_softmax_proj = nn.Parameter(torch.randn(heads, heads)) # explicit topk sparse attention self.sparse_topk = sparse_topk # entmax # self.attn_fn = entmax15 if use_entmax15 else F.softmax self.attn_fn = F.softmax # add memory key / values self.num_mem_kv = num_mem_kv if num_mem_kv > 0: self.mem_k = nn.Parameter(torch.randn(heads, num_mem_kv, dim_head)) self.mem_v = nn.Parameter(torch.randn(heads, num_mem_kv, dim_head)) # attention on attention self.attn_on_attn = on_attn self.to_out = ( nn.Sequential(nn.Linear(inner_dim, dim * 2), nn.GLU()) if on_attn else nn.Linear(inner_dim, dim) ) def forward( self, x, context=None, mask=None, context_mask=None, rel_pos=None, sinusoidal_emb=None, prev_attn=None, mem=None, ): b, n, _, h, talking_heads, device = ( *x.shape, 
self.heads, self.talking_heads, x.device, ) kv_input = default(context, x) q_input = x k_input = kv_input v_input = kv_input if exists(mem): k_input = torch.cat((mem, k_input), dim=-2) v_input = torch.cat((mem, v_input), dim=-2) if exists(sinusoidal_emb): # in shortformer, the query would start at a position offset depending on the past cached memory offset = k_input.shape[-2] - q_input.shape[-2] q_input = q_input + sinusoidal_emb(q_input, offset=offset) k_input = k_input + sinusoidal_emb(k_input) q = self.to_q(q_input) k = self.to_k(k_input) v = self.to_v(v_input) q, k, v = map(lambda t: rearrange(t, "b n (h d) -> b h n d", h=h), (q, k, v)) input_mask = None if any(map(exists, (mask, context_mask))): q_mask = default(mask, lambda: torch.ones((b, n), device=device).bool()) k_mask = q_mask if not exists(context) else context_mask k_mask = default( k_mask, lambda: torch.ones((b, k.shape[-2]), device=device).bool() ) q_mask = rearrange(q_mask, "b i -> b () i ()") k_mask = rearrange(k_mask, "b j -> b () () j") input_mask = q_mask * k_mask if self.num_mem_kv > 0: mem_k, mem_v = map( lambda t: repeat(t, "h n d -> b h n d", b=b), (self.mem_k, self.mem_v) ) k = torch.cat((mem_k, k), dim=-2) v = torch.cat((mem_v, v), dim=-2) if exists(input_mask): input_mask = F.pad(input_mask, (self.num_mem_kv, 0), value=True) dots = einsum("b h i d, b h j d -> b h i j", q, k) * self.scale mask_value = max_neg_value(dots) if exists(prev_attn): dots = dots + prev_attn pre_softmax_attn = dots if talking_heads: dots = einsum( "b h i j, h k -> b k i j", dots, self.pre_softmax_proj ).contiguous() if exists(rel_pos): dots = rel_pos(dots) if exists(input_mask): dots.masked_fill_(~input_mask, mask_value) del input_mask if self.causal: i, j = dots.shape[-2:] r = torch.arange(i, device=device) mask = rearrange(r, "i -> () () i ()") < rearrange(r, "j -> () () () j") mask = F.pad(mask, (j - i, 0), value=False) dots.masked_fill_(mask, mask_value) del mask if exists(self.sparse_topk) and 
self.sparse_topk < dots.shape[-1]: top, _ = dots.topk(self.sparse_topk, dim=-1) vk = top[..., -1].unsqueeze(-1).expand_as(dots) mask = dots < vk dots.masked_fill_(mask, mask_value) del mask attn = self.attn_fn(dots, dim=-1) post_softmax_attn = attn attn = self.dropout(attn) if talking_heads: attn = einsum( "b h i j, h k -> b k i j", attn, self.post_softmax_proj ).contiguous() out = einsum("b h i j, b h j d -> b h i d", attn, v) out = rearrange(out, "b h n d -> b n (h d)") intermediates = Intermediates( pre_softmax_attn=pre_softmax_attn, post_softmax_attn=post_softmax_attn ) return self.to_out(out), intermediates class AttentionLayers(nn.Module): def __init__( self, dim, depth, heads=8, causal=False, cross_attend=False, only_cross=False, use_scalenorm=False, use_rmsnorm=False, use_rezero=False, rel_pos_num_buckets=32, rel_pos_max_distance=128, position_infused_attn=False, custom_layers=None, sandwich_coef=None, par_ratio=None, residual_attn=False, cross_residual_attn=False, macaron=False, pre_norm=True, gate_residual=False, **kwargs, ): super().__init__() ff_kwargs, kwargs = groupby_prefix_and_trim("ff_", kwargs) attn_kwargs, _ = groupby_prefix_and_trim("attn_", kwargs) dim_head = attn_kwargs.get("dim_head", DEFAULT_DIM_HEAD) self.dim = dim self.depth = depth self.layers = nn.ModuleList([]) self.has_pos_emb = position_infused_attn self.pia_pos_emb = ( FixedPositionalEmbedding(dim) if position_infused_attn else None ) self.rotary_pos_emb = always(None) assert ( rel_pos_num_buckets <= rel_pos_max_distance ), "number of relative position buckets must be less than the relative position max distance" self.rel_pos = None self.pre_norm = pre_norm self.residual_attn = residual_attn self.cross_residual_attn = cross_residual_attn norm_class = ScaleNorm if use_scalenorm else nn.LayerNorm norm_class = RMSNorm if use_rmsnorm else norm_class norm_fn = partial(norm_class, dim) norm_fn = nn.Identity if use_rezero else norm_fn branch_fn = Rezero if use_rezero else None if 
cross_attend and not only_cross: default_block = ("a", "c", "f") elif cross_attend and only_cross: default_block = ("c", "f") else: default_block = ("a", "f") if macaron: default_block = ("f",) + default_block if exists(custom_layers): layer_types = custom_layers elif exists(par_ratio): par_depth = depth * len(default_block) assert 1 < par_ratio <= par_depth, "par ratio out of range" default_block = tuple(filter(not_equals("f"), default_block)) par_attn = par_depth // par_ratio depth_cut = ( par_depth * 2 // 3 ) # 2 / 3 attention layer cutoff suggested by PAR paper par_width = (depth_cut + depth_cut // par_attn) // par_attn assert ( len(default_block) <= par_width ), "default block is too large for par_ratio" par_block = default_block + ("f",) * (par_width - len(default_block)) par_head = par_block * par_attn layer_types = par_head + ("f",) * (par_depth - len(par_head)) elif exists(sandwich_coef): assert ( sandwich_coef > 0 and sandwich_coef <= depth ), "sandwich coefficient should be less than the depth" layer_types = ( ("a",) * sandwich_coef + default_block * (depth - sandwich_coef) + ("f",) * sandwich_coef ) else: layer_types = default_block * depth self.layer_types = layer_types self.num_attn_layers = len(list(filter(equals("a"), layer_types))) for layer_type in self.layer_types: if layer_type == "a": layer = Attention(dim, heads=heads, causal=causal, **attn_kwargs) elif layer_type == "c": layer = Attention(dim, heads=heads, **attn_kwargs) elif layer_type == "f": layer = FeedForward(dim, **ff_kwargs) layer = layer if not macaron else Scale(0.5, layer) else: raise Exception(f"invalid layer type {layer_type}") if isinstance(layer, Attention) and exists(branch_fn): layer = branch_fn(layer) if gate_residual: residual_fn = GRUGating(dim) else: residual_fn = Residual() self.layers.append(nn.ModuleList([norm_fn(), layer, residual_fn])) def forward( self, x, context=None, mask=None, context_mask=None, mems=None, return_hiddens=False, ): hiddens = [] intermediates = [] 
prev_attn = None prev_cross_attn = None mems = mems.copy() if exists(mems) else [None] * self.num_attn_layers for ind, (layer_type, (norm, block, residual_fn)) in enumerate( zip(self.layer_types, self.layers) ): is_last = ind == (len(self.layers) - 1) if layer_type == "a": hiddens.append(x) layer_mem = mems.pop(0) residual = x if self.pre_norm: x = norm(x) if layer_type == "a": out, inter = block( x, mask=mask, sinusoidal_emb=self.pia_pos_emb, rel_pos=self.rel_pos, prev_attn=prev_attn, mem=layer_mem, ) elif layer_type == "c": out, inter = block( x, context=context, mask=mask, context_mask=context_mask, prev_attn=prev_cross_attn, ) elif layer_type == "f": out = block(x) x = residual_fn(out, residual) if layer_type in ("a", "c"): intermediates.append(inter) if layer_type == "a" and self.residual_attn: prev_attn = inter.pre_softmax_attn elif layer_type == "c" and self.cross_residual_attn: prev_cross_attn = inter.pre_softmax_attn if not self.pre_norm and not is_last: x = norm(x) if return_hiddens: intermediates = LayerIntermediates( hiddens=hiddens, attn_intermediates=intermediates ) return x, intermediates return x class Encoder(AttentionLayers): def __init__(self, **kwargs): assert "causal" not in kwargs, "cannot set causality on encoder" super().__init__(causal=False, **kwargs) class TransformerWrapper(nn.Module): def __init__( self, *, num_tokens, max_seq_len, attn_layers, emb_dim=None, max_mem_len=0.0, emb_dropout=0.0, num_memory_tokens=None, tie_embedding=False, use_pos_emb=True, ): super().__init__() assert isinstance( attn_layers, AttentionLayers ), "attention layers must be one of Encoder or Decoder" dim = attn_layers.dim emb_dim = default(emb_dim, dim) self.max_seq_len = max_seq_len self.max_mem_len = max_mem_len self.num_tokens = num_tokens self.token_emb = nn.Embedding(num_tokens, emb_dim) self.pos_emb = ( AbsolutePositionalEmbedding(emb_dim, max_seq_len) if (use_pos_emb and not attn_layers.has_pos_emb) else always(0) ) self.emb_dropout = 
nn.Dropout(emb_dropout) self.project_emb = nn.Linear(emb_dim, dim) if emb_dim != dim else nn.Identity() self.attn_layers = attn_layers self.norm = nn.LayerNorm(dim) self.init_() self.to_logits = ( nn.Linear(dim, num_tokens) if not tie_embedding else lambda t: t @ self.token_emb.weight.t() ) # memory tokens (like [cls]) from Memory Transformers paper num_memory_tokens = default(num_memory_tokens, 0) self.num_memory_tokens = num_memory_tokens if num_memory_tokens > 0: self.memory_tokens = nn.Parameter(torch.randn(num_memory_tokens, dim)) # let funnel encoder know number of memory tokens, if specified if hasattr(attn_layers, "num_memory_tokens"): attn_layers.num_memory_tokens = num_memory_tokens def init_(self): nn.init.normal_(self.token_emb.weight, std=0.02) def forward( self, x, return_embeddings=False, mask=None, return_mems=False, return_attn=False, mems=None, **kwargs, ): b, n, device, num_mem = *x.shape, x.device, self.num_memory_tokens x = self.token_emb(x) x += self.pos_emb(x) x = self.emb_dropout(x) x = self.project_emb(x) if num_mem > 0: mem = repeat(self.memory_tokens, "n d -> b n d", b=b) x = torch.cat((mem, x), dim=1) # auto-handle masking after appending memory tokens if exists(mask): mask = F.pad(mask, (num_mem, 0), value=True) x, intermediates = self.attn_layers( x, mask=mask, mems=mems, return_hiddens=True, **kwargs ) x = self.norm(x) mem, x = x[:, :num_mem], x[:, num_mem:] out = self.to_logits(x) if not return_embeddings else x if return_mems: hiddens = intermediates.hiddens new_mems = ( list(map(lambda pair: torch.cat(pair, dim=-2), zip(mems, hiddens))) if exists(mems) else hiddens ) new_mems = list( map(lambda t: t[..., -self.max_mem_len :, :].detach(), new_mems) ) return out, new_mems if return_attn: attn_maps = list( map(lambda t: t.post_softmax_attn, intermediates.attn_intermediates) ) return out, attn_maps return out
python
MIT
eeb5c648e6c2a25c8f6f8038edfe75d73c811614
2026-01-05T07:14:33.752935Z
false
liuff19/DreamReward
https://github.com/liuff19/DreamReward/blob/eeb5c648e6c2a25c8f6f8038edfe75d73c811614/extern/ldm_zero123/modules/image_degradation/bsrgan.py
extern/ldm_zero123/modules/image_degradation/bsrgan.py
# -*- coding: utf-8 -*- """ # -------------------------------------------- # Super-Resolution # -------------------------------------------- # # Kai Zhang (cskaizhang@gmail.com) # https://github.com/cszn # From 2019/03--2021/08 # -------------------------------------------- """ import random from functools import partial import albumentations import cv2 import numpy as np import scipy import scipy.stats as ss import torch from scipy import ndimage from scipy.interpolate import interp2d from scipy.linalg import orth import extern.ldm_zero123.modules.image_degradation.utils_image as util def modcrop_np(img, sf): """ Args: img: numpy image, WxH or WxHxC sf: scale factor Return: cropped image """ w, h = img.shape[:2] im = np.copy(img) return im[: w - w % sf, : h - h % sf, ...] """ # -------------------------------------------- # anisotropic Gaussian kernels # -------------------------------------------- """ def analytic_kernel(k): """Calculate the X4 kernel from the X2 kernel (for proof see appendix in paper)""" k_size = k.shape[0] # Calculate the big kernels size big_k = np.zeros((3 * k_size - 2, 3 * k_size - 2)) # Loop over the small kernel to fill the big one for r in range(k_size): for c in range(k_size): big_k[2 * r : 2 * r + k_size, 2 * c : 2 * c + k_size] += k[r, c] * k # Crop the edges of the big kernel to ignore very small values and increase run time of SR crop = k_size // 2 cropped_big_k = big_k[crop:-crop, crop:-crop] # Normalize to 1 return cropped_big_k / cropped_big_k.sum() def anisotropic_Gaussian(ksize=15, theta=np.pi, l1=6, l2=6): """generate an anisotropic Gaussian kernel Args: ksize : e.g., 15, kernel size theta : [0, pi], rotation angle range l1 : [0.1,50], scaling of eigenvalues l2 : [0.1,l1], scaling of eigenvalues If l1 = l2, will get an isotropic Gaussian kernel. 
Returns: k : kernel """ v = np.dot( np.array([[np.cos(theta), -np.sin(theta)], [np.sin(theta), np.cos(theta)]]), np.array([1.0, 0.0]), ) V = np.array([[v[0], v[1]], [v[1], -v[0]]]) D = np.array([[l1, 0], [0, l2]]) Sigma = np.dot(np.dot(V, D), np.linalg.inv(V)) k = gm_blur_kernel(mean=[0, 0], cov=Sigma, size=ksize) return k def gm_blur_kernel(mean, cov, size=15): center = size / 2.0 + 0.5 k = np.zeros([size, size]) for y in range(size): for x in range(size): cy = y - center + 1 cx = x - center + 1 k[y, x] = ss.multivariate_normal.pdf([cx, cy], mean=mean, cov=cov) k = k / np.sum(k) return k def shift_pixel(x, sf, upper_left=True): """shift pixel for super-resolution with different scale factors Args: x: WxHxC or WxH sf: scale factor upper_left: shift direction """ h, w = x.shape[:2] shift = (sf - 1) * 0.5 xv, yv = np.arange(0, w, 1.0), np.arange(0, h, 1.0) if upper_left: x1 = xv + shift y1 = yv + shift else: x1 = xv - shift y1 = yv - shift x1 = np.clip(x1, 0, w - 1) y1 = np.clip(y1, 0, h - 1) if x.ndim == 2: x = interp2d(xv, yv, x)(x1, y1) if x.ndim == 3: for i in range(x.shape[-1]): x[:, :, i] = interp2d(xv, yv, x[:, :, i])(x1, y1) return x def blur(x, k): """ x: image, NxcxHxW k: kernel, Nx1xhxw """ n, c = x.shape[:2] p1, p2 = (k.shape[-2] - 1) // 2, (k.shape[-1] - 1) // 2 x = torch.nn.functional.pad(x, pad=(p1, p2, p1, p2), mode="replicate") k = k.repeat(1, c, 1, 1) k = k.view(-1, 1, k.shape[2], k.shape[3]) x = x.view(1, -1, x.shape[2], x.shape[3]) x = torch.nn.functional.conv2d(x, k, bias=None, stride=1, padding=0, groups=n * c) x = x.view(n, c, x.shape[2], x.shape[3]) return x def gen_kernel( k_size=np.array([15, 15]), scale_factor=np.array([4, 4]), min_var=0.6, max_var=10.0, noise_level=0, ): """ " # modified version of https://github.com/assafshocher/BlindSR_dataset_generator # Kai Zhang # min_var = 0.175 * sf # variance of the gaussian kernel will be sampled between min_var and max_var # max_var = 2.5 * sf """ # Set random eigen-vals (lambdas) and angle 
(theta) for COV matrix lambda_1 = min_var + np.random.rand() * (max_var - min_var) lambda_2 = min_var + np.random.rand() * (max_var - min_var) theta = np.random.rand() * np.pi # random theta noise = -noise_level + np.random.rand(*k_size) * noise_level * 2 # Set COV matrix using Lambdas and Theta LAMBDA = np.diag([lambda_1, lambda_2]) Q = np.array([[np.cos(theta), -np.sin(theta)], [np.sin(theta), np.cos(theta)]]) SIGMA = Q @ LAMBDA @ Q.T INV_SIGMA = np.linalg.inv(SIGMA)[None, None, :, :] # Set expectation position (shifting kernel for aligned image) MU = k_size // 2 - 0.5 * (scale_factor - 1) # - 0.5 * (scale_factor - k_size % 2) MU = MU[None, None, :, None] # Create meshgrid for Gaussian [X, Y] = np.meshgrid(range(k_size[0]), range(k_size[1])) Z = np.stack([X, Y], 2)[:, :, :, None] # Calcualte Gaussian for every pixel of the kernel ZZ = Z - MU ZZ_t = ZZ.transpose(0, 1, 3, 2) raw_kernel = np.exp(-0.5 * np.squeeze(ZZ_t @ INV_SIGMA @ ZZ)) * (1 + noise) # shift the kernel so it will be centered # raw_kernel_centered = kernel_shift(raw_kernel, scale_factor) # Normalize the kernel and return # kernel = raw_kernel_centered / np.sum(raw_kernel_centered) kernel = raw_kernel / np.sum(raw_kernel) return kernel def fspecial_gaussian(hsize, sigma): hsize = [hsize, hsize] siz = [(hsize[0] - 1.0) / 2.0, (hsize[1] - 1.0) / 2.0] std = sigma [x, y] = np.meshgrid(np.arange(-siz[1], siz[1] + 1), np.arange(-siz[0], siz[0] + 1)) arg = -(x * x + y * y) / (2 * std * std) h = np.exp(arg) h[h < scipy.finfo(float).eps * h.max()] = 0 sumh = h.sum() if sumh != 0: h = h / sumh return h def fspecial_laplacian(alpha): alpha = max([0, min([alpha, 1])]) h1 = alpha / (alpha + 1) h2 = (1 - alpha) / (alpha + 1) h = [[h1, h2, h1], [h2, -4 / (alpha + 1), h2], [h1, h2, h1]] h = np.array(h) return h def fspecial(filter_type, *args, **kwargs): """ python code from: https://github.com/ronaldosena/imagens-medicas-2/blob/40171a6c259edec7827a6693a93955de2bd39e76/Aulas/aula_2_-_uniform_filter/matlab_fspecial.py 
""" if filter_type == "gaussian": return fspecial_gaussian(*args, **kwargs) if filter_type == "laplacian": return fspecial_laplacian(*args, **kwargs) """ # -------------------------------------------- # degradation models # -------------------------------------------- """ def bicubic_degradation(x, sf=3): """ Args: x: HxWxC image, [0, 1] sf: down-scale factor Return: bicubicly downsampled LR image """ x = util.imresize_np(x, scale=1 / sf) return x def srmd_degradation(x, k, sf=3): """blur + bicubic downsampling Args: x: HxWxC image, [0, 1] k: hxw, double sf: down-scale factor Return: downsampled LR image Reference: @inproceedings{zhang2018learning, title={Learning a single convolutional super-resolution network for multiple degradations}, author={Zhang, Kai and Zuo, Wangmeng and Zhang, Lei}, booktitle={IEEE Conference on Computer Vision and Pattern Recognition}, pages={3262--3271}, year={2018} } """ x = ndimage.filters.convolve( x, np.expand_dims(k, axis=2), mode="wrap" ) # 'nearest' | 'mirror' x = bicubic_degradation(x, sf=sf) return x def dpsr_degradation(x, k, sf=3): """bicubic downsampling + blur Args: x: HxWxC image, [0, 1] k: hxw, double sf: down-scale factor Return: downsampled LR image Reference: @inproceedings{zhang2019deep, title={Deep Plug-and-Play Super-Resolution for Arbitrary Blur Kernels}, author={Zhang, Kai and Zuo, Wangmeng and Zhang, Lei}, booktitle={IEEE Conference on Computer Vision and Pattern Recognition}, pages={1671--1681}, year={2019} } """ x = bicubic_degradation(x, sf=sf) x = ndimage.filters.convolve(x, np.expand_dims(k, axis=2), mode="wrap") return x def classical_degradation(x, k, sf=3): """blur + downsampling Args: x: HxWxC image, [0, 1]/[0, 255] k: hxw, double sf: down-scale factor Return: downsampled LR image """ x = ndimage.filters.convolve(x, np.expand_dims(k, axis=2), mode="wrap") # x = filters.correlate(x, np.expand_dims(np.flip(k), axis=2)) st = 0 return x[st::sf, st::sf, ...] 
def add_sharpening(img, weight=0.5, radius=50, threshold=10): """USM sharpening. borrowed from real-ESRGAN Input image: I; Blurry image: B. 1. K = I + weight * (I - B) 2. Mask = 1 if abs(I - B) > threshold, else: 0 3. Blur mask: 4. Out = Mask * K + (1 - Mask) * I Args: img (Numpy array): Input image, HWC, BGR; float32, [0, 1]. weight (float): Sharp weight. Default: 1. radius (float): Kernel size of Gaussian blur. Default: 50. threshold (int): """ if radius % 2 == 0: radius += 1 blur = cv2.GaussianBlur(img, (radius, radius), 0) residual = img - blur mask = np.abs(residual) * 255 > threshold mask = mask.astype("float32") soft_mask = cv2.GaussianBlur(mask, (radius, radius), 0) K = img + weight * residual K = np.clip(K, 0, 1) return soft_mask * K + (1 - soft_mask) * img def add_blur(img, sf=4): wd2 = 4.0 + sf wd = 2.0 + 0.2 * sf if random.random() < 0.5: l1 = wd2 * random.random() l2 = wd2 * random.random() k = anisotropic_Gaussian( ksize=2 * random.randint(2, 11) + 3, theta=random.random() * np.pi, l1=l1, l2=l2, ) else: k = fspecial("gaussian", 2 * random.randint(2, 11) + 3, wd * random.random()) img = ndimage.filters.convolve(img, np.expand_dims(k, axis=2), mode="mirror") return img def add_resize(img, sf=4): rnum = np.random.rand() if rnum > 0.8: # up sf1 = random.uniform(1, 2) elif rnum < 0.7: # down sf1 = random.uniform(0.5 / sf, 1) else: sf1 = 1.0 img = cv2.resize( img, (int(sf1 * img.shape[1]), int(sf1 * img.shape[0])), interpolation=random.choice([1, 2, 3]), ) img = np.clip(img, 0.0, 1.0) return img # def add_Gaussian_noise(img, noise_level1=2, noise_level2=25): # noise_level = random.randint(noise_level1, noise_level2) # rnum = np.random.rand() # if rnum > 0.6: # add color Gaussian noise # img += np.random.normal(0, noise_level / 255.0, img.shape).astype(np.float32) # elif rnum < 0.4: # add grayscale Gaussian noise # img += np.random.normal(0, noise_level / 255.0, (*img.shape[:2], 1)).astype(np.float32) # else: # add noise # L = noise_level2 / 255. 
# D = np.diag(np.random.rand(3)) # U = orth(np.random.rand(3, 3)) # conv = np.dot(np.dot(np.transpose(U), D), U) # img += np.random.multivariate_normal([0, 0, 0], np.abs(L ** 2 * conv), img.shape[:2]).astype(np.float32) # img = np.clip(img, 0.0, 1.0) # return img def add_Gaussian_noise(img, noise_level1=2, noise_level2=25): noise_level = random.randint(noise_level1, noise_level2) rnum = np.random.rand() if rnum > 0.6: # add color Gaussian noise img = img + np.random.normal(0, noise_level / 255.0, img.shape).astype( np.float32 ) elif rnum < 0.4: # add grayscale Gaussian noise img = img + np.random.normal( 0, noise_level / 255.0, (*img.shape[:2], 1) ).astype(np.float32) else: # add noise L = noise_level2 / 255.0 D = np.diag(np.random.rand(3)) U = orth(np.random.rand(3, 3)) conv = np.dot(np.dot(np.transpose(U), D), U) img = img + np.random.multivariate_normal( [0, 0, 0], np.abs(L**2 * conv), img.shape[:2] ).astype(np.float32) img = np.clip(img, 0.0, 1.0) return img def add_speckle_noise(img, noise_level1=2, noise_level2=25): noise_level = random.randint(noise_level1, noise_level2) img = np.clip(img, 0.0, 1.0) rnum = random.random() if rnum > 0.6: img += img * np.random.normal(0, noise_level / 255.0, img.shape).astype( np.float32 ) elif rnum < 0.4: img += img * np.random.normal( 0, noise_level / 255.0, (*img.shape[:2], 1) ).astype(np.float32) else: L = noise_level2 / 255.0 D = np.diag(np.random.rand(3)) U = orth(np.random.rand(3, 3)) conv = np.dot(np.dot(np.transpose(U), D), U) img += img * np.random.multivariate_normal( [0, 0, 0], np.abs(L**2 * conv), img.shape[:2] ).astype(np.float32) img = np.clip(img, 0.0, 1.0) return img def add_Poisson_noise(img): img = np.clip((img * 255.0).round(), 0, 255) / 255.0 vals = 10 ** (2 * random.random() + 2.0) # [2, 4] if random.random() < 0.5: img = np.random.poisson(img * vals).astype(np.float32) / vals else: img_gray = np.dot(img[..., :3], [0.299, 0.587, 0.114]) img_gray = np.clip((img_gray * 255.0).round(), 0, 255) / 255.0 
noise_gray = ( np.random.poisson(img_gray * vals).astype(np.float32) / vals - img_gray ) img += noise_gray[:, :, np.newaxis] img = np.clip(img, 0.0, 1.0) return img def add_JPEG_noise(img): quality_factor = random.randint(30, 95) img = cv2.cvtColor(util.single2uint(img), cv2.COLOR_RGB2BGR) result, encimg = cv2.imencode( ".jpg", img, [int(cv2.IMWRITE_JPEG_QUALITY), quality_factor] ) img = cv2.imdecode(encimg, 1) img = cv2.cvtColor(util.uint2single(img), cv2.COLOR_BGR2RGB) return img def random_crop(lq, hq, sf=4, lq_patchsize=64): h, w = lq.shape[:2] rnd_h = random.randint(0, h - lq_patchsize) rnd_w = random.randint(0, w - lq_patchsize) lq = lq[rnd_h : rnd_h + lq_patchsize, rnd_w : rnd_w + lq_patchsize, :] rnd_h_H, rnd_w_H = int(rnd_h * sf), int(rnd_w * sf) hq = hq[ rnd_h_H : rnd_h_H + lq_patchsize * sf, rnd_w_H : rnd_w_H + lq_patchsize * sf, : ] return lq, hq def degradation_bsrgan(img, sf=4, lq_patchsize=72, isp_model=None): """ This is the degradation model of BSRGAN from the paper "Designing a Practical Degradation Model for Deep Blind Image Super-Resolution" ---------- img: HXWXC, [0, 1], its size should be large than (lq_patchsizexsf)x(lq_patchsizexsf) sf: scale factor isp_model: camera ISP model Returns ------- img: low-quality patch, size: lq_patchsizeXlq_patchsizeXC, range: [0, 1] hq: corresponding high-quality patch, size: (lq_patchsizexsf)X(lq_patchsizexsf)XC, range: [0, 1] """ isp_prob, jpeg_prob, scale2_prob = 0.25, 0.9, 0.25 sf_ori = sf h1, w1 = img.shape[:2] img = img.copy()[: w1 - w1 % sf, : h1 - h1 % sf, ...] 
# mod crop h, w = img.shape[:2] if h < lq_patchsize * sf or w < lq_patchsize * sf: raise ValueError(f"img size ({h1}X{w1}) is too small!") hq = img.copy() if sf == 4 and random.random() < scale2_prob: # downsample1 if np.random.rand() < 0.5: img = cv2.resize( img, (int(1 / 2 * img.shape[1]), int(1 / 2 * img.shape[0])), interpolation=random.choice([1, 2, 3]), ) else: img = util.imresize_np(img, 1 / 2, True) img = np.clip(img, 0.0, 1.0) sf = 2 shuffle_order = random.sample(range(7), 7) idx1, idx2 = shuffle_order.index(2), shuffle_order.index(3) if idx1 > idx2: # keep downsample3 last shuffle_order[idx1], shuffle_order[idx2] = ( shuffle_order[idx2], shuffle_order[idx1], ) for i in shuffle_order: if i == 0: img = add_blur(img, sf=sf) elif i == 1: img = add_blur(img, sf=sf) elif i == 2: a, b = img.shape[1], img.shape[0] # downsample2 if random.random() < 0.75: sf1 = random.uniform(1, 2 * sf) img = cv2.resize( img, (int(1 / sf1 * img.shape[1]), int(1 / sf1 * img.shape[0])), interpolation=random.choice([1, 2, 3]), ) else: k = fspecial("gaussian", 25, random.uniform(0.1, 0.6 * sf)) k_shifted = shift_pixel(k, sf) k_shifted = k_shifted / k_shifted.sum() # blur with shifted kernel img = ndimage.filters.convolve( img, np.expand_dims(k_shifted, axis=2), mode="mirror" ) img = img[0::sf, 0::sf, ...] 
# nearest downsampling img = np.clip(img, 0.0, 1.0) elif i == 3: # downsample3 img = cv2.resize( img, (int(1 / sf * a), int(1 / sf * b)), interpolation=random.choice([1, 2, 3]), ) img = np.clip(img, 0.0, 1.0) elif i == 4: # add Gaussian noise img = add_Gaussian_noise(img, noise_level1=2, noise_level2=25) elif i == 5: # add JPEG noise if random.random() < jpeg_prob: img = add_JPEG_noise(img) elif i == 6: # add processed camera sensor noise if random.random() < isp_prob and isp_model is not None: with torch.no_grad(): img, hq = isp_model.forward(img.copy(), hq) # add final JPEG compression noise img = add_JPEG_noise(img) # random crop img, hq = random_crop(img, hq, sf_ori, lq_patchsize) return img, hq # todo no isp_model? def degradation_bsrgan_variant(image, sf=4, isp_model=None): """ This is the degradation model of BSRGAN from the paper "Designing a Practical Degradation Model for Deep Blind Image Super-Resolution" ---------- sf: scale factor isp_model: camera ISP model Returns ------- img: low-quality patch, size: lq_patchsizeXlq_patchsizeXC, range: [0, 1] hq: corresponding high-quality patch, size: (lq_patchsizexsf)X(lq_patchsizexsf)XC, range: [0, 1] """ image = util.uint2single(image) isp_prob, jpeg_prob, scale2_prob = 0.25, 0.9, 0.25 sf_ori = sf h1, w1 = image.shape[:2] image = image.copy()[: w1 - w1 % sf, : h1 - h1 % sf, ...] 
# mod crop h, w = image.shape[:2] hq = image.copy() if sf == 4 and random.random() < scale2_prob: # downsample1 if np.random.rand() < 0.5: image = cv2.resize( image, (int(1 / 2 * image.shape[1]), int(1 / 2 * image.shape[0])), interpolation=random.choice([1, 2, 3]), ) else: image = util.imresize_np(image, 1 / 2, True) image = np.clip(image, 0.0, 1.0) sf = 2 shuffle_order = random.sample(range(7), 7) idx1, idx2 = shuffle_order.index(2), shuffle_order.index(3) if idx1 > idx2: # keep downsample3 last shuffle_order[idx1], shuffle_order[idx2] = ( shuffle_order[idx2], shuffle_order[idx1], ) for i in shuffle_order: if i == 0: image = add_blur(image, sf=sf) elif i == 1: image = add_blur(image, sf=sf) elif i == 2: a, b = image.shape[1], image.shape[0] # downsample2 if random.random() < 0.75: sf1 = random.uniform(1, 2 * sf) image = cv2.resize( image, (int(1 / sf1 * image.shape[1]), int(1 / sf1 * image.shape[0])), interpolation=random.choice([1, 2, 3]), ) else: k = fspecial("gaussian", 25, random.uniform(0.1, 0.6 * sf)) k_shifted = shift_pixel(k, sf) k_shifted = k_shifted / k_shifted.sum() # blur with shifted kernel image = ndimage.filters.convolve( image, np.expand_dims(k_shifted, axis=2), mode="mirror" ) image = image[0::sf, 0::sf, ...] 
# nearest downsampling image = np.clip(image, 0.0, 1.0) elif i == 3: # downsample3 image = cv2.resize( image, (int(1 / sf * a), int(1 / sf * b)), interpolation=random.choice([1, 2, 3]), ) image = np.clip(image, 0.0, 1.0) elif i == 4: # add Gaussian noise image = add_Gaussian_noise(image, noise_level1=2, noise_level2=25) elif i == 5: # add JPEG noise if random.random() < jpeg_prob: image = add_JPEG_noise(image) # elif i == 6: # # add processed camera sensor noise # if random.random() < isp_prob and isp_model is not None: # with torch.no_grad(): # img, hq = isp_model.forward(img.copy(), hq) # add final JPEG compression noise image = add_JPEG_noise(image) image = util.single2uint(image) example = {"image": image} return example # TODO incase there is a pickle error one needs to replace a += x with a = a + x in add_speckle_noise etc... def degradation_bsrgan_plus( img, sf=4, shuffle_prob=0.5, use_sharp=True, lq_patchsize=64, isp_model=None ): """ This is an extended degradation model by combining the degradation models of BSRGAN and Real-ESRGAN ---------- img: HXWXC, [0, 1], its size should be large than (lq_patchsizexsf)x(lq_patchsizexsf) sf: scale factor use_shuffle: the degradation shuffle use_sharp: sharpening the img Returns ------- img: low-quality patch, size: lq_patchsizeXlq_patchsizeXC, range: [0, 1] hq: corresponding high-quality patch, size: (lq_patchsizexsf)X(lq_patchsizexsf)XC, range: [0, 1] """ h1, w1 = img.shape[:2] img = img.copy()[: w1 - w1 % sf, : h1 - h1 % sf, ...] 
# mod crop h, w = img.shape[:2] if h < lq_patchsize * sf or w < lq_patchsize * sf: raise ValueError(f"img size ({h1}X{w1}) is too small!") if use_sharp: img = add_sharpening(img) hq = img.copy() if random.random() < shuffle_prob: shuffle_order = random.sample(range(13), 13) else: shuffle_order = list(range(13)) # local shuffle for noise, JPEG is always the last one shuffle_order[2:6] = random.sample(shuffle_order[2:6], len(range(2, 6))) shuffle_order[9:13] = random.sample(shuffle_order[9:13], len(range(9, 13))) poisson_prob, speckle_prob, isp_prob = 0.1, 0.1, 0.1 for i in shuffle_order: if i == 0: img = add_blur(img, sf=sf) elif i == 1: img = add_resize(img, sf=sf) elif i == 2: img = add_Gaussian_noise(img, noise_level1=2, noise_level2=25) elif i == 3: if random.random() < poisson_prob: img = add_Poisson_noise(img) elif i == 4: if random.random() < speckle_prob: img = add_speckle_noise(img) elif i == 5: if random.random() < isp_prob and isp_model is not None: with torch.no_grad(): img, hq = isp_model.forward(img.copy(), hq) elif i == 6: img = add_JPEG_noise(img) elif i == 7: img = add_blur(img, sf=sf) elif i == 8: img = add_resize(img, sf=sf) elif i == 9: img = add_Gaussian_noise(img, noise_level1=2, noise_level2=25) elif i == 10: if random.random() < poisson_prob: img = add_Poisson_noise(img) elif i == 11: if random.random() < speckle_prob: img = add_speckle_noise(img) elif i == 12: if random.random() < isp_prob and isp_model is not None: with torch.no_grad(): img, hq = isp_model.forward(img.copy(), hq) else: print("check the shuffle!") # resize to desired size img = cv2.resize( img, (int(1 / sf * hq.shape[1]), int(1 / sf * hq.shape[0])), interpolation=random.choice([1, 2, 3]), ) # add final JPEG compression noise img = add_JPEG_noise(img) # random crop img, hq = random_crop(img, hq, sf, lq_patchsize) return img, hq if __name__ == "__main__": print("hey") img = util.imread_uint("utils/test.png", 3) print(img) img = util.uint2single(img) print(img) img = img[:448, 
:448] h = img.shape[0] // 4 print("resizing to", h) sf = 4 deg_fn = partial(degradation_bsrgan_variant, sf=sf) for i in range(20): print(i) img_lq = deg_fn(img) print(img_lq) img_lq_bicubic = albumentations.SmallestMaxSize( max_size=h, interpolation=cv2.INTER_CUBIC )(image=img)["image"] print(img_lq.shape) print("bicubic", img_lq_bicubic.shape) print(img_hq.shape) lq_nearest = cv2.resize( util.single2uint(img_lq), (int(sf * img_lq.shape[1]), int(sf * img_lq.shape[0])), interpolation=0, ) lq_bicubic_nearest = cv2.resize( util.single2uint(img_lq_bicubic), (int(sf * img_lq.shape[1]), int(sf * img_lq.shape[0])), interpolation=0, ) img_concat = np.concatenate( [lq_bicubic_nearest, lq_nearest, util.single2uint(img_hq)], axis=1 ) util.imsave(img_concat, str(i) + ".png")
python
MIT
eeb5c648e6c2a25c8f6f8038edfe75d73c811614
2026-01-05T07:14:33.752935Z
false
liuff19/DreamReward
https://github.com/liuff19/DreamReward/blob/eeb5c648e6c2a25c8f6f8038edfe75d73c811614/extern/ldm_zero123/modules/image_degradation/bsrgan_light.py
extern/ldm_zero123/modules/image_degradation/bsrgan_light.py
# -*- coding: utf-8 -*- import random from functools import partial import albumentations import cv2 import numpy as np import scipy import scipy.stats as ss import torch from scipy import ndimage from scipy.interpolate import interp2d from scipy.linalg import orth import extern.ldm_zero123.modules.image_degradation.utils_image as util """ # -------------------------------------------- # Super-Resolution # -------------------------------------------- # # Kai Zhang (cskaizhang@gmail.com) # https://github.com/cszn # From 2019/03--2021/08 # -------------------------------------------- """ def modcrop_np(img, sf): """ Args: img: numpy image, WxH or WxHxC sf: scale factor Return: cropped image """ w, h = img.shape[:2] im = np.copy(img) return im[: w - w % sf, : h - h % sf, ...] """ # -------------------------------------------- # anisotropic Gaussian kernels # -------------------------------------------- """ def analytic_kernel(k): """Calculate the X4 kernel from the X2 kernel (for proof see appendix in paper)""" k_size = k.shape[0] # Calculate the big kernels size big_k = np.zeros((3 * k_size - 2, 3 * k_size - 2)) # Loop over the small kernel to fill the big one for r in range(k_size): for c in range(k_size): big_k[2 * r : 2 * r + k_size, 2 * c : 2 * c + k_size] += k[r, c] * k # Crop the edges of the big kernel to ignore very small values and increase run time of SR crop = k_size // 2 cropped_big_k = big_k[crop:-crop, crop:-crop] # Normalize to 1 return cropped_big_k / cropped_big_k.sum() def anisotropic_Gaussian(ksize=15, theta=np.pi, l1=6, l2=6): """generate an anisotropic Gaussian kernel Args: ksize : e.g., 15, kernel size theta : [0, pi], rotation angle range l1 : [0.1,50], scaling of eigenvalues l2 : [0.1,l1], scaling of eigenvalues If l1 = l2, will get an isotropic Gaussian kernel. 
Returns: k : kernel """ v = np.dot( np.array([[np.cos(theta), -np.sin(theta)], [np.sin(theta), np.cos(theta)]]), np.array([1.0, 0.0]), ) V = np.array([[v[0], v[1]], [v[1], -v[0]]]) D = np.array([[l1, 0], [0, l2]]) Sigma = np.dot(np.dot(V, D), np.linalg.inv(V)) k = gm_blur_kernel(mean=[0, 0], cov=Sigma, size=ksize) return k def gm_blur_kernel(mean, cov, size=15): center = size / 2.0 + 0.5 k = np.zeros([size, size]) for y in range(size): for x in range(size): cy = y - center + 1 cx = x - center + 1 k[y, x] = ss.multivariate_normal.pdf([cx, cy], mean=mean, cov=cov) k = k / np.sum(k) return k def shift_pixel(x, sf, upper_left=True): """shift pixel for super-resolution with different scale factors Args: x: WxHxC or WxH sf: scale factor upper_left: shift direction """ h, w = x.shape[:2] shift = (sf - 1) * 0.5 xv, yv = np.arange(0, w, 1.0), np.arange(0, h, 1.0) if upper_left: x1 = xv + shift y1 = yv + shift else: x1 = xv - shift y1 = yv - shift x1 = np.clip(x1, 0, w - 1) y1 = np.clip(y1, 0, h - 1) if x.ndim == 2: x = interp2d(xv, yv, x)(x1, y1) if x.ndim == 3: for i in range(x.shape[-1]): x[:, :, i] = interp2d(xv, yv, x[:, :, i])(x1, y1) return x def blur(x, k): """ x: image, NxcxHxW k: kernel, Nx1xhxw """ n, c = x.shape[:2] p1, p2 = (k.shape[-2] - 1) // 2, (k.shape[-1] - 1) // 2 x = torch.nn.functional.pad(x, pad=(p1, p2, p1, p2), mode="replicate") k = k.repeat(1, c, 1, 1) k = k.view(-1, 1, k.shape[2], k.shape[3]) x = x.view(1, -1, x.shape[2], x.shape[3]) x = torch.nn.functional.conv2d(x, k, bias=None, stride=1, padding=0, groups=n * c) x = x.view(n, c, x.shape[2], x.shape[3]) return x def gen_kernel( k_size=np.array([15, 15]), scale_factor=np.array([4, 4]), min_var=0.6, max_var=10.0, noise_level=0, ): """ " # modified version of https://github.com/assafshocher/BlindSR_dataset_generator # Kai Zhang # min_var = 0.175 * sf # variance of the gaussian kernel will be sampled between min_var and max_var # max_var = 2.5 * sf """ # Set random eigen-vals (lambdas) and angle 
(theta) for COV matrix lambda_1 = min_var + np.random.rand() * (max_var - min_var) lambda_2 = min_var + np.random.rand() * (max_var - min_var) theta = np.random.rand() * np.pi # random theta noise = -noise_level + np.random.rand(*k_size) * noise_level * 2 # Set COV matrix using Lambdas and Theta LAMBDA = np.diag([lambda_1, lambda_2]) Q = np.array([[np.cos(theta), -np.sin(theta)], [np.sin(theta), np.cos(theta)]]) SIGMA = Q @ LAMBDA @ Q.T INV_SIGMA = np.linalg.inv(SIGMA)[None, None, :, :] # Set expectation position (shifting kernel for aligned image) MU = k_size // 2 - 0.5 * (scale_factor - 1) # - 0.5 * (scale_factor - k_size % 2) MU = MU[None, None, :, None] # Create meshgrid for Gaussian [X, Y] = np.meshgrid(range(k_size[0]), range(k_size[1])) Z = np.stack([X, Y], 2)[:, :, :, None] # Calcualte Gaussian for every pixel of the kernel ZZ = Z - MU ZZ_t = ZZ.transpose(0, 1, 3, 2) raw_kernel = np.exp(-0.5 * np.squeeze(ZZ_t @ INV_SIGMA @ ZZ)) * (1 + noise) # shift the kernel so it will be centered # raw_kernel_centered = kernel_shift(raw_kernel, scale_factor) # Normalize the kernel and return # kernel = raw_kernel_centered / np.sum(raw_kernel_centered) kernel = raw_kernel / np.sum(raw_kernel) return kernel def fspecial_gaussian(hsize, sigma): hsize = [hsize, hsize] siz = [(hsize[0] - 1.0) / 2.0, (hsize[1] - 1.0) / 2.0] std = sigma [x, y] = np.meshgrid(np.arange(-siz[1], siz[1] + 1), np.arange(-siz[0], siz[0] + 1)) arg = -(x * x + y * y) / (2 * std * std) h = np.exp(arg) h[h < scipy.finfo(float).eps * h.max()] = 0 sumh = h.sum() if sumh != 0: h = h / sumh return h def fspecial_laplacian(alpha): alpha = max([0, min([alpha, 1])]) h1 = alpha / (alpha + 1) h2 = (1 - alpha) / (alpha + 1) h = [[h1, h2, h1], [h2, -4 / (alpha + 1), h2], [h1, h2, h1]] h = np.array(h) return h def fspecial(filter_type, *args, **kwargs): """ python code from: https://github.com/ronaldosena/imagens-medicas-2/blob/40171a6c259edec7827a6693a93955de2bd39e76/Aulas/aula_2_-_uniform_filter/matlab_fspecial.py 
""" if filter_type == "gaussian": return fspecial_gaussian(*args, **kwargs) if filter_type == "laplacian": return fspecial_laplacian(*args, **kwargs) """ # -------------------------------------------- # degradation models # -------------------------------------------- """ def bicubic_degradation(x, sf=3): """ Args: x: HxWxC image, [0, 1] sf: down-scale factor Return: bicubicly downsampled LR image """ x = util.imresize_np(x, scale=1 / sf) return x def srmd_degradation(x, k, sf=3): """blur + bicubic downsampling Args: x: HxWxC image, [0, 1] k: hxw, double sf: down-scale factor Return: downsampled LR image Reference: @inproceedings{zhang2018learning, title={Learning a single convolutional super-resolution network for multiple degradations}, author={Zhang, Kai and Zuo, Wangmeng and Zhang, Lei}, booktitle={IEEE Conference on Computer Vision and Pattern Recognition}, pages={3262--3271}, year={2018} } """ x = ndimage.convolve( x, np.expand_dims(k, axis=2), mode="wrap" ) # 'nearest' | 'mirror' x = bicubic_degradation(x, sf=sf) return x def dpsr_degradation(x, k, sf=3): """bicubic downsampling + blur Args: x: HxWxC image, [0, 1] k: hxw, double sf: down-scale factor Return: downsampled LR image Reference: @inproceedings{zhang2019deep, title={Deep Plug-and-Play Super-Resolution for Arbitrary Blur Kernels}, author={Zhang, Kai and Zuo, Wangmeng and Zhang, Lei}, booktitle={IEEE Conference on Computer Vision and Pattern Recognition}, pages={1671--1681}, year={2019} } """ x = bicubic_degradation(x, sf=sf) x = ndimage.convolve(x, np.expand_dims(k, axis=2), mode="wrap") return x def classical_degradation(x, k, sf=3): """blur + downsampling Args: x: HxWxC image, [0, 1]/[0, 255] k: hxw, double sf: down-scale factor Return: downsampled LR image """ x = ndimage.convolve(x, np.expand_dims(k, axis=2), mode="wrap") # x = filters.correlate(x, np.expand_dims(np.flip(k), axis=2)) st = 0 return x[st::sf, st::sf, ...] 
def add_sharpening(img, weight=0.5, radius=50, threshold=10): """USM sharpening. borrowed from real-ESRGAN Input image: I; Blurry image: B. 1. K = I + weight * (I - B) 2. Mask = 1 if abs(I - B) > threshold, else: 0 3. Blur mask: 4. Out = Mask * K + (1 - Mask) * I Args: img (Numpy array): Input image, HWC, BGR; float32, [0, 1]. weight (float): Sharp weight. Default: 1. radius (float): Kernel size of Gaussian blur. Default: 50. threshold (int): """ if radius % 2 == 0: radius += 1 blur = cv2.GaussianBlur(img, (radius, radius), 0) residual = img - blur mask = np.abs(residual) * 255 > threshold mask = mask.astype("float32") soft_mask = cv2.GaussianBlur(mask, (radius, radius), 0) K = img + weight * residual K = np.clip(K, 0, 1) return soft_mask * K + (1 - soft_mask) * img def add_blur(img, sf=4): wd2 = 4.0 + sf wd = 2.0 + 0.2 * sf wd2 = wd2 / 4 wd = wd / 4 if random.random() < 0.5: l1 = wd2 * random.random() l2 = wd2 * random.random() k = anisotropic_Gaussian( ksize=random.randint(2, 11) + 3, theta=random.random() * np.pi, l1=l1, l2=l2 ) else: k = fspecial("gaussian", random.randint(2, 4) + 3, wd * random.random()) img = ndimage.convolve(img, np.expand_dims(k, axis=2), mode="mirror") return img def add_resize(img, sf=4): rnum = np.random.rand() if rnum > 0.8: # up sf1 = random.uniform(1, 2) elif rnum < 0.7: # down sf1 = random.uniform(0.5 / sf, 1) else: sf1 = 1.0 img = cv2.resize( img, (int(sf1 * img.shape[1]), int(sf1 * img.shape[0])), interpolation=random.choice([1, 2, 3]), ) img = np.clip(img, 0.0, 1.0) return img # def add_Gaussian_noise(img, noise_level1=2, noise_level2=25): # noise_level = random.randint(noise_level1, noise_level2) # rnum = np.random.rand() # if rnum > 0.6: # add color Gaussian noise # img += np.random.normal(0, noise_level / 255.0, img.shape).astype(np.float32) # elif rnum < 0.4: # add grayscale Gaussian noise # img += np.random.normal(0, noise_level / 255.0, (*img.shape[:2], 1)).astype(np.float32) # else: # add noise # L = noise_level2 / 255. 
def add_Gaussian_noise(img, noise_level1=2, noise_level2=25):
    """Add random additive Gaussian noise to img (HxWxC, [0, 1]).

    With p~0.4 the noise is i.i.d. per channel (color), with p~0.4 it is
    shared across channels (grayscale), and with p~0.2 it is
    channel-correlated with a random covariance. Output is clipped to [0, 1].
    """
    noise_level = random.randint(noise_level1, noise_level2)
    rnum = np.random.rand()
    if rnum > 0.6:  # add color Gaussian noise
        img = img + np.random.normal(0, noise_level / 255.0, img.shape).astype(
            np.float32
        )
    elif rnum < 0.4:  # add grayscale Gaussian noise
        img = img + np.random.normal(
            0, noise_level / 255.0, (*img.shape[:2], 1)
        ).astype(np.float32)
    else:  # add channel-correlated noise
        L = noise_level2 / 255.0
        D = np.diag(np.random.rand(3))
        U = orth(np.random.rand(3, 3))
        conv = np.dot(np.dot(np.transpose(U), D), U)  # random PSD covariance
        img = img + np.random.multivariate_normal(
            [0, 0, 0], np.abs(L**2 * conv), img.shape[:2]
        ).astype(np.float32)
    img = np.clip(img, 0.0, 1.0)
    return img


def add_speckle_noise(img, noise_level1=2, noise_level2=25):
    """Add multiplicative (speckle) noise; same branching as add_Gaussian_noise."""
    noise_level = random.randint(noise_level1, noise_level2)
    img = np.clip(img, 0.0, 1.0)
    rnum = random.random()
    if rnum > 0.6:
        img += img * np.random.normal(0, noise_level / 255.0, img.shape).astype(
            np.float32
        )
    elif rnum < 0.4:
        img += img * np.random.normal(
            0, noise_level / 255.0, (*img.shape[:2], 1)
        ).astype(np.float32)
    else:
        L = noise_level2 / 255.0
        D = np.diag(np.random.rand(3))
        U = orth(np.random.rand(3, 3))
        conv = np.dot(np.dot(np.transpose(U), D), U)
        img += img * np.random.multivariate_normal(
            [0, 0, 0], np.abs(L**2 * conv), img.shape[:2]
        ).astype(np.float32)
    img = np.clip(img, 0.0, 1.0)
    return img


def add_Poisson_noise(img):
    """Add Poisson (shot) noise; half the time applied to luminance only."""
    img = np.clip((img * 255.0).round(), 0, 255) / 255.0
    vals = 10 ** (2 * random.random() + 2.0)  # scale factor in [1e2, 1e4]
    if random.random() < 0.5:
        img = np.random.poisson(img * vals).astype(np.float32) / vals
    else:
        img_gray = np.dot(img[..., :3], [0.299, 0.587, 0.114])
        img_gray = np.clip((img_gray * 255.0).round(), 0, 255) / 255.0
        noise_gray = (
            np.random.poisson(img_gray * vals).astype(np.float32) / vals - img_gray
        )
        img += noise_gray[:, :, np.newaxis]
    img = np.clip(img, 0.0, 1.0)
    return img


def add_JPEG_noise(img):
    """JPEG-compress img (RGB float, [0, 1]) at a random quality in [80, 95]."""
    quality_factor = random.randint(80, 95)
    img = cv2.cvtColor(util.single2uint(img), cv2.COLOR_RGB2BGR)
    result, encimg = cv2.imencode(
        ".jpg", img, [int(cv2.IMWRITE_JPEG_QUALITY), quality_factor]
    )
    img = cv2.imdecode(encimg, 1)
    img = cv2.cvtColor(util.uint2single(img), cv2.COLOR_BGR2RGB)
    return img


def random_crop(lq, hq, sf=4, lq_patchsize=64):
    """Crop an aligned random patch pair: lq_patchsize on LQ, sf* that on HQ."""
    h, w = lq.shape[:2]
    rnd_h = random.randint(0, h - lq_patchsize)
    rnd_w = random.randint(0, w - lq_patchsize)
    lq = lq[rnd_h : rnd_h + lq_patchsize, rnd_w : rnd_w + lq_patchsize, :]

    rnd_h_H, rnd_w_H = int(rnd_h * sf), int(rnd_w * sf)
    hq = hq[
        rnd_h_H : rnd_h_H + lq_patchsize * sf, rnd_w_H : rnd_w_H + lq_patchsize * sf, :
    ]
    return lq, hq


def degradation_bsrgan(img, sf=4, lq_patchsize=72, isp_model=None):
    """
    This is the degradation model of BSRGAN from the paper
    "Designing a Practical Degradation Model for Deep Blind Image Super-Resolution"
    ----------
    img: HXWXC, [0, 1], its size should be large than (lq_patchsizexsf)x(lq_patchsizexsf)
    sf: scale factor
    isp_model: camera ISP model

    Returns
    -------
    img: low-quality patch, size: lq_patchsizeXlq_patchsizeXC, range: [0, 1]
    hq: corresponding high-quality patch, size: (lq_patchsizexsf)X(lq_patchsizexsf)XC, range: [0, 1]
    """
    isp_prob, jpeg_prob, scale2_prob = 0.25, 0.9, 0.25
    sf_ori = sf

    h1, w1 = img.shape[:2]
    # mod crop: trim so both sides are divisible by sf.
    # Fix: the crop previously applied w1 to the height axis and h1 to the
    # width axis, mis-cropping non-square images.
    img = img.copy()[: h1 - h1 % sf, : w1 - w1 % sf, ...]
    h, w = img.shape[:2]

    if h < lq_patchsize * sf or w < lq_patchsize * sf:
        raise ValueError(f"img size ({h1}X{w1}) is too small!")

    hq = img.copy()

    if sf == 4 and random.random() < scale2_prob:  # downsample1
        if np.random.rand() < 0.5:
            img = cv2.resize(
                img,
                (int(1 / 2 * img.shape[1]), int(1 / 2 * img.shape[0])),
                interpolation=random.choice([1, 2, 3]),
            )
        else:
            img = util.imresize_np(img, 1 / 2, True)
        img = np.clip(img, 0.0, 1.0)
        sf = 2

    shuffle_order = random.sample(range(7), 7)
    idx1, idx2 = shuffle_order.index(2), shuffle_order.index(3)
    if idx1 > idx2:  # keep downsample3 last
        shuffle_order[idx1], shuffle_order[idx2] = (
            shuffle_order[idx2],
            shuffle_order[idx1],
        )

    for i in shuffle_order:
        if i == 0:
            img = add_blur(img, sf=sf)
        elif i == 1:
            # a second, independent blur stage (intentional duplicate of i == 0)
            img = add_blur(img, sf=sf)
        elif i == 2:
            a, b = img.shape[1], img.shape[0]
            # downsample2
            if random.random() < 0.75:
                sf1 = random.uniform(1, 2 * sf)
                img = cv2.resize(
                    img,
                    (int(1 / sf1 * img.shape[1]), int(1 / sf1 * img.shape[0])),
                    interpolation=random.choice([1, 2, 3]),
                )
            else:
                k = fspecial("gaussian", 25, random.uniform(0.1, 0.6 * sf))
                k_shifted = shift_pixel(k, sf)
                k_shifted = k_shifted / k_shifted.sum()  # blur with shifted kernel
                img = ndimage.convolve(
                    img, np.expand_dims(k_shifted, axis=2), mode="mirror"
                )
                img = img[0::sf, 0::sf, ...]  # nearest downsampling
            img = np.clip(img, 0.0, 1.0)
        elif i == 3:
            # downsample3: resize to the target LR size (a, b captured at i == 2)
            img = cv2.resize(
                img,
                (int(1 / sf * a), int(1 / sf * b)),
                interpolation=random.choice([1, 2, 3]),
            )
            img = np.clip(img, 0.0, 1.0)
        elif i == 4:
            # add Gaussian noise
            img = add_Gaussian_noise(img, noise_level1=2, noise_level2=8)
        elif i == 5:
            # add JPEG noise
            if random.random() < jpeg_prob:
                img = add_JPEG_noise(img)
        elif i == 6:
            # add processed camera sensor noise
            if random.random() < isp_prob and isp_model is not None:
                with torch.no_grad():
                    img, hq = isp_model.forward(img.copy(), hq)

    # add final JPEG compression noise
    img = add_JPEG_noise(img)

    # random crop
    img, hq = random_crop(img, hq, sf_ori, lq_patchsize)

    return img, hq


# todo no isp_model?
def degradation_bsrgan_variant(image, sf=4, isp_model=None):
    """
    This is the degradation model of BSRGAN from the paper
    "Designing a Practical Degradation Model for Deep Blind Image Super-Resolution"
    ----------
    sf: scale factor
    isp_model: camera ISP model

    Returns
    -------
    img: low-quality patch, size: lq_patchsizeXlq_patchsizeXC, range: [0, 1]
    hq: corresponding high-quality patch, size: (lq_patchsizexsf)X(lq_patchsizexsf)XC, range: [0, 1]
    """
    image = util.uint2single(image)
    isp_prob, jpeg_prob, scale2_prob = 0.25, 0.9, 0.25
    sf_ori = sf  # kept for parity with degradation_bsrgan (unused here)

    h1, w1 = image.shape[:2]
    # mod crop: trim so both sides are divisible by sf.
    # Fix: h1 now crops the height axis and w1 the width axis (they were
    # swapped, mis-cropping non-square images).
    image = image.copy()[: h1 - h1 % sf, : w1 - w1 % sf, ...]
    h, w = image.shape[:2]

    hq = image.copy()

    if sf == 4 and random.random() < scale2_prob:  # downsample1
        if np.random.rand() < 0.5:
            image = cv2.resize(
                image,
                (int(1 / 2 * image.shape[1]), int(1 / 2 * image.shape[0])),
                interpolation=random.choice([1, 2, 3]),
            )
        else:
            image = util.imresize_np(image, 1 / 2, True)
        image = np.clip(image, 0.0, 1.0)
        sf = 2

    shuffle_order = random.sample(range(7), 7)
    idx1, idx2 = shuffle_order.index(2), shuffle_order.index(3)
    if idx1 > idx2:  # keep downsample3 last
        shuffle_order[idx1], shuffle_order[idx2] = (
            shuffle_order[idx2],
            shuffle_order[idx1],
        )

    for i in shuffle_order:
        # Stages 1 (second blur) and 6 (ISP model) are deliberately disabled
        # in this variant; the former dead `if i == 0: pass` chain is merged
        # into a single if/elif with identical behavior.
        if i == 0:
            image = add_blur(image, sf=sf)
        elif i == 2:
            a, b = image.shape[1], image.shape[0]
            # downsample2
            if random.random() < 0.8:
                sf1 = random.uniform(1, 2 * sf)
                image = cv2.resize(
                    image,
                    (int(1 / sf1 * image.shape[1]), int(1 / sf1 * image.shape[0])),
                    interpolation=random.choice([1, 2, 3]),
                )
            else:
                k = fspecial("gaussian", 25, random.uniform(0.1, 0.6 * sf))
                k_shifted = shift_pixel(k, sf)
                k_shifted = k_shifted / k_shifted.sum()  # blur with shifted kernel
                image = ndimage.convolve(
                    image, np.expand_dims(k_shifted, axis=2), mode="mirror"
                )
                image = image[0::sf, 0::sf, ...]  # nearest downsampling
            image = np.clip(image, 0.0, 1.0)
        elif i == 3:
            # downsample3: resize to the target LR size (a, b captured at i == 2)
            image = cv2.resize(
                image,
                (int(1 / sf * a), int(1 / sf * b)),
                interpolation=random.choice([1, 2, 3]),
            )
            image = np.clip(image, 0.0, 1.0)
        elif i == 4:
            # add Gaussian noise
            image = add_Gaussian_noise(image, noise_level1=1, noise_level2=2)
        elif i == 5:
            # add JPEG noise
            if random.random() < jpeg_prob:
                image = add_JPEG_noise(image)

    # add final JPEG compression noise
    image = add_JPEG_noise(image)
    image = util.single2uint(image)
    example = {"image": image}
    return example


if __name__ == "__main__":
    # Smoke test: repeatedly degrade a local test image and save side-by-side
    # comparisons (bicubic LR | degraded LR | HQ), all nearest-upscaled to HQ size.
    print("hey")
    img = util.imread_uint("utils/test.png", 3)
    img = img[:448, :448]
    h = img.shape[0] // 4
    print("resizing to", h)
    sf = 4
    deg_fn = partial(degradation_bsrgan_variant, sf=sf)
    for i in range(20):
        print(i)
        img_hq = img
        img_lq = deg_fn(img)["image"]
        img_hq, img_lq = util.uint2single(img_hq), util.uint2single(img_lq)
        print(img_lq)
        img_lq_bicubic = albumentations.SmallestMaxSize(
            max_size=h, interpolation=cv2.INTER_CUBIC
        )(image=img_hq)["image"]
        print(img_lq.shape)
        print("bicubic", img_lq_bicubic.shape)
        print(img_hq.shape)
        lq_nearest = cv2.resize(
            util.single2uint(img_lq),
            (int(sf * img_lq.shape[1]), int(sf * img_lq.shape[0])),
            interpolation=0,
        )
        lq_bicubic_nearest = cv2.resize(
            util.single2uint(img_lq_bicubic),
            (int(sf * img_lq.shape[1]), int(sf * img_lq.shape[0])),
            interpolation=0,
        )
        img_concat = np.concatenate(
            [lq_bicubic_nearest, lq_nearest, util.single2uint(img_hq)], axis=1
        )
        util.imsave(img_concat, str(i) + ".png")
python
MIT
eeb5c648e6c2a25c8f6f8038edfe75d73c811614
2026-01-05T07:14:33.752935Z
false