Dataset schema: text (string, length 7 to 1.24M), id (string, length 14 to 166), metadata (dict), __index_level_0__ (int64, 0 to 519)
# Copyright 2024 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.


from typing import List, Optional, Tuple, Union

import torch

from ...utils.torch_utils import randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput


class DDPMPipeline(DiffusionPipeline):
    r"""
    Pipeline for image generation.

    This model inherits from [`DiffusionPipeline`]. Check the superclass documentation for the generic methods
    implemented for all pipelines (downloading, saving, running on a particular device, etc.).

    Parameters:
        unet ([`UNet2DModel`]):
            A `UNet2DModel` to denoise the encoded image latents.
        scheduler ([`SchedulerMixin`]):
            A scheduler to be used in combination with `unet` to denoise the encoded image. Can be one of
            [`DDPMScheduler`], or [`DDIMScheduler`].
    """

    model_cpu_offload_seq = "unet"

    def __init__(self, unet, scheduler):
        super().__init__()
        self.register_modules(unet=unet, scheduler=scheduler)

    @torch.no_grad()
    def __call__(
        self,
        batch_size: int = 1,
        generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
        num_inference_steps: int = 1000,
        output_type: Optional[str] = "pil",
        return_dict: bool = True,
    ) -> Union[ImagePipelineOutput, Tuple]:
        r"""
        The call function to the pipeline for generation.

        Args:
            batch_size (`int`, *optional*, defaults to 1):
                The number of images to generate.
            generator (`torch.Generator`, *optional*):
                A [`torch.Generator`](https://pytorch.org/docs/stable/generated/torch.Generator.html) to make
                generation deterministic.
            num_inference_steps (`int`, *optional*, defaults to 1000):
                The number of denoising steps. More denoising steps usually lead to a higher quality image at the
                expense of slower inference.
            output_type (`str`, *optional*, defaults to `"pil"`):
                The output format of the generated image. Choose between `PIL.Image` or `np.array`.
            return_dict (`bool`, *optional*, defaults to `True`):
                Whether or not to return a [`~pipelines.ImagePipelineOutput`] instead of a plain tuple.

        Example:

        ```py
        >>> from diffusers import DDPMPipeline

        >>> # load model and scheduler
        >>> pipe = DDPMPipeline.from_pretrained("google/ddpm-cat-256")

        >>> # run pipeline in inference (sample random noise and denoise)
        >>> image = pipe().images[0]

        >>> # save image
        >>> image.save("ddpm_generated_image.png")
        ```

        Returns:
            [`~pipelines.ImagePipelineOutput`] or `tuple`:
                If `return_dict` is `True`, [`~pipelines.ImagePipelineOutput`] is returned, otherwise a `tuple` is
                returned where the first element is a list with the generated images
        """
        # Sample gaussian noise to begin loop
        if isinstance(self.unet.config.sample_size, int):
            image_shape = (
                batch_size,
                self.unet.config.in_channels,
                self.unet.config.sample_size,
                self.unet.config.sample_size,
            )
        else:
            image_shape = (batch_size, self.unet.config.in_channels, *self.unet.config.sample_size)

        if self.device.type == "mps":
            # randn does not work reproducibly on mps
            image = randn_tensor(image_shape, generator=generator)
            image = image.to(self.device)
        else:
            image = randn_tensor(image_shape, generator=generator, device=self.device)

        # set step values
        self.scheduler.set_timesteps(num_inference_steps)

        for t in self.progress_bar(self.scheduler.timesteps):
            # 1. predict noise model_output
            model_output = self.unet(image, t).sample

            # 2. compute previous image: x_t -> x_t-1
            image = self.scheduler.step(model_output, t, image, generator=generator).prev_sample

        image = (image / 2 + 0.5).clamp(0, 1)
        image = image.cpu().permute(0, 2, 3, 1).numpy()
        if output_type == "pil":
            image = self.numpy_to_pil(image)

        if not return_dict:
            return (image,)

        return ImagePipelineOutput(images=image)
diffusers/src/diffusers/pipelines/ddpm/pipeline_ddpm.py/0
{ "file_path": "diffusers/src/diffusers/pipelines/ddpm/pipeline_ddpm.py", "repo_id": "diffusers", "token_count": 2059 }
146
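A minimal usage sketch for the `DDPMPipeline` in the row above. The checkpoint name comes from the file's own docstring example; the reduced step count and the seeded CPU generator are illustrative choices, not part of the source.

```py
import torch

from diffusers import DDPMPipeline

pipe = DDPMPipeline.from_pretrained("google/ddpm-cat-256")

# __call__ accepts batch_size, generator, num_inference_steps, output_type,
# and return_dict; seeding the generator makes the sampled noise reproducible.
generator = torch.Generator(device="cpu").manual_seed(0)
images = pipe(batch_size=2, num_inference_steps=100, generator=generator).images

for i, image in enumerate(images):
    image.save(f"ddpm_sample_{i}.png")
```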
# Copyright 2022 The Music Spectrogram Diffusion Authors.
# Copyright 2024 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import dataclasses
import math
import os
from typing import Any, Callable, List, Mapping, MutableMapping, Optional, Sequence, Tuple, Union

import numpy as np
import torch
import torch.nn.functional as F

from ....utils import is_note_seq_available
from .pipeline_spectrogram_diffusion import TARGET_FEATURE_LENGTH


if is_note_seq_available():
    import note_seq
else:
    raise ImportError("Please install note-seq via `pip install note-seq`")


INPUT_FEATURE_LENGTH = 2048

SAMPLE_RATE = 16000
HOP_SIZE = 320
FRAME_RATE = int(SAMPLE_RATE // HOP_SIZE)

DEFAULT_STEPS_PER_SECOND = 100
DEFAULT_MAX_SHIFT_SECONDS = 10
DEFAULT_NUM_VELOCITY_BINS = 1

SLAKH_CLASS_PROGRAMS = {
    "Acoustic Piano": 0,
    "Electric Piano": 4,
    "Chromatic Percussion": 8,
    "Organ": 16,
    "Acoustic Guitar": 24,
    "Clean Electric Guitar": 26,
    "Distorted Electric Guitar": 29,
    "Acoustic Bass": 32,
    "Electric Bass": 33,
    "Violin": 40,
    "Viola": 41,
    "Cello": 42,
    "Contrabass": 43,
    "Orchestral Harp": 46,
    "Timpani": 47,
    "String Ensemble": 48,
    "Synth Strings": 50,
    "Choir and Voice": 52,
    "Orchestral Hit": 55,
    "Trumpet": 56,
    "Trombone": 57,
    "Tuba": 58,
    "French Horn": 60,
    "Brass Section": 61,
    "Soprano/Alto Sax": 64,
    "Tenor Sax": 66,
    "Baritone Sax": 67,
    "Oboe": 68,
    "English Horn": 69,
    "Bassoon": 70,
    "Clarinet": 71,
    "Pipe": 73,
    "Synth Lead": 80,
    "Synth Pad": 88,
}


@dataclasses.dataclass
class NoteRepresentationConfig:
    """Configuration note representations."""

    onsets_only: bool
    include_ties: bool


@dataclasses.dataclass
class NoteEventData:
    pitch: int
    velocity: Optional[int] = None
    program: Optional[int] = None
    is_drum: Optional[bool] = None
    instrument: Optional[int] = None


@dataclasses.dataclass
class NoteEncodingState:
    """Encoding state for note transcription, keeping track of active pitches."""

    # velocity bin for active pitches and programs
    active_pitches: MutableMapping[Tuple[int, int], int] = dataclasses.field(default_factory=dict)


@dataclasses.dataclass
class EventRange:
    type: str
    min_value: int
    max_value: int


@dataclasses.dataclass
class Event:
    type: str
    value: int


class Tokenizer:
    def __init__(self, regular_ids: int):
        # The special tokens: 0=PAD, 1=EOS, and 2=UNK
        self._num_special_tokens = 3
        self._num_regular_tokens = regular_ids

    def encode(self, token_ids):
        encoded = []
        for token_id in token_ids:
            if not 0 <= token_id < self._num_regular_tokens:
                raise ValueError(
                    f"token_id {token_id} does not fall within valid range of [0, {self._num_regular_tokens})"
                )
            encoded.append(token_id + self._num_special_tokens)

        # Add EOS token
        encoded.append(1)

        # Pad to till INPUT_FEATURE_LENGTH
        encoded = encoded + [0] * (INPUT_FEATURE_LENGTH - len(encoded))

        return encoded


class Codec:
    """Encode and decode events.

    Useful for declaring what certain ranges of a vocabulary should be used for. This is intended to be used from
    Python before encoding or after decoding with GenericTokenVocabulary. This class is more lightweight and does not
    include things like EOS or UNK token handling.

    To ensure that 'shift' events are always the first block of the vocab and start at 0, that event type is required
    and specified separately.
    """

    def __init__(self, max_shift_steps: int, steps_per_second: float, event_ranges: List[EventRange]):
        """Define Codec.

        Args:
          max_shift_steps: Maximum number of shift steps that can be encoded.
          steps_per_second: Shift steps will be interpreted as having a duration of 1 / steps_per_second.
          event_ranges: Other supported event types and their ranges.
        """
        self.steps_per_second = steps_per_second
        self._shift_range = EventRange(type="shift", min_value=0, max_value=max_shift_steps)
        self._event_ranges = [self._shift_range] + event_ranges
        # Ensure all event types have unique names.
        assert len(self._event_ranges) == len({er.type for er in self._event_ranges})

    @property
    def num_classes(self) -> int:
        return sum(er.max_value - er.min_value + 1 for er in self._event_ranges)

    # The next couple methods are simplified special case methods just for shift
    # events that are intended to be used from within autograph functions.

    def is_shift_event_index(self, index: int) -> bool:
        return (self._shift_range.min_value <= index) and (index <= self._shift_range.max_value)

    @property
    def max_shift_steps(self) -> int:
        return self._shift_range.max_value

    def encode_event(self, event: Event) -> int:
        """Encode an event to an index."""
        offset = 0
        for er in self._event_ranges:
            if event.type == er.type:
                if not er.min_value <= event.value <= er.max_value:
                    raise ValueError(
                        f"Event value {event.value} is not within valid range "
                        f"[{er.min_value}, {er.max_value}] for type {event.type}"
                    )
                return offset + event.value - er.min_value
            offset += er.max_value - er.min_value + 1

        raise ValueError(f"Unknown event type: {event.type}")

    def event_type_range(self, event_type: str) -> Tuple[int, int]:
        """Return [min_id, max_id] for an event type."""
        offset = 0
        for er in self._event_ranges:
            if event_type == er.type:
                return offset, offset + (er.max_value - er.min_value)
            offset += er.max_value - er.min_value + 1

        raise ValueError(f"Unknown event type: {event_type}")

    def decode_event_index(self, index: int) -> Event:
        """Decode an event index to an Event."""
        offset = 0
        for er in self._event_ranges:
            if offset <= index <= offset + er.max_value - er.min_value:
                return Event(type=er.type, value=er.min_value + index - offset)
            offset += er.max_value - er.min_value + 1

        raise ValueError(f"Unknown event index: {index}")


@dataclasses.dataclass
class ProgramGranularity:
    # both tokens_map_fn and program_map_fn should be idempotent
    tokens_map_fn: Callable[[Sequence[int], Codec], Sequence[int]]
    program_map_fn: Callable[[int], int]


def drop_programs(tokens, codec: Codec):
    """Drops program change events from a token sequence."""
    min_program_id, max_program_id = codec.event_type_range("program")
    return tokens[(tokens < min_program_id) | (tokens > max_program_id)]


def programs_to_midi_classes(tokens, codec):
    """Modifies program events to be the first program in the MIDI class."""
    min_program_id, max_program_id = codec.event_type_range("program")
    is_program = (tokens >= min_program_id) & (tokens <= max_program_id)
    return np.where(is_program, min_program_id + 8 * ((tokens - min_program_id) // 8), tokens)


PROGRAM_GRANULARITIES = {
    # "flat" granularity; drop program change tokens and set NoteSequence
    # programs to zero
    "flat": ProgramGranularity(tokens_map_fn=drop_programs, program_map_fn=lambda program: 0),
    # map each program to the first program in its MIDI class
    "midi_class": ProgramGranularity(
        tokens_map_fn=programs_to_midi_classes, program_map_fn=lambda program: 8 * (program // 8)
    ),
    # leave programs as is
    "full": ProgramGranularity(tokens_map_fn=lambda tokens, codec: tokens, program_map_fn=lambda program: program),
}


def frame(signal, frame_length, frame_step, pad_end=False, pad_value=0, axis=-1):
    """
    equivalent of tf.signal.frame
    """
    signal_length = signal.shape[axis]
    if pad_end:
        frames_overlap = frame_length - frame_step
        rest_samples = np.abs(signal_length - frames_overlap) % np.abs(frame_length - frames_overlap)
        pad_size = int(frame_length - rest_samples)

        if pad_size != 0:
            pad_axis = [0] * signal.ndim
            pad_axis[axis] = pad_size
            signal = F.pad(signal, pad_axis, "constant", pad_value)
    frames = signal.unfold(axis, frame_length, frame_step)
    return frames


def program_to_slakh_program(program):
    # this is done very hackily, probably should use a custom mapping
    for slakh_program in sorted(SLAKH_CLASS_PROGRAMS.values(), reverse=True):
        if program >= slakh_program:
            return slakh_program


def audio_to_frames(
    samples,
    hop_size: int,
    frame_rate: int,
) -> Tuple[Sequence[Sequence[int]], torch.Tensor]:
    """Convert audio samples to non-overlapping frames and frame times."""
    frame_size = hop_size
    samples = np.pad(samples, [0, frame_size - len(samples) % frame_size], mode="constant")

    # Split audio into frames.
    frames = frame(
        torch.Tensor(samples).unsqueeze(0),
        frame_length=frame_size,
        frame_step=frame_size,
        pad_end=False,  # TODO check why its off by 1 here when True
    )

    num_frames = len(samples) // frame_size

    times = np.arange(num_frames) / frame_rate
    return frames, times


def note_sequence_to_onsets_and_offsets_and_programs(
    ns: note_seq.NoteSequence,
) -> Tuple[Sequence[float], Sequence[NoteEventData]]:
    """Extract onset & offset times and pitches & programs from a NoteSequence.

    The onset & offset times will not necessarily be in sorted order.

    Args:
      ns: NoteSequence from which to extract onsets and offsets.

    Returns:
      times: A list of note onset and offset times.
      values: A list of NoteEventData objects where velocity is zero for note offsets.
    """
    # Sort by program and pitch and put offsets before onsets as a tiebreaker for
    # subsequent stable sort.
    notes = sorted(ns.notes, key=lambda note: (note.is_drum, note.program, note.pitch))
    times = [note.end_time for note in notes if not note.is_drum] + [note.start_time for note in notes]
    values = [
        NoteEventData(pitch=note.pitch, velocity=0, program=note.program, is_drum=False)
        for note in notes
        if not note.is_drum
    ] + [
        NoteEventData(pitch=note.pitch, velocity=note.velocity, program=note.program, is_drum=note.is_drum)
        for note in notes
    ]
    return times, values


def num_velocity_bins_from_codec(codec: Codec):
    """Get number of velocity bins from event codec."""
    lo, hi = codec.event_type_range("velocity")
    return hi - lo


# segment an array into segments of length n
def segment(a, n):
    return [a[i : i + n] for i in range(0, len(a), n)]


def velocity_to_bin(velocity, num_velocity_bins):
    if velocity == 0:
        return 0
    else:
        return math.ceil(num_velocity_bins * velocity / note_seq.MAX_MIDI_VELOCITY)


def note_event_data_to_events(
    state: Optional[NoteEncodingState],
    value: NoteEventData,
    codec: Codec,
) -> Sequence[Event]:
    """Convert note event data to a sequence of events."""
    if value.velocity is None:
        # onsets only, no program or velocity
        return [Event("pitch", value.pitch)]
    else:
        num_velocity_bins = num_velocity_bins_from_codec(codec)
        velocity_bin = velocity_to_bin(value.velocity, num_velocity_bins)
        if value.program is None:
            # onsets + offsets + velocities only, no programs
            if state is not None:
                state.active_pitches[(value.pitch, 0)] = velocity_bin
            return [Event("velocity", velocity_bin), Event("pitch", value.pitch)]
        else:
            if value.is_drum:
                # drum events use a separate vocabulary
                return [Event("velocity", velocity_bin), Event("drum", value.pitch)]
            else:
                # program + velocity + pitch
                if state is not None:
                    state.active_pitches[(value.pitch, value.program)] = velocity_bin
                return [
                    Event("program", value.program),
                    Event("velocity", velocity_bin),
                    Event("pitch", value.pitch),
                ]


def note_encoding_state_to_events(state: NoteEncodingState) -> Sequence[Event]:
    """Output program and pitch events for active notes plus a final tie event."""
    events = []
    for pitch, program in sorted(state.active_pitches.keys(), key=lambda k: k[::-1]):
        if state.active_pitches[(pitch, program)]:
            events += [Event("program", program), Event("pitch", pitch)]
    events.append(Event("tie", 0))
    return events


def encode_and_index_events(
    state, event_times, event_values, codec, frame_times, encode_event_fn, encoding_state_to_events_fn=None
):
    """Encode a sequence of timed events and index to audio frame times.

    Encodes time shifts as repeated single step shifts for later run length encoding.

    Optionally, also encodes a sequence of "state events", keeping track of the current encoding state at each audio
    frame. This can be used e.g. to prepend events representing the current state to a targets segment.

    Args:
      state: Initial event encoding state.
      event_times: Sequence of event times.
      event_values: Sequence of event values.
      encode_event_fn: Function that transforms event value into a sequence of one
        or more Event objects.
      codec: An Codec object that maps Event objects to indices.
      frame_times: Time for every audio frame.
      encoding_state_to_events_fn: Function that transforms encoding state into a
        sequence of one or more Event objects.

    Returns:
      events: Encoded events and shifts.
      event_start_indices: Corresponding start event index for every audio frame.
        Note: one event can correspond to multiple audio indices due to sampling rate differences. This makes
        splitting sequences tricky because the same event can appear at the end of one sequence and the beginning of
        another.
      event_end_indices: Corresponding end event index for every audio frame. Used
        to ensure when slicing that one chunk ends where the next begins. Should always be true that
        event_end_indices[i] = event_start_indices[i + 1].
      state_events: Encoded "state" events representing the encoding state before
        each event.
      state_event_indices: Corresponding state event index for every audio frame.
    """
    indices = np.argsort(event_times, kind="stable")
    event_steps = [round(event_times[i] * codec.steps_per_second) for i in indices]
    event_values = [event_values[i] for i in indices]

    events = []
    state_events = []
    event_start_indices = []
    state_event_indices = []

    cur_step = 0
    cur_event_idx = 0
    cur_state_event_idx = 0

    def fill_event_start_indices_to_cur_step():
        while (
            len(event_start_indices) < len(frame_times)
            and frame_times[len(event_start_indices)] < cur_step / codec.steps_per_second
        ):
            event_start_indices.append(cur_event_idx)
            state_event_indices.append(cur_state_event_idx)

    for event_step, event_value in zip(event_steps, event_values):
        while event_step > cur_step:
            events.append(codec.encode_event(Event(type="shift", value=1)))
            cur_step += 1
            fill_event_start_indices_to_cur_step()
            cur_event_idx = len(events)
            cur_state_event_idx = len(state_events)
        if encoding_state_to_events_fn:
            # Dump state to state events *before* processing the next event, because
            # we want to capture the state prior to the occurrence of the event.
            for e in encoding_state_to_events_fn(state):
                state_events.append(codec.encode_event(e))

        for e in encode_event_fn(state, event_value, codec):
            events.append(codec.encode_event(e))

    # After the last event, continue filling out the event_start_indices array.
    # The inequality is not strict because if our current step lines up exactly
    # with (the start of) an audio frame, we need to add an additional shift event
    # to "cover" that frame.
    while cur_step / codec.steps_per_second <= frame_times[-1]:
        events.append(codec.encode_event(Event(type="shift", value=1)))
        cur_step += 1
        fill_event_start_indices_to_cur_step()
        cur_event_idx = len(events)

    # Now fill in event_end_indices. We need this extra array to make sure that
    # when we slice events, each slice ends exactly where the subsequent slice
    # begins.
    event_end_indices = event_start_indices[1:] + [len(events)]

    events = np.array(events).astype(np.int32)
    state_events = np.array(state_events).astype(np.int32)
    event_start_indices = segment(np.array(event_start_indices).astype(np.int32), TARGET_FEATURE_LENGTH)
    event_end_indices = segment(np.array(event_end_indices).astype(np.int32), TARGET_FEATURE_LENGTH)
    state_event_indices = segment(np.array(state_event_indices).astype(np.int32), TARGET_FEATURE_LENGTH)

    outputs = []
    for start_indices, end_indices, event_indices in zip(event_start_indices, event_end_indices, state_event_indices):
        outputs.append(
            {
                "inputs": events,
                "event_start_indices": start_indices,
                "event_end_indices": end_indices,
                "state_events": state_events,
                "state_event_indices": event_indices,
            }
        )

    return outputs


def extract_sequence_with_indices(features, state_events_end_token=None, feature_key="inputs"):
    """Extract target sequence corresponding to audio token segment."""
    features = features.copy()
    start_idx = features["event_start_indices"][0]
    end_idx = features["event_end_indices"][-1]

    features[feature_key] = features[feature_key][start_idx:end_idx]

    if state_events_end_token is not None:
        # Extract the state events corresponding to the audio start token, and
        # prepend them to the targets array.
        state_event_start_idx = features["state_event_indices"][0]
        state_event_end_idx = state_event_start_idx + 1
        while features["state_events"][state_event_end_idx - 1] != state_events_end_token:
            state_event_end_idx += 1
        features[feature_key] = np.concatenate(
            [
                features["state_events"][state_event_start_idx:state_event_end_idx],
                features[feature_key],
            ],
            axis=0,
        )

    return features


def map_midi_programs(
    feature, codec: Codec, granularity_type: str = "full", feature_key: str = "inputs"
) -> Mapping[str, Any]:
    """Apply MIDI program map to token sequences."""
    granularity = PROGRAM_GRANULARITIES[granularity_type]

    feature[feature_key] = granularity.tokens_map_fn(feature[feature_key], codec)
    return feature


def run_length_encode_shifts_fn(
    features,
    codec: Codec,
    feature_key: str = "inputs",
    state_change_event_types: Sequence[str] = (),
) -> Callable[[Mapping[str, Any]], Mapping[str, Any]]:
    """Return a function that run-length encodes shifts for a given codec.

    Args:
      codec: The Codec to use for shift events.
      feature_key: The feature key for which to run-length encode shifts.
      state_change_event_types: A list of event types that represent state
        changes; tokens corresponding to these event types will be interpreted as state changes and redundant ones
        will be removed.

    Returns:
      A preprocessing function that run-length encodes single-step shifts.
    """
    state_change_event_ranges = [codec.event_type_range(event_type) for event_type in state_change_event_types]

    def run_length_encode_shifts(features: MutableMapping[str, Any]) -> Mapping[str, Any]:
        """Combine leading/interior shifts, trim trailing shifts.

        Args:
          features: Dict of features to process.

        Returns:
          A dict of features.
        """
        events = features[feature_key]

        shift_steps = 0
        total_shift_steps = 0
        output = np.array([], dtype=np.int32)

        current_state = np.zeros(len(state_change_event_ranges), dtype=np.int32)

        for event in events:
            if codec.is_shift_event_index(event):
                shift_steps += 1
                total_shift_steps += 1

            else:
                # If this event is a state change and has the same value as the current
                # state, we can skip it entirely.
                is_redundant = False
                for i, (min_index, max_index) in enumerate(state_change_event_ranges):
                    if (min_index <= event) and (event <= max_index):
                        if current_state[i] == event:
                            is_redundant = True
                        current_state[i] = event
                if is_redundant:
                    continue

                # Once we've reached a non-shift event, RLE all previous shift events
                # before outputting the non-shift event.
                if shift_steps > 0:
                    shift_steps = total_shift_steps
                    while shift_steps > 0:
                        output_steps = np.minimum(codec.max_shift_steps, shift_steps)
                        output = np.concatenate([output, [output_steps]], axis=0)
                        shift_steps -= output_steps
                output = np.concatenate([output, [event]], axis=0)

        features[feature_key] = output
        return features

    return run_length_encode_shifts(features)


def note_representation_processor_chain(features, codec: Codec, note_representation_config: NoteRepresentationConfig):
    tie_token = codec.encode_event(Event("tie", 0))
    state_events_end_token = tie_token if note_representation_config.include_ties else None

    features = extract_sequence_with_indices(
        features, state_events_end_token=state_events_end_token, feature_key="inputs"
    )

    features = map_midi_programs(features, codec)

    features = run_length_encode_shifts_fn(features, codec, state_change_event_types=["velocity", "program"])

    return features


class MidiProcessor:
    def __init__(self):
        self.codec = Codec(
            max_shift_steps=DEFAULT_MAX_SHIFT_SECONDS * DEFAULT_STEPS_PER_SECOND,
            steps_per_second=DEFAULT_STEPS_PER_SECOND,
            event_ranges=[
                EventRange("pitch", note_seq.MIN_MIDI_PITCH, note_seq.MAX_MIDI_PITCH),
                EventRange("velocity", 0, DEFAULT_NUM_VELOCITY_BINS),
                EventRange("tie", 0, 0),
                EventRange("program", note_seq.MIN_MIDI_PROGRAM, note_seq.MAX_MIDI_PROGRAM),
                EventRange("drum", note_seq.MIN_MIDI_PITCH, note_seq.MAX_MIDI_PITCH),
            ],
        )
        self.tokenizer = Tokenizer(self.codec.num_classes)
        self.note_representation_config = NoteRepresentationConfig(onsets_only=False, include_ties=True)

    def __call__(self, midi: Union[bytes, os.PathLike, str]):
        if not isinstance(midi, bytes):
            with open(midi, "rb") as f:
                midi = f.read()

        ns = note_seq.midi_to_note_sequence(midi)
        ns_sus = note_seq.apply_sustain_control_changes(ns)

        for note in ns_sus.notes:
            if not note.is_drum:
                note.program = program_to_slakh_program(note.program)

        samples = np.zeros(int(ns_sus.total_time * SAMPLE_RATE))

        _, frame_times = audio_to_frames(samples, HOP_SIZE, FRAME_RATE)
        times, values = note_sequence_to_onsets_and_offsets_and_programs(ns_sus)

        events = encode_and_index_events(
            state=NoteEncodingState(),
            event_times=times,
            event_values=values,
            frame_times=frame_times,
            codec=self.codec,
            encode_event_fn=note_event_data_to_events,
            encoding_state_to_events_fn=note_encoding_state_to_events,
        )

        events = [
            note_representation_processor_chain(event, self.codec, self.note_representation_config)
            for event in events
        ]
        input_tokens = [self.tokenizer.encode(event["inputs"]) for event in events]

        return input_tokens
diffusers/src/diffusers/pipelines/deprecated/spectrogram_diffusion/midi_utils.py/0
{ "file_path": "diffusers/src/diffusers/pipelines/deprecated/spectrogram_diffusion/midi_utils.py", "repo_id": "diffusers", "token_count": 10185 }
147
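A round-trip sketch for the `Codec` in the row above, assuming the module is importable (importing it requires `note-seq`, since the file raises at import time otherwise). The pitch range 21-108 is an illustrative choice; the point is the vocabulary layout the class enforces: `shift` always occupies the first contiguous block of indices, and each other event type gets the next block.

```py
from diffusers.pipelines.deprecated.spectrogram_diffusion.midi_utils import (
    Codec,
    Event,
    EventRange,
)

# Shift events take indices 0..1000 (1001 ids); "pitch" starts right after.
codec = Codec(
    max_shift_steps=1000,  # = DEFAULT_MAX_SHIFT_SECONDS * DEFAULT_STEPS_PER_SECOND
    steps_per_second=100,
    event_ranges=[EventRange("pitch", min_value=21, max_value=108)],
)

index = codec.encode_event(Event(type="pitch", value=60))  # middle C
assert index == 1001 + (60 - 21)
assert codec.decode_event_index(index) == Event(type="pitch", value=60)
```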
# Copyright 2024 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import inspect
from typing import Callable, List, Optional, Union

import numpy as np
import PIL.Image
import torch
import torch.utils.checkpoint
from transformers import CLIPImageProcessor, CLIPVisionModelWithProjection

from ....image_processor import VaeImageProcessor
from ....models import AutoencoderKL, UNet2DConditionModel
from ....schedulers import KarrasDiffusionSchedulers
from ....utils import deprecate, logging
from ....utils.torch_utils import randn_tensor
from ...pipeline_utils import DiffusionPipeline, ImagePipelineOutput


logger = logging.get_logger(__name__)  # pylint: disable=invalid-name


class VersatileDiffusionImageVariationPipeline(DiffusionPipeline):
    r"""
    Pipeline for image variation using Versatile Diffusion.

    This model inherits from [`DiffusionPipeline`]. Check the superclass documentation for the generic methods
    implemented for all pipelines (downloading, saving, running on a particular device, etc.).

    Parameters:
        image_feature_extractor ([`~transformers.CLIPImageProcessor`]):
            A `CLIPImageProcessor` to preprocess the input images.
        image_encoder ([`~transformers.CLIPVisionModelWithProjection`]):
            Frozen CLIP vision encoder used to embed the input images.
        image_unet ([`UNet2DConditionModel`]):
            A `UNet2DConditionModel` to denoise the encoded image latents.
        vae ([`AutoencoderKL`]):
            Variational Auto-Encoder (VAE) model to encode and decode images to and from latent representations.
        scheduler ([`SchedulerMixin`]):
            A scheduler to be used in combination with `image_unet` to denoise the encoded image latents. Can be one
            of [`DDIMScheduler`], [`LMSDiscreteScheduler`], or [`PNDMScheduler`].
    """

    model_cpu_offload_seq = "bert->unet->vqvae"

    image_feature_extractor: CLIPImageProcessor
    image_encoder: CLIPVisionModelWithProjection
    image_unet: UNet2DConditionModel
    vae: AutoencoderKL
    scheduler: KarrasDiffusionSchedulers

    def __init__(
        self,
        image_feature_extractor: CLIPImageProcessor,
        image_encoder: CLIPVisionModelWithProjection,
        image_unet: UNet2DConditionModel,
        vae: AutoencoderKL,
        scheduler: KarrasDiffusionSchedulers,
    ):
        super().__init__()
        self.register_modules(
            image_feature_extractor=image_feature_extractor,
            image_encoder=image_encoder,
            image_unet=image_unet,
            vae=vae,
            scheduler=scheduler,
        )
        self.vae_scale_factor = 2 ** (len(self.vae.config.block_out_channels) - 1)
        self.image_processor = VaeImageProcessor(vae_scale_factor=self.vae_scale_factor)

    def _encode_prompt(self, prompt, device, num_images_per_prompt, do_classifier_free_guidance, negative_prompt):
        r"""
        Encodes the prompt into text encoder hidden states.

        Args:
            prompt (`str` or `List[str]`):
                prompt to be encoded
            device: (`torch.device`):
                torch device
            num_images_per_prompt (`int`):
                number of images that should be generated per prompt
            do_classifier_free_guidance (`bool`):
                whether to use classifier free guidance or not
            negative_prompt (`str` or `List[str]`):
                The prompt or prompts not to guide the image generation. Ignored when not using guidance (i.e.,
                ignored if `guidance_scale` is less than `1`).
        """

        def normalize_embeddings(encoder_output):
            embeds = self.image_encoder.vision_model.post_layernorm(encoder_output.last_hidden_state)
            embeds = self.image_encoder.visual_projection(embeds)
            embeds_pooled = embeds[:, 0:1]
            embeds = embeds / torch.norm(embeds_pooled, dim=-1, keepdim=True)
            return embeds

        if isinstance(prompt, torch.Tensor) and len(prompt.shape) == 4:
            prompt = list(prompt)

        batch_size = len(prompt) if isinstance(prompt, list) else 1

        # get prompt text embeddings
        image_input = self.image_feature_extractor(images=prompt, return_tensors="pt")
        pixel_values = image_input.pixel_values.to(device).to(self.image_encoder.dtype)
        image_embeddings = self.image_encoder(pixel_values)
        image_embeddings = normalize_embeddings(image_embeddings)

        # duplicate image embeddings for each generation per prompt, using mps friendly method
        bs_embed, seq_len, _ = image_embeddings.shape
        image_embeddings = image_embeddings.repeat(1, num_images_per_prompt, 1)
        image_embeddings = image_embeddings.view(bs_embed * num_images_per_prompt, seq_len, -1)

        # get unconditional embeddings for classifier free guidance
        if do_classifier_free_guidance:
            uncond_images: List[str]
            if negative_prompt is None:
                uncond_images = [np.zeros((512, 512, 3)) + 0.5] * batch_size
            elif type(prompt) is not type(negative_prompt):
                raise TypeError(
                    f"`negative_prompt` should be the same type to `prompt`, but got {type(negative_prompt)} !="
                    f" {type(prompt)}."
                )
            elif isinstance(negative_prompt, PIL.Image.Image):
                uncond_images = [negative_prompt]
            elif batch_size != len(negative_prompt):
                raise ValueError(
                    f"`negative_prompt`: {negative_prompt} has batch size {len(negative_prompt)}, but `prompt`:"
                    f" {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches"
                    " the batch size of `prompt`."
                )
            else:
                uncond_images = negative_prompt

            uncond_images = self.image_feature_extractor(images=uncond_images, return_tensors="pt")
            pixel_values = uncond_images.pixel_values.to(device).to(self.image_encoder.dtype)
            negative_prompt_embeds = self.image_encoder(pixel_values)
            negative_prompt_embeds = normalize_embeddings(negative_prompt_embeds)

            # duplicate unconditional embeddings for each generation per prompt, using mps friendly method
            seq_len = negative_prompt_embeds.shape[1]
            negative_prompt_embeds = negative_prompt_embeds.repeat(1, num_images_per_prompt, 1)
            negative_prompt_embeds = negative_prompt_embeds.view(batch_size * num_images_per_prompt, seq_len, -1)

            # For classifier free guidance, we need to do two forward passes.
            # Here we concatenate the unconditional and conditional embeddings into a single batch
            # to avoid doing two forward passes
            image_embeddings = torch.cat([negative_prompt_embeds, image_embeddings])

        return image_embeddings

    # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.decode_latents
    def decode_latents(self, latents):
        deprecation_message = "The decode_latents method is deprecated and will be removed in 1.0.0. Please use VaeImageProcessor.postprocess(...) instead"
        deprecate("decode_latents", "1.0.0", deprecation_message, standard_warn=False)

        latents = 1 / self.vae.config.scaling_factor * latents
        image = self.vae.decode(latents, return_dict=False)[0]
        image = (image / 2 + 0.5).clamp(0, 1)
        # we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16
        image = image.cpu().permute(0, 2, 3, 1).float().numpy()
        return image

    # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.prepare_extra_step_kwargs
    def prepare_extra_step_kwargs(self, generator, eta):
        # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature
        # eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers.
        # eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502
        # and should be between [0, 1]

        accepts_eta = "eta" in set(inspect.signature(self.scheduler.step).parameters.keys())
        extra_step_kwargs = {}
        if accepts_eta:
            extra_step_kwargs["eta"] = eta

        # check if the scheduler accepts generator
        accepts_generator = "generator" in set(inspect.signature(self.scheduler.step).parameters.keys())
        if accepts_generator:
            extra_step_kwargs["generator"] = generator
        return extra_step_kwargs

    # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion_image_variation.StableDiffusionImageVariationPipeline.check_inputs
    def check_inputs(self, image, height, width, callback_steps):
        if (
            not isinstance(image, torch.Tensor)
            and not isinstance(image, PIL.Image.Image)
            and not isinstance(image, list)
        ):
            raise ValueError(
                "`image` has to be of type `torch.Tensor` or `PIL.Image.Image` or `List[PIL.Image.Image]` but is"
                f" {type(image)}"
            )

        if height % 8 != 0 or width % 8 != 0:
            raise ValueError(f"`height` and `width` have to be divisible by 8 but are {height} and {width}.")

        if (callback_steps is None) or (
            callback_steps is not None and (not isinstance(callback_steps, int) or callback_steps <= 0)
        ):
            raise ValueError(
                f"`callback_steps` has to be a positive integer but is {callback_steps} of type"
                f" {type(callback_steps)}."
            )

    # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.prepare_latents
    def prepare_latents(self, batch_size, num_channels_latents, height, width, dtype, device, generator, latents=None):
        shape = (
            batch_size,
            num_channels_latents,
            int(height) // self.vae_scale_factor,
            int(width) // self.vae_scale_factor,
        )
        if isinstance(generator, list) and len(generator) != batch_size:
            raise ValueError(
                f"You have passed a list of generators of length {len(generator)}, but requested an effective batch"
                f" size of {batch_size}. Make sure the batch size matches the length of the generators."
            )

        if latents is None:
            latents = randn_tensor(shape, generator=generator, device=device, dtype=dtype)
        else:
            latents = latents.to(device)

        # scale the initial noise by the standard deviation required by the scheduler
        latents = latents * self.scheduler.init_noise_sigma
        return latents

    @torch.no_grad()
    def __call__(
        self,
        image: Union[PIL.Image.Image, List[PIL.Image.Image], torch.Tensor],
        height: Optional[int] = None,
        width: Optional[int] = None,
        num_inference_steps: int = 50,
        guidance_scale: float = 7.5,
        negative_prompt: Optional[Union[str, List[str]]] = None,
        num_images_per_prompt: Optional[int] = 1,
        eta: float = 0.0,
        generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
        latents: Optional[torch.Tensor] = None,
        output_type: Optional[str] = "pil",
        return_dict: bool = True,
        callback: Optional[Callable[[int, int, torch.Tensor], None]] = None,
        callback_steps: int = 1,
        **kwargs,
    ):
        r"""
        The call function to the pipeline for generation.

        Args:
            image (`PIL.Image.Image`, `List[PIL.Image.Image]` or `torch.Tensor`):
                The image prompt or prompts to guide the image generation.
            height (`int`, *optional*, defaults to `self.image_unet.config.sample_size * self.vae_scale_factor`):
                The height in pixels of the generated image.
            width (`int`, *optional*, defaults to `self.image_unet.config.sample_size * self.vae_scale_factor`):
                The width in pixels of the generated image.
            num_inference_steps (`int`, *optional*, defaults to 50):
                The number of denoising steps. More denoising steps usually lead to a higher quality image at the
                expense of slower inference.
            guidance_scale (`float`, *optional*, defaults to 7.5):
                A higher guidance scale value encourages the model to generate images closely linked to the text
                `prompt` at the expense of lower image quality. Guidance scale is enabled when `guidance_scale > 1`.
            negative_prompt (`str` or `List[str]`, *optional*):
                The prompt or prompts to guide what to not include in image generation. If not defined, you need to
                pass `negative_prompt_embeds` instead. Ignored when not using guidance (`guidance_scale < 1`).
            num_images_per_prompt (`int`, *optional*, defaults to 1):
                The number of images to generate per prompt.
            eta (`float`, *optional*, defaults to 0.0):
                Corresponds to parameter eta (η) from the [DDIM](https://arxiv.org/abs/2010.02502) paper. Only applies
                to the [`~schedulers.DDIMScheduler`], and is ignored in other schedulers.
            generator (`torch.Generator`, *optional*):
                A [`torch.Generator`](https://pytorch.org/docs/stable/generated/torch.Generator.html) to make
                generation deterministic.
            latents (`torch.Tensor`, *optional*):
                Pre-generated noisy latents sampled from a Gaussian distribution, to be used as inputs for image
                generation. Can be used to tweak the same generation with different prompts. If not provided, a
                latents tensor is generated by sampling using the supplied random `generator`.
            output_type (`str`, *optional*, defaults to `"pil"`):
                The output format of the generated image. Choose between `PIL.Image` or `np.array`.
            return_dict (`bool`, *optional*, defaults to `True`):
                Whether or not to return a [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] instead of a
                plain tuple.
            callback (`Callable`, *optional*):
                A function that calls every `callback_steps` steps during inference. The function is called with the
                following arguments: `callback(step: int, timestep: int, latents: torch.Tensor)`.
            callback_steps (`int`, *optional*, defaults to 1):
                The frequency at which the `callback` function is called. If not specified, the callback is called at
                every step.

        Examples:

        ```py
        >>> from diffusers import VersatileDiffusionImageVariationPipeline
        >>> import torch
        >>> import requests
        >>> from io import BytesIO
        >>> from PIL import Image

        >>> # let's download an initial image
        >>> url = "https://huggingface.co/datasets/diffusers/images/resolve/main/benz.jpg"

        >>> response = requests.get(url)
        >>> image = Image.open(BytesIO(response.content)).convert("RGB")

        >>> pipe = VersatileDiffusionImageVariationPipeline.from_pretrained(
        ...     "shi-labs/versatile-diffusion", torch_dtype=torch.float16
        ... )
        >>> pipe = pipe.to("cuda")

        >>> generator = torch.Generator(device="cuda").manual_seed(0)
        >>> image = pipe(image, generator=generator).images[0]
        >>> image.save("./car_variation.png")
        ```

        Returns:
            [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] or `tuple`:
                If `return_dict` is `True`, [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] is returned,
                otherwise a `tuple` is returned where the first element is a list with the generated images.
        """
        # 0. Default height and width to unet
        height = height or self.image_unet.config.sample_size * self.vae_scale_factor
        width = width or self.image_unet.config.sample_size * self.vae_scale_factor

        # 1. Check inputs. Raise error if not correct
        self.check_inputs(image, height, width, callback_steps)

        # 2. Define call parameters
        batch_size = 1 if isinstance(image, PIL.Image.Image) else len(image)
        device = self._execution_device
        # here `guidance_scale` is defined analog to the guidance weight `w` of equation (2)
        # of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1`
        # corresponds to doing no classifier free guidance.
        do_classifier_free_guidance = guidance_scale > 1.0

        # 3. Encode input prompt
        image_embeddings = self._encode_prompt(
            image, device, num_images_per_prompt, do_classifier_free_guidance, negative_prompt
        )

        # 4. Prepare timesteps
        self.scheduler.set_timesteps(num_inference_steps, device=device)
        timesteps = self.scheduler.timesteps

        # 5. Prepare latent variables
        num_channels_latents = self.image_unet.config.in_channels
        latents = self.prepare_latents(
            batch_size * num_images_per_prompt,
            num_channels_latents,
            height,
            width,
            image_embeddings.dtype,
            device,
            generator,
            latents,
        )

        # 6. Prepare extra step kwargs.
        extra_step_kwargs = self.prepare_extra_step_kwargs(generator, eta)

        # 7. Denoising loop
        for i, t in enumerate(self.progress_bar(timesteps)):
            # expand the latents if we are doing classifier free guidance
            latent_model_input = torch.cat([latents] * 2) if do_classifier_free_guidance else latents
            latent_model_input = self.scheduler.scale_model_input(latent_model_input, t)

            # predict the noise residual
            noise_pred = self.image_unet(latent_model_input, t, encoder_hidden_states=image_embeddings).sample

            # perform guidance
            if do_classifier_free_guidance:
                noise_pred_uncond, noise_pred_text = noise_pred.chunk(2)
                noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)

            # compute the previous noisy sample x_t -> x_t-1
            latents = self.scheduler.step(noise_pred, t, latents, **extra_step_kwargs).prev_sample

            # call the callback, if provided
            if callback is not None and i % callback_steps == 0:
                step_idx = i // getattr(self.scheduler, "order", 1)
                callback(step_idx, t, latents)

        if not output_type == "latent":
            image = self.vae.decode(latents / self.vae.config.scaling_factor, return_dict=False)[0]
        else:
            image = latents

        image = self.image_processor.postprocess(image, output_type=output_type)

        if not return_dict:
            return (image,)

        return ImagePipelineOutput(images=image)
diffusers/src/diffusers/pipelines/deprecated/versatile_diffusion/pipeline_versatile_diffusion_image_variation.py/0
{ "file_path": "diffusers/src/diffusers/pipelines/deprecated/versatile_diffusion/pipeline_versatile_diffusion_image_variation.py", "repo_id": "diffusers", "token_count": 8231 }
148
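The denoising loop in the row above batches the unconditional and conditional branches together and then recombines them. A self-contained restatement of that recombination so the arithmetic can be checked in isolation; the function name is illustrative, not from the source.

```py
import torch


def apply_cfg(noise_pred: torch.Tensor, guidance_scale: float) -> torch.Tensor:
    # The pipeline concatenates [negative_prompt_embeds, image_embeddings],
    # so chunk(2) yields the unconditional half first, as in the loop above.
    noise_pred_uncond, noise_pred_cond = noise_pred.chunk(2)
    return noise_pred_uncond + guidance_scale * (noise_pred_cond - noise_pred_uncond)


# guidance_scale = 1.0 reduces to the conditional prediction alone.
x = torch.randn(2, 4, 64, 64)  # stacked [uncond, cond] predictions
assert torch.allclose(apply_cfg(x, 1.0), x.chunk(2)[1])
```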
from typing import TYPE_CHECKING

from ...utils import (
    DIFFUSERS_SLOW_IMPORT,
    OptionalDependencyNotAvailable,
    _LazyModule,
    get_objects_from_module,
    is_torch_available,
    is_transformers_available,
)


_dummy_objects = {}
_import_structure = {}

try:
    if not (is_transformers_available() and is_torch_available()):
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    from ...utils import dummy_torch_and_transformers_objects  # noqa F403

    _dummy_objects.update(get_objects_from_module(dummy_torch_and_transformers_objects))
else:
    _import_structure["pipeline_kandinsky"] = ["KandinskyPipeline"]
    _import_structure["pipeline_kandinsky_combined"] = [
        "KandinskyCombinedPipeline",
        "KandinskyImg2ImgCombinedPipeline",
        "KandinskyInpaintCombinedPipeline",
    ]
    _import_structure["pipeline_kandinsky_img2img"] = ["KandinskyImg2ImgPipeline"]
    _import_structure["pipeline_kandinsky_inpaint"] = ["KandinskyInpaintPipeline"]
    _import_structure["pipeline_kandinsky_prior"] = ["KandinskyPriorPipeline", "KandinskyPriorPipelineOutput"]
    _import_structure["text_encoder"] = ["MultilingualCLIP"]


if TYPE_CHECKING or DIFFUSERS_SLOW_IMPORT:
    try:
        if not (is_transformers_available() and is_torch_available()):
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        from ...utils.dummy_torch_and_transformers_objects import *
    else:
        from .pipeline_kandinsky import KandinskyPipeline
        from .pipeline_kandinsky_combined import (
            KandinskyCombinedPipeline,
            KandinskyImg2ImgCombinedPipeline,
            KandinskyInpaintCombinedPipeline,
        )
        from .pipeline_kandinsky_img2img import KandinskyImg2ImgPipeline
        from .pipeline_kandinsky_inpaint import KandinskyInpaintPipeline
        from .pipeline_kandinsky_prior import KandinskyPriorPipeline, KandinskyPriorPipelineOutput
        from .text_encoder import MultilingualCLIP

else:
    import sys

    sys.modules[__name__] = _LazyModule(
        __name__,
        globals()["__file__"],
        _import_structure,
        module_spec=__spec__,
    )

    for name, value in _dummy_objects.items():
        setattr(sys.modules[__name__], name, value)
diffusers/src/diffusers/pipelines/kandinsky/__init__.py/0
{ "file_path": "diffusers/src/diffusers/pipelines/kandinsky/__init__.py", "repo_id": "diffusers", "token_count": 951 }
149
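A sketch of what the `_LazyModule` registration in the row above buys: importing the subpackage is cheap because the modules listed in `_import_structure` are only executed when an attribute is first resolved. This assumes torch and transformers are installed, so the dummy-object branch is not taken.

```py
# Importing the package does not yet execute pipeline_kandinsky.py;
# _LazyModule only records the _import_structure mapping.
from diffusers.pipelines import kandinsky

# First attribute access triggers the deferred submodule import.
pipeline_cls = kandinsky.KandinskyPipeline
print(pipeline_cls.__module__)  # diffusers.pipelines.kandinsky.pipeline_kandinsky
```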
# Copyright 2024 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import re
from typing import Dict, List, Tuple, Union

import torch
import torch.nn as nn

from ...models.attention_processor import (
    Attention,
    AttentionProcessor,
    PAGCFGIdentitySelfAttnProcessor2_0,
    PAGIdentitySelfAttnProcessor2_0,
)
from ...utils import logging


logger = logging.get_logger(__name__)  # pylint: disable=invalid-name


class PAGMixin:
    r"""Mixin class for [Perturbed Attention Guidance](https://arxiv.org/abs/2403.17377v1)."""

    def _set_pag_attn_processor(self, pag_applied_layers, do_classifier_free_guidance):
        r"""
        Set the attention processor for the PAG layers.
        """
        pag_attn_processors = self._pag_attn_processors
        if pag_attn_processors is None:
            raise ValueError(
                "No PAG attention processors have been set. Set the attention processors by calling "
                "`set_pag_applied_layers` and passing the relevant parameters."
            )

        pag_attn_proc = pag_attn_processors[0] if do_classifier_free_guidance else pag_attn_processors[1]

        if hasattr(self, "unet"):
            model: nn.Module = self.unet
        else:
            model: nn.Module = self.transformer

        def is_self_attn(module: nn.Module) -> bool:
            r"""
            Check if the module is self-attention module based on its name.
            """
            return isinstance(module, Attention) and not module.is_cross_attention

        def is_fake_integral_match(layer_id, name):
            layer_id = layer_id.split(".")[-1]
            name = name.split(".")[-1]
            return layer_id.isnumeric() and name.isnumeric() and layer_id == name

        for layer_id in pag_applied_layers:
            # for each PAG layer input, we find corresponding self-attention layers in the unet model
            target_modules = []

            for name, module in model.named_modules():
                # Identify the following simple cases:
                #   (1) Self Attention layer existing
                #   (2) Whether the module name matches pag layer id even partially
                #   (3) Make sure it's not a fake integral match if the layer_id ends with a number
                #       For example, blocks.1, blocks.10 should be differentiable if layer_id="blocks.1"
                if (
                    is_self_attn(module)
                    and re.search(layer_id, name) is not None
                    and not is_fake_integral_match(layer_id, name)
                ):
                    logger.debug(f"Applying PAG to layer: {name}")
                    target_modules.append(module)

            if len(target_modules) == 0:
                raise ValueError(f"Cannot find PAG layer to set attention processor for: {layer_id}")

            for module in target_modules:
                module.processor = pag_attn_proc

    def _get_pag_scale(self, t):
        r"""
        Get the scale factor for the perturbed attention guidance at timestep `t`.
        """

        if self.do_pag_adaptive_scaling:
            signal_scale = self.pag_scale - self.pag_adaptive_scale * (1000 - t)
            if signal_scale < 0:
                signal_scale = 0
            return signal_scale
        else:
            return self.pag_scale

    def _apply_perturbed_attention_guidance(self, noise_pred, do_classifier_free_guidance, guidance_scale, t):
        r"""
        Apply perturbed attention guidance to the noise prediction.

        Args:
            noise_pred (torch.Tensor): The noise prediction tensor.
            do_classifier_free_guidance (bool): Whether to apply classifier-free guidance.
            guidance_scale (float): The scale factor for the guidance term.
            t (int): The current time step.

        Returns:
            torch.Tensor: The updated noise prediction tensor after applying perturbed attention guidance.
        """
        pag_scale = self._get_pag_scale(t)
        if do_classifier_free_guidance:
            noise_pred_uncond, noise_pred_text, noise_pred_perturb = noise_pred.chunk(3)
            noise_pred = (
                noise_pred_uncond
                + guidance_scale * (noise_pred_text - noise_pred_uncond)
                + pag_scale * (noise_pred_text - noise_pred_perturb)
            )
        else:
            noise_pred_text, noise_pred_perturb = noise_pred.chunk(2)
            noise_pred = noise_pred_text + pag_scale * (noise_pred_text - noise_pred_perturb)
        return noise_pred

    def _prepare_perturbed_attention_guidance(self, cond, uncond, do_classifier_free_guidance):
        """
        Prepares the perturbed attention guidance for the PAG model.

        Args:
            cond (torch.Tensor): The conditional input tensor.
            uncond (torch.Tensor): The unconditional input tensor.
            do_classifier_free_guidance (bool): Flag indicating whether to perform classifier-free guidance.

        Returns:
            torch.Tensor: The prepared perturbed attention guidance tensor.
        """
        cond = torch.cat([cond] * 2, dim=0)

        if do_classifier_free_guidance:
            cond = torch.cat([uncond, cond], dim=0)
        return cond

    def set_pag_applied_layers(
        self,
        pag_applied_layers: Union[str, List[str]],
        pag_attn_processors: Tuple[AttentionProcessor, AttentionProcessor] = (
            PAGCFGIdentitySelfAttnProcessor2_0(),
            PAGIdentitySelfAttnProcessor2_0(),
        ),
    ):
        r"""
        Set the self-attention layers to apply PAG. Raise ValueError if the input is invalid.

        Args:
            pag_applied_layers (`str` or `List[str]`):
                One or more strings identifying the layer names, or a simple regex for matching multiple layers, where
                PAG is to be applied. A few ways of expected usage are as follows:
                  - Single layers specified as - "blocks.{layer_index}"
                  - Multiple layers as a list - ["blocks.{layers_index_1}", "blocks.{layer_index_2}", ...]
                  - Multiple layers as a block name - "mid"
                  - Multiple layers as regex - "blocks.({layer_index_1}|{layer_index_2})"
            pag_attn_processors:
                (`Tuple[AttentionProcessor, AttentionProcessor]`, defaults to `(PAGCFGIdentitySelfAttnProcessor2_0(),
                PAGIdentitySelfAttnProcessor2_0())`): A tuple of two attention processors. The first attention
                processor is for PAG with Classifier-free guidance enabled (conditional and unconditional). The second
                attention processor is for PAG with CFG disabled (unconditional only).
        """

        if not hasattr(self, "_pag_attn_processors"):
            self._pag_attn_processors = None

        if not isinstance(pag_applied_layers, list):
            pag_applied_layers = [pag_applied_layers]
        if pag_attn_processors is not None:
            if not isinstance(pag_attn_processors, tuple) or len(pag_attn_processors) != 2:
                raise ValueError("Expected a tuple of two attention processors")

        for i in range(len(pag_applied_layers)):
            if not isinstance(pag_applied_layers[i], str):
                raise ValueError(
                    f"Expected either a string or a list of string but got type {type(pag_applied_layers[i])}"
                )

        self.pag_applied_layers = pag_applied_layers
        self._pag_attn_processors = pag_attn_processors

    @property
    def pag_scale(self) -> float:
        r"""Get the scale factor for the perturbed attention guidance."""
        return self._pag_scale

    @property
    def pag_adaptive_scale(self) -> float:
        r"""Get the adaptive scale factor for the perturbed attention guidance."""
        return self._pag_adaptive_scale

    @property
    def do_pag_adaptive_scaling(self) -> bool:
        r"""Check if the adaptive scaling is enabled for the perturbed attention guidance."""
        return self._pag_adaptive_scale > 0 and self._pag_scale > 0 and len(self.pag_applied_layers) > 0

    @property
    def do_perturbed_attention_guidance(self) -> bool:
        r"""Check if the perturbed attention guidance is enabled."""
        return self._pag_scale > 0 and len(self.pag_applied_layers) > 0

    @property
    def pag_attn_processors(self) -> Dict[str, AttentionProcessor]:
        r"""
        Returns:
            `dict` of PAG attention processors: A dictionary contains all PAG attention processors used in the model
            with the key as the name of the layer.
        """

        if self._pag_attn_processors is None:
            return {}

        valid_attn_processors = {x.__class__ for x in self._pag_attn_processors}

        processors = {}
        # We could have iterated through the self.components.items() and checked if a component is
        # `ModelMixin` subclassed but that can include a VAE too.
        if hasattr(self, "unet"):
            denoiser_module = self.unet
        elif hasattr(self, "transformer"):
            denoiser_module = self.transformer
        else:
            raise ValueError("No denoiser module found.")

        for name, proc in denoiser_module.attn_processors.items():
            if proc.__class__ in valid_attn_processors:
                processors[name] = proc

        return processors
diffusers/src/diffusers/pipelines/pag/pag_utils.py/0
{ "file_path": "diffusers/src/diffusers/pipelines/pag/pag_utils.py", "repo_id": "diffusers", "token_count": 4257 }
150
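A hedged usage sketch for the mixin in the row above. The `enable_pag` and `pag_applied_layers` arguments to `AutoPipelineForText2Image.from_pretrained`, and the `pag_scale` call argument, are assumptions about the surrounding diffusers API; the layer string follows the block-name form listed in the `set_pag_applied_layers` docstring, and matching happens via `re.search` over `named_modules()`, with the `is_fake_integral_match` guard keeping `"blocks.1"` from also claiming `"blocks.10"`.

```py
import torch

from diffusers import AutoPipelineForText2Image

# enable_pag / pag_applied_layers are assumed to route to a PAG pipeline
# that subclasses the PAGMixin defined above.
pipe = AutoPipelineForText2Image.from_pretrained(
    "stabilityai/stable-diffusion-xl-base-1.0",
    enable_pag=True,
    pag_applied_layers=["mid"],  # block-name form: all self-attn layers under "mid"
    torch_dtype=torch.float16,
).to("cuda")

# pag_scale weights the (cond - perturbed) term added on top of regular CFG,
# as implemented by _apply_perturbed_attention_guidance above.
image = pipe("an astronaut riding a horse", pag_scale=3.0).images[0]
```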
from typing import TYPE_CHECKING

from ...utils import (
    DIFFUSERS_SLOW_IMPORT,
    OptionalDependencyNotAvailable,
    _LazyModule,
    get_objects_from_module,
    is_torch_available,
    is_transformers_available,
    is_transformers_version,
)


_dummy_objects = {}
_import_structure = {}

try:
    if not (is_transformers_available() and is_torch_available() and is_transformers_version(">=", "4.27.0")):
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    from ...utils import dummy_torch_and_transformers_objects

    _dummy_objects.update(get_objects_from_module(dummy_torch_and_transformers_objects))
else:
    _import_structure["modeling_stable_audio"] = ["StableAudioProjectionModel"]
    _import_structure["pipeline_stable_audio"] = ["StableAudioPipeline"]


if TYPE_CHECKING or DIFFUSERS_SLOW_IMPORT:
    try:
        if not (is_transformers_available() and is_torch_available() and is_transformers_version(">=", "4.27.0")):
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        from ...utils.dummy_torch_and_transformers_objects import *
    else:
        from .modeling_stable_audio import StableAudioProjectionModel
        from .pipeline_stable_audio import StableAudioPipeline

else:
    import sys

    sys.modules[__name__] = _LazyModule(
        __name__,
        globals()["__file__"],
        _import_structure,
        module_spec=__spec__,
    )

    for name, value in _dummy_objects.items():
        setattr(sys.modules[__name__], name, value)
diffusers/src/diffusers/pipelines/stable_audio/__init__.py/0
{ "file_path": "diffusers/src/diffusers/pipelines/stable_audio/__init__.py", "repo_id": "diffusers", "token_count": 604 }
151
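The try/except in the row above gates the whole subpackage on torch, transformers, and a minimum transformers version. A small sketch of performing the same check by hand before importing, using the helpers the file itself pulls from `diffusers.utils`:

```py
from diffusers.utils import is_torch_available, is_transformers_available, is_transformers_version

# Mirrors the guard above: with an older transformers, the subpackage instead
# exposes dummy objects that raise a helpful error on instantiation.
if is_transformers_available() and is_torch_available() and is_transformers_version(">=", "4.27.0"):
    from diffusers.pipelines.stable_audio import StableAudioPipeline
```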
# Copyright 2024 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import inspect from typing import Callable, List, Optional, Union import numpy as np import PIL.Image import torch from transformers import CLIPImageProcessor, CLIPTokenizer from ...configuration_utils import FrozenDict from ...schedulers import DDIMScheduler, LMSDiscreteScheduler, PNDMScheduler from ...utils import PIL_INTERPOLATION, deprecate, logging from ..onnx_utils import ORT_TO_NP_TYPE, OnnxRuntimeModel from ..pipeline_utils import DiffusionPipeline from . import StableDiffusionPipelineOutput logger = logging.get_logger(__name__) # pylint: disable=invalid-name NUM_UNET_INPUT_CHANNELS = 9 NUM_LATENT_CHANNELS = 4 def prepare_mask_and_masked_image(image, mask, latents_shape): image = np.array(image.convert("RGB").resize((latents_shape[1] * 8, latents_shape[0] * 8))) image = image[None].transpose(0, 3, 1, 2) image = image.astype(np.float32) / 127.5 - 1.0 image_mask = np.array(mask.convert("L").resize((latents_shape[1] * 8, latents_shape[0] * 8))) masked_image = image * (image_mask < 127.5) mask = mask.resize((latents_shape[1], latents_shape[0]), PIL_INTERPOLATION["nearest"]) mask = np.array(mask.convert("L")) mask = mask.astype(np.float32) / 255.0 mask = mask[None, None] mask[mask < 0.5] = 0 mask[mask >= 0.5] = 1 return mask, masked_image class OnnxStableDiffusionInpaintPipeline(DiffusionPipeline): r""" Pipeline for text-guided image inpainting using Stable Diffusion. *This is an experimental feature*. This model inherits from [`DiffusionPipeline`]. Check the superclass documentation for the generic methods the library implements for all the pipelines (such as downloading or saving, running on a particular device, etc.) Args: vae ([`AutoencoderKL`]): Variational Auto-Encoder (VAE) Model to encode and decode images to and from latent representations. text_encoder ([`CLIPTextModel`]): Frozen text-encoder. Stable Diffusion uses the text portion of [CLIP](https://huggingface.co/docs/transformers/model_doc/clip#transformers.CLIPTextModel), specifically the [clip-vit-large-patch14](https://huggingface.co/openai/clip-vit-large-patch14) variant. tokenizer (`CLIPTokenizer`): Tokenizer of class [CLIPTokenizer](https://huggingface.co/docs/transformers/v4.21.0/en/model_doc/clip#transformers.CLIPTokenizer). unet ([`UNet2DConditionModel`]): Conditional U-Net architecture to denoise the encoded image latents. scheduler ([`SchedulerMixin`]): A scheduler to be used in combination with `unet` to denoise the encoded image latents. Can be one of [`DDIMScheduler`], [`LMSDiscreteScheduler`], or [`PNDMScheduler`]. safety_checker ([`StableDiffusionSafetyChecker`]): Classification module that estimates whether generated images could be considered offensive or harmful. Please, refer to the [model card](https://huggingface.co/runwayml/stable-diffusion-v1-5) for details. feature_extractor ([`CLIPImageProcessor`]): Model that extracts features from generated images to be used as inputs for the `safety_checker`. 
""" vae_encoder: OnnxRuntimeModel vae_decoder: OnnxRuntimeModel text_encoder: OnnxRuntimeModel tokenizer: CLIPTokenizer unet: OnnxRuntimeModel scheduler: Union[DDIMScheduler, PNDMScheduler, LMSDiscreteScheduler] safety_checker: OnnxRuntimeModel feature_extractor: CLIPImageProcessor _optional_components = ["safety_checker", "feature_extractor"] _is_onnx = True def __init__( self, vae_encoder: OnnxRuntimeModel, vae_decoder: OnnxRuntimeModel, text_encoder: OnnxRuntimeModel, tokenizer: CLIPTokenizer, unet: OnnxRuntimeModel, scheduler: Union[DDIMScheduler, PNDMScheduler, LMSDiscreteScheduler], safety_checker: OnnxRuntimeModel, feature_extractor: CLIPImageProcessor, requires_safety_checker: bool = True, ): super().__init__() logger.info("`OnnxStableDiffusionInpaintPipeline` is experimental and will very likely change in the future.") if hasattr(scheduler.config, "steps_offset") and scheduler.config.steps_offset != 1: deprecation_message = ( f"The configuration file of this scheduler: {scheduler} is outdated. `steps_offset`" f" should be set to 1 instead of {scheduler.config.steps_offset}. Please make sure " "to update the config accordingly as leaving `steps_offset` might led to incorrect results" " in future versions. If you have downloaded this checkpoint from the Hugging Face Hub," " it would be very nice if you could open a Pull request for the `scheduler/scheduler_config.json`" " file" ) deprecate("steps_offset!=1", "1.0.0", deprecation_message, standard_warn=False) new_config = dict(scheduler.config) new_config["steps_offset"] = 1 scheduler._internal_dict = FrozenDict(new_config) if hasattr(scheduler.config, "clip_sample") and scheduler.config.clip_sample is True: deprecation_message = ( f"The configuration file of this scheduler: {scheduler} has not set the configuration `clip_sample`." " `clip_sample` should be set to False in the configuration file. Please make sure to update the" " config accordingly as not setting `clip_sample` in the config might lead to incorrect results in" " future versions. If you have downloaded this checkpoint from the Hugging Face Hub, it would be very" " nice if you could open a Pull request for the `scheduler/scheduler_config.json` file" ) deprecate("clip_sample not set", "1.0.0", deprecation_message, standard_warn=False) new_config = dict(scheduler.config) new_config["clip_sample"] = False scheduler._internal_dict = FrozenDict(new_config) if safety_checker is None and requires_safety_checker: logger.warning( f"You have disabled the safety checker for {self.__class__} by passing `safety_checker=None`. Ensure" " that you abide to the conditions of the Stable Diffusion license and do not expose unfiltered" " results in services or applications open to the public. Both the diffusers team and Hugging Face" " strongly recommend to keep the safety filter enabled in all public facing circumstances, disabling" " it only for use-cases that involve analyzing network behavior or auditing its results. For more" " information, please have a look at https://github.com/huggingface/diffusers/pull/254 ." ) if safety_checker is not None and feature_extractor is None: raise ValueError( "Make sure to define a feature extractor when loading {self.__class__} if you want to use the safety" " checker. If you do not want to use the safety checker, you can pass `'safety_checker=None'` instead." 
            )

        self.register_modules(
            vae_encoder=vae_encoder,
            vae_decoder=vae_decoder,
            text_encoder=text_encoder,
            tokenizer=tokenizer,
            unet=unet,
            scheduler=scheduler,
            safety_checker=safety_checker,
            feature_extractor=feature_extractor,
        )
        self.register_to_config(requires_safety_checker=requires_safety_checker)

    # Copied from diffusers.pipelines.stable_diffusion.pipeline_onnx_stable_diffusion.OnnxStableDiffusionPipeline._encode_prompt
    def _encode_prompt(
        self,
        prompt: Union[str, List[str]],
        num_images_per_prompt: Optional[int],
        do_classifier_free_guidance: bool,
        negative_prompt: Optional[str],
        prompt_embeds: Optional[np.ndarray] = None,
        negative_prompt_embeds: Optional[np.ndarray] = None,
    ):
        r"""
        Encodes the prompt into text encoder hidden states.

        Args:
            prompt (`str` or `List[str]`):
                prompt to be encoded
            num_images_per_prompt (`int`):
                number of images that should be generated per prompt
            do_classifier_free_guidance (`bool`):
                whether to use classifier free guidance or not
            negative_prompt (`str` or `List[str]`):
                The prompt or prompts not to guide the image generation. Ignored when not using guidance (i.e.,
                ignored if `guidance_scale` is less than `1`).
            prompt_embeds (`np.ndarray`, *optional*):
                Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If
                not provided, text embeddings will be generated from `prompt` input argument.
            negative_prompt_embeds (`np.ndarray`, *optional*):
                Pre-generated negative text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt
                weighting. If not provided, negative_prompt_embeds will be generated from `negative_prompt` input
                argument.
        """
        if prompt is not None and isinstance(prompt, str):
            batch_size = 1
        elif prompt is not None and isinstance(prompt, list):
            batch_size = len(prompt)
        else:
            batch_size = prompt_embeds.shape[0]

        if prompt_embeds is None:
            # get prompt text embeddings
            text_inputs = self.tokenizer(
                prompt,
                padding="max_length",
                max_length=self.tokenizer.model_max_length,
                truncation=True,
                return_tensors="np",
            )
            text_input_ids = text_inputs.input_ids
            untruncated_ids = self.tokenizer(prompt, padding="max_length", return_tensors="np").input_ids

            if not np.array_equal(text_input_ids, untruncated_ids):
                removed_text = self.tokenizer.batch_decode(
                    untruncated_ids[:, self.tokenizer.model_max_length - 1 : -1]
                )
                logger.warning(
                    "The following part of your input was truncated because CLIP can only handle sequences up to"
                    f" {self.tokenizer.model_max_length} tokens: {removed_text}"
                )

            prompt_embeds = self.text_encoder(input_ids=text_input_ids.astype(np.int32))[0]

        prompt_embeds = np.repeat(prompt_embeds, num_images_per_prompt, axis=0)

        # get unconditional embeddings for classifier free guidance
        if do_classifier_free_guidance and negative_prompt_embeds is None:
            uncond_tokens: List[str]
            if negative_prompt is None:
                uncond_tokens = [""] * batch_size
            elif type(prompt) is not type(negative_prompt):
                raise TypeError(
                    f"`negative_prompt` should be the same type as `prompt`, but got {type(negative_prompt)} !="
                    f" {type(prompt)}."
                )
            elif isinstance(negative_prompt, str):
                uncond_tokens = [negative_prompt] * batch_size
            elif batch_size != len(negative_prompt):
                raise ValueError(
                    f"`negative_prompt`: {negative_prompt} has batch size {len(negative_prompt)}, but `prompt`:"
                    f" {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches"
                    " the batch size of `prompt`."
) else: uncond_tokens = negative_prompt max_length = prompt_embeds.shape[1] uncond_input = self.tokenizer( uncond_tokens, padding="max_length", max_length=max_length, truncation=True, return_tensors="np", ) negative_prompt_embeds = self.text_encoder(input_ids=uncond_input.input_ids.astype(np.int32))[0] if do_classifier_free_guidance: negative_prompt_embeds = np.repeat(negative_prompt_embeds, num_images_per_prompt, axis=0) # For classifier free guidance, we need to do two forward passes. # Here we concatenate the unconditional and text embeddings into a single batch # to avoid doing two forward passes prompt_embeds = np.concatenate([negative_prompt_embeds, prompt_embeds]) return prompt_embeds # Copied from diffusers.pipelines.stable_diffusion.pipeline_onnx_stable_diffusion.OnnxStableDiffusionPipeline.check_inputs def check_inputs( self, prompt: Union[str, List[str]], height: Optional[int], width: Optional[int], callback_steps: int, negative_prompt: Optional[str] = None, prompt_embeds: Optional[np.ndarray] = None, negative_prompt_embeds: Optional[np.ndarray] = None, ): if height % 8 != 0 or width % 8 != 0: raise ValueError(f"`height` and `width` have to be divisible by 8 but are {height} and {width}.") if (callback_steps is None) or ( callback_steps is not None and (not isinstance(callback_steps, int) or callback_steps <= 0) ): raise ValueError( f"`callback_steps` has to be a positive integer but is {callback_steps} of type" f" {type(callback_steps)}." ) if prompt is not None and prompt_embeds is not None: raise ValueError( f"Cannot forward both `prompt`: {prompt} and `prompt_embeds`: {prompt_embeds}. Please make sure to" " only forward one of the two." ) elif prompt is None and prompt_embeds is None: raise ValueError( "Provide either `prompt` or `prompt_embeds`. Cannot leave both `prompt` and `prompt_embeds` undefined." ) elif prompt is not None and (not isinstance(prompt, str) and not isinstance(prompt, list)): raise ValueError(f"`prompt` has to be of type `str` or `list` but is {type(prompt)}") if negative_prompt is not None and negative_prompt_embeds is not None: raise ValueError( f"Cannot forward both `negative_prompt`: {negative_prompt} and `negative_prompt_embeds`:" f" {negative_prompt_embeds}. Please make sure to only forward one of the two." ) if prompt_embeds is not None and negative_prompt_embeds is not None: if prompt_embeds.shape != negative_prompt_embeds.shape: raise ValueError( "`prompt_embeds` and `negative_prompt_embeds` must have the same shape when passed directly, but" f" got: `prompt_embeds` {prompt_embeds.shape} != `negative_prompt_embeds`" f" {negative_prompt_embeds.shape}." ) @torch.no_grad() def __call__( self, prompt: Union[str, List[str]], image: PIL.Image.Image, mask_image: PIL.Image.Image, height: Optional[int] = 512, width: Optional[int] = 512, num_inference_steps: int = 50, guidance_scale: float = 7.5, negative_prompt: Optional[Union[str, List[str]]] = None, num_images_per_prompt: Optional[int] = 1, eta: float = 0.0, generator: Optional[np.random.RandomState] = None, latents: Optional[np.ndarray] = None, prompt_embeds: Optional[np.ndarray] = None, negative_prompt_embeds: Optional[np.ndarray] = None, output_type: Optional[str] = "pil", return_dict: bool = True, callback: Optional[Callable[[int, int, np.ndarray], None]] = None, callback_steps: int = 1, ): r""" Function invoked when calling the pipeline for generation. Args: prompt (`str` or `List[str]`): The prompt or prompts to guide the image generation. 
            image (`PIL.Image.Image`):
                `Image`, or tensor representing an image batch which will be inpainted, *i.e.* parts of the image will
                be masked out with `mask_image` and repainted according to `prompt`.
            mask_image (`PIL.Image.Image`):
                `Image`, or tensor representing an image batch, to mask `image`. White pixels in the mask will be
                repainted, while black pixels will be preserved. If `mask_image` is a PIL image, it will be converted
                to a single channel (luminance) before use. If it's a tensor, it should contain one color channel (L)
                instead of 3, so the expected shape would be `(B, H, W, 1)`.
            height (`int`, *optional*, defaults to 512):
                The height in pixels of the generated image.
            width (`int`, *optional*, defaults to 512):
                The width in pixels of the generated image.
            num_inference_steps (`int`, *optional*, defaults to 50):
                The number of denoising steps. More denoising steps usually lead to a higher quality image at the
                expense of slower inference.
            guidance_scale (`float`, *optional*, defaults to 7.5):
                Guidance scale as defined in [Classifier-Free Diffusion
                Guidance](https://arxiv.org/abs/2207.12598). `guidance_scale` is defined as `w` of equation 2. of
                [Imagen Paper](https://arxiv.org/pdf/2205.11487.pdf). Guidance scale is enabled by setting
                `guidance_scale > 1`. Higher guidance scale encourages generating images that are closely linked to
                the text `prompt`, usually at the expense of lower image quality.
            negative_prompt (`str` or `List[str]`, *optional*):
                The prompt or prompts not to guide the image generation. Ignored when not using guidance (i.e.,
                ignored if `guidance_scale` is less than `1`).
            num_images_per_prompt (`int`, *optional*, defaults to 1):
                The number of images to generate per prompt.
            eta (`float`, *optional*, defaults to 0.0):
                Corresponds to parameter eta (η) in the DDIM paper: https://arxiv.org/abs/2010.02502. Only applies to
                [`schedulers.DDIMScheduler`]; ignored for other schedulers.
            generator (`np.random.RandomState`, *optional*):
                A `np.random.RandomState` to make generation deterministic.
            latents (`np.ndarray`, *optional*):
                Pre-generated noisy latents, sampled from a Gaussian distribution, to be used as inputs for image
                generation. Can be used to tweak the same generation with different prompts. If not provided, a
                latents tensor will be generated by sampling using the supplied random `generator`.
            prompt_embeds (`np.ndarray`, *optional*):
                Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If
                not provided, text embeddings will be generated from `prompt` input argument.
            negative_prompt_embeds (`np.ndarray`, *optional*):
                Pre-generated negative text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt
                weighting. If not provided, negative_prompt_embeds will be generated from `negative_prompt` input
                argument.
            output_type (`str`, *optional*, defaults to `"pil"`):
                The output format of the generated image. Choose between
                [PIL](https://pillow.readthedocs.io/en/stable/): `PIL.Image.Image` or `np.array`.
            return_dict (`bool`, *optional*, defaults to `True`):
                Whether or not to return a [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] instead of a
                plain tuple.
            callback (`Callable`, *optional*):
                A function that will be called every `callback_steps` steps during inference. The function will be
                called with the following arguments: `callback(step: int, timestep: int, latents: np.ndarray)`.
            callback_steps (`int`, *optional*, defaults to 1):
                The frequency at which the `callback` function will be called.
                If not specified, the callback will be called at every step.

        Returns:
            [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] or `tuple`:
            [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] if `return_dict` is True, otherwise a
            `tuple`. When returning a tuple, the first element is a list with the generated images, and the second
            element is a list of `bool`s denoting whether the corresponding generated image likely represents
            "not-safe-for-work" (nsfw) content, according to the `safety_checker`.
        """

        # check inputs. Raise error if not correct
        self.check_inputs(
            prompt, height, width, callback_steps, negative_prompt, prompt_embeds, negative_prompt_embeds
        )

        # define call parameters
        if prompt is not None and isinstance(prompt, str):
            batch_size = 1
        elif prompt is not None and isinstance(prompt, list):
            batch_size = len(prompt)
        else:
            batch_size = prompt_embeds.shape[0]

        if generator is None:
            generator = np.random

        # set timesteps
        self.scheduler.set_timesteps(num_inference_steps)

        # here `guidance_scale` is defined analogously to the guidance weight `w` of equation (2)
        # of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1`
        # corresponds to doing no classifier free guidance.
        do_classifier_free_guidance = guidance_scale > 1.0

        prompt_embeds = self._encode_prompt(
            prompt,
            num_images_per_prompt,
            do_classifier_free_guidance,
            negative_prompt,
            prompt_embeds=prompt_embeds,
            negative_prompt_embeds=negative_prompt_embeds,
        )

        num_channels_latents = NUM_LATENT_CHANNELS
        latents_shape = (batch_size * num_images_per_prompt, num_channels_latents, height // 8, width // 8)
        latents_dtype = prompt_embeds.dtype
        if latents is None:
            latents = generator.randn(*latents_shape).astype(latents_dtype)
        else:
            if latents.shape != latents_shape:
                raise ValueError(f"Unexpected latents shape, got {latents.shape}, expected {latents_shape}")

        # prepare mask and masked_image
        mask, masked_image = prepare_mask_and_masked_image(image, mask_image, latents_shape[-2:])
        mask = mask.astype(latents.dtype)
        masked_image = masked_image.astype(latents.dtype)

        masked_image_latents = self.vae_encoder(sample=masked_image)[0]
        masked_image_latents = 0.18215 * masked_image_latents

        # duplicate mask and masked_image_latents for each generation per prompt
        mask = mask.repeat(batch_size * num_images_per_prompt, 0)
        masked_image_latents = masked_image_latents.repeat(batch_size * num_images_per_prompt, 0)

        mask = np.concatenate([mask] * 2) if do_classifier_free_guidance else mask
        masked_image_latents = (
            np.concatenate([masked_image_latents] * 2) if do_classifier_free_guidance else masked_image_latents
        )

        num_channels_mask = mask.shape[1]
        num_channels_masked_image = masked_image_latents.shape[1]

        unet_input_channels = NUM_UNET_INPUT_CHANNELS
        if num_channels_latents + num_channels_mask + num_channels_masked_image != unet_input_channels:
            raise ValueError(
                "Incorrect configuration settings! The config of `pipeline.unet` expects"
                f" {unet_input_channels} but received `num_channels_latents`: {num_channels_latents} +"
                f" `num_channels_mask`: {num_channels_mask} + `num_channels_masked_image`: {num_channels_masked_image}"
                f" = {num_channels_latents + num_channels_masked_image + num_channels_mask}. Please verify the config"
                " of `pipeline.unet` or your `mask_image` or `image` input."
            )

        # scale the initial noise by the standard deviation required by the scheduler
        latents = latents * np.float64(self.scheduler.init_noise_sigma)

        # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature
        # eta (η) is only used with the DDIMScheduler; it will be ignored for other schedulers.
        # eta corresponds to η in the DDIM paper: https://arxiv.org/abs/2010.02502
        # and should be between [0, 1]
        accepts_eta = "eta" in set(inspect.signature(self.scheduler.step).parameters.keys())
        extra_step_kwargs = {}
        if accepts_eta:
            extra_step_kwargs["eta"] = eta

        timestep_dtype = next(
            (input.type for input in self.unet.model.get_inputs() if input.name == "timestep"), "tensor(float)"
        )
        timestep_dtype = ORT_TO_NP_TYPE[timestep_dtype]

        for i, t in enumerate(self.progress_bar(self.scheduler.timesteps)):
            # expand the latents if we are doing classifier free guidance
            latent_model_input = np.concatenate([latents] * 2) if do_classifier_free_guidance else latents
            # concat latents, mask, masked_image_latents in the channel dimension
            latent_model_input = self.scheduler.scale_model_input(torch.from_numpy(latent_model_input), t)
            latent_model_input = latent_model_input.cpu().numpy()
            latent_model_input = np.concatenate([latent_model_input, mask, masked_image_latents], axis=1)

            # predict the noise residual
            timestep = np.array([t], dtype=timestep_dtype)
            noise_pred = self.unet(sample=latent_model_input, timestep=timestep, encoder_hidden_states=prompt_embeds)[
                0
            ]

            # perform guidance
            if do_classifier_free_guidance:
                noise_pred_uncond, noise_pred_text = np.split(noise_pred, 2)
                noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)

            # compute the previous noisy sample x_t -> x_t-1
            scheduler_output = self.scheduler.step(
                torch.from_numpy(noise_pred), t, torch.from_numpy(latents), **extra_step_kwargs
            )
            latents = scheduler_output.prev_sample.numpy()

            # call the callback, if provided
            if callback is not None and i % callback_steps == 0:
                step_idx = i // getattr(self.scheduler, "order", 1)
                callback(step_idx, t, latents)

        latents = 1 / 0.18215 * latents
        # image = self.vae_decoder(latent_sample=latents)[0]
        # decoding latent-by-latent: half-precision VAE decoders can produce strange results for batch size > 1
        image = np.concatenate(
            [self.vae_decoder(latent_sample=latents[i : i + 1])[0] for i in range(latents.shape[0])]
        )

        image = np.clip(image / 2 + 0.5, 0, 1)
        image = image.transpose((0, 2, 3, 1))

        if self.safety_checker is not None:
            safety_checker_input = self.feature_extractor(
                self.numpy_to_pil(image), return_tensors="np"
            ).pixel_values.astype(image.dtype)
            # safety_checker does not support batched inputs yet
            images, has_nsfw_concept = [], []
            for i in range(image.shape[0]):
                image_i, has_nsfw_concept_i = self.safety_checker(
                    clip_input=safety_checker_input[i : i + 1], images=image[i : i + 1]
                )
                images.append(image_i)
                has_nsfw_concept.append(has_nsfw_concept_i[0])
            image = np.concatenate(images)
        else:
            has_nsfw_concept = None

        if output_type == "pil":
            image = self.numpy_to_pil(image)

        if not return_dict:
            return (image, has_nsfw_concept)

        return StableDiffusionPipelineOutput(images=image, nsfw_content_detected=has_nsfw_concept)
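
# A usage sketch for the pipeline above (not part of the original file). The
# checkpoint id, `revision`, and `provider` below are assumptions for
# illustration; any ONNX export of a 9-channel inpainting UNet should behave
# the same way. The input file names are placeholders.
def _onnx_inpaint_usage_sketch():
    pipe = OnnxStableDiffusionInpaintPipeline.from_pretrained(
        "runwayml/stable-diffusion-inpainting", revision="onnx", provider="CPUExecutionProvider"
    )
    init_image = PIL.Image.open("photo.png").convert("RGB").resize((512, 512))  # placeholder input image
    mask_image = PIL.Image.open("mask.png").convert("L").resize((512, 512))  # placeholder mask (white = repaint)
    result = pipe(prompt="a red fox sitting on a park bench", image=init_image, mask_image=mask_image)
    result.images[0].save("inpainted.png")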
diffusers/src/diffusers/pipelines/stable_diffusion/pipeline_onnx_stable_diffusion_inpaint.py/0
{ "file_path": "diffusers/src/diffusers/pipelines/stable_diffusion/pipeline_onnx_stable_diffusion_inpaint.py", "repo_id": "diffusers", "token_count": 12530 }
152
import numpy as np import torch from ...utils import is_invisible_watermark_available if is_invisible_watermark_available(): from imwatermark import WatermarkEncoder # Copied from https://github.com/Stability-AI/generative-models/blob/613af104c6b85184091d42d374fef420eddb356d/scripts/demo/streamlit_helpers.py#L66 WATERMARK_MESSAGE = 0b101100111110110010010000011110111011000110011110 # bin(x)[2:] gives bits of x as str, use int to convert them to 0/1 WATERMARK_BITS = [int(bit) for bit in bin(WATERMARK_MESSAGE)[2:]] class StableDiffusionXLWatermarker: def __init__(self): self.watermark = WATERMARK_BITS self.encoder = WatermarkEncoder() self.encoder.set_watermark("bits", self.watermark) def apply_watermark(self, images: torch.Tensor): # can't encode images that are smaller than 256 if images.shape[-1] < 256: return images images = (255 * (images / 2 + 0.5)).cpu().permute(0, 2, 3, 1).float().numpy() # Convert RGB to BGR, which is the channel order expected by the watermark encoder. images = images[:, :, :, ::-1] # Add watermark and convert BGR back to RGB images = [self.encoder.encode(image, "dwtDct")[:, :, ::-1] for image in images] images = np.array(images) images = torch.from_numpy(images).permute(0, 3, 1, 2) images = torch.clamp(2 * (images / 255 - 0.5), min=-1.0, max=1.0) return images
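
# A small usage sketch (not part of the original file). It assumes the
# `invisible-watermark` package is installed and follows the usual diffusers
# convention of NCHW float images in [-1, 1].
def _watermark_demo():
    watermarker = StableDiffusionXLWatermarker()
    images = torch.rand(2, 3, 512, 512) * 2 - 1  # placeholder batch in [-1, 1]
    watermarked = watermarker.apply_watermark(images)
    # The shape and value range are preserved; images narrower than 256 px
    # pass through unchanged, as in the early-return above.
    assert watermarked.shape == images.shape
    return watermarked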
diffusers/src/diffusers/pipelines/stable_diffusion_xl/watermark.py/0
{ "file_path": "diffusers/src/diffusers/pipelines/stable_diffusion_xl/watermark.py", "repo_id": "diffusers", "token_count": 596 }
153
from typing import TYPE_CHECKING from ...utils import ( DIFFUSERS_SLOW_IMPORT, OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_transformers_available, ) _dummy_objects = {} _import_structure = {} try: if not (is_transformers_available() and is_torch_available()): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from ...utils.dummy_torch_and_transformers_objects import ( ImageTextPipelineOutput, UniDiffuserPipeline, ) _dummy_objects.update( {"ImageTextPipelineOutput": ImageTextPipelineOutput, "UniDiffuserPipeline": UniDiffuserPipeline} ) else: _import_structure["modeling_text_decoder"] = ["UniDiffuserTextDecoder"] _import_structure["modeling_uvit"] = ["UniDiffuserModel", "UTransformer2DModel"] _import_structure["pipeline_unidiffuser"] = ["ImageTextPipelineOutput", "UniDiffuserPipeline"] if TYPE_CHECKING or DIFFUSERS_SLOW_IMPORT: try: if not (is_transformers_available() and is_torch_available()): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from ...utils.dummy_torch_and_transformers_objects import ( ImageTextPipelineOutput, UniDiffuserPipeline, ) else: from .modeling_text_decoder import UniDiffuserTextDecoder from .modeling_uvit import UniDiffuserModel, UTransformer2DModel from .pipeline_unidiffuser import ImageTextPipelineOutput, UniDiffuserPipeline else: import sys sys.modules[__name__] = _LazyModule( __name__, globals()["__file__"], _import_structure, module_spec=__spec__, ) for name, value in _dummy_objects.items(): setattr(sys.modules[__name__], name, value)
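
# A quick sketch (not part of the original file) of what the lazy indirection
# above buys: importing the package stays cheap, and the heavy
# torch/transformers submodules are only imported on first attribute access
# (or the dummy placeholders are returned when the backends are missing).
def _lazy_import_demo():
    from diffusers.pipelines.unidiffuser import UniDiffuserPipeline  # real import happens here

    return UniDiffuserPipeline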
diffusers/src/diffusers/pipelines/unidiffuser/__init__.py/0
{ "file_path": "diffusers/src/diffusers/pipelines/unidiffuser/__init__.py", "repo_id": "diffusers", "token_count": 733 }
154
# Copyright 2024 NVIDIA and The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from dataclasses import dataclass from typing import Optional, Tuple, Union import numpy as np import torch from ...configuration_utils import ConfigMixin, register_to_config from ...utils import BaseOutput from ...utils.torch_utils import randn_tensor from ..scheduling_utils import SchedulerMixin @dataclass class KarrasVeOutput(BaseOutput): """ Output class for the scheduler's step function output. Args: prev_sample (`torch.Tensor` of shape `(batch_size, num_channels, height, width)` for images): Computed sample (x_{t-1}) of previous timestep. `prev_sample` should be used as next model input in the denoising loop. derivative (`torch.Tensor` of shape `(batch_size, num_channels, height, width)` for images): Derivative of predicted original image sample (x_0). pred_original_sample (`torch.Tensor` of shape `(batch_size, num_channels, height, width)` for images): The predicted denoised sample (x_{0}) based on the model output from the current timestep. `pred_original_sample` can be used to preview progress or for guidance. """ prev_sample: torch.Tensor derivative: torch.Tensor pred_original_sample: Optional[torch.Tensor] = None class KarrasVeScheduler(SchedulerMixin, ConfigMixin): """ A stochastic scheduler tailored to variance-expanding models. This model inherits from [`SchedulerMixin`] and [`ConfigMixin`]. Check the superclass documentation for the generic methods the library implements for all schedulers such as loading and saving. <Tip> For more details on the parameters, see [Appendix E](https://arxiv.org/abs/2206.00364). The grid search values used to find the optimal `{s_noise, s_churn, s_min, s_max}` for a specific model are described in Table 5 of the paper. </Tip> Args: sigma_min (`float`, defaults to 0.02): The minimum noise magnitude. sigma_max (`float`, defaults to 100): The maximum noise magnitude. s_noise (`float`, defaults to 1.007): The amount of additional noise to counteract loss of detail during sampling. A reasonable range is [1.000, 1.011]. s_churn (`float`, defaults to 80): The parameter controlling the overall amount of stochasticity. A reasonable range is [0, 100]. s_min (`float`, defaults to 0.05): The start value of the sigma range to add noise (enable stochasticity). A reasonable range is [0, 10]. s_max (`float`, defaults to 50): The end value of the sigma range to add noise. A reasonable range is [0.2, 80]. 
""" order = 2 @register_to_config def __init__( self, sigma_min: float = 0.02, sigma_max: float = 100, s_noise: float = 1.007, s_churn: float = 80, s_min: float = 0.05, s_max: float = 50, ): # standard deviation of the initial noise distribution self.init_noise_sigma = sigma_max # setable values self.num_inference_steps: int = None self.timesteps: np.IntTensor = None self.schedule: torch.Tensor = None # sigma(t_i) def scale_model_input(self, sample: torch.Tensor, timestep: Optional[int] = None) -> torch.Tensor: """ Ensures interchangeability with schedulers that need to scale the denoising model input depending on the current timestep. Args: sample (`torch.Tensor`): The input sample. timestep (`int`, *optional*): The current timestep in the diffusion chain. Returns: `torch.Tensor`: A scaled input sample. """ return sample def set_timesteps(self, num_inference_steps: int, device: Union[str, torch.device] = None): """ Sets the discrete timesteps used for the diffusion chain (to be run before inference). Args: num_inference_steps (`int`): The number of diffusion steps used when generating samples with a pre-trained model. device (`str` or `torch.device`, *optional*): The device to which the timesteps should be moved to. If `None`, the timesteps are not moved. """ self.num_inference_steps = num_inference_steps timesteps = np.arange(0, self.num_inference_steps)[::-1].copy() self.timesteps = torch.from_numpy(timesteps).to(device) schedule = [ ( self.config.sigma_max**2 * (self.config.sigma_min**2 / self.config.sigma_max**2) ** (i / (num_inference_steps - 1)) ) for i in self.timesteps ] self.schedule = torch.tensor(schedule, dtype=torch.float32, device=device) def add_noise_to_input( self, sample: torch.Tensor, sigma: float, generator: Optional[torch.Generator] = None ) -> Tuple[torch.Tensor, float]: """ Explicit Langevin-like "churn" step of adding noise to the sample according to a `gamma_i โ‰ฅ 0` to reach a higher noise level `sigma_hat = sigma_i + gamma_i*sigma_i`. Args: sample (`torch.Tensor`): The input sample. sigma (`float`): generator (`torch.Generator`, *optional*): A random number generator. """ if self.config.s_min <= sigma <= self.config.s_max: gamma = min(self.config.s_churn / self.num_inference_steps, 2**0.5 - 1) else: gamma = 0 # sample eps ~ N(0, S_noise^2 * I) eps = self.config.s_noise * randn_tensor(sample.shape, generator=generator).to(sample.device) sigma_hat = sigma + gamma * sigma sample_hat = sample + ((sigma_hat**2 - sigma**2) ** 0.5 * eps) return sample_hat, sigma_hat def step( self, model_output: torch.Tensor, sigma_hat: float, sigma_prev: float, sample_hat: torch.Tensor, return_dict: bool = True, ) -> Union[KarrasVeOutput, Tuple]: """ Predict the sample from the previous timestep by reversing the SDE. This function propagates the diffusion process from the learned model outputs (most often the predicted noise). Args: model_output (`torch.Tensor`): The direct output from learned diffusion model. sigma_hat (`float`): sigma_prev (`float`): sample_hat (`torch.Tensor`): return_dict (`bool`, *optional*, defaults to `True`): Whether or not to return a [`~schedulers.scheduling_karras_ve.KarrasVESchedulerOutput`] or `tuple`. Returns: [`~schedulers.scheduling_karras_ve.KarrasVESchedulerOutput`] or `tuple`: If return_dict is `True`, [`~schedulers.scheduling_karras_ve.KarrasVESchedulerOutput`] is returned, otherwise a tuple is returned where the first element is the sample tensor. 
""" pred_original_sample = sample_hat + sigma_hat * model_output derivative = (sample_hat - pred_original_sample) / sigma_hat sample_prev = sample_hat + (sigma_prev - sigma_hat) * derivative if not return_dict: return (sample_prev, derivative) return KarrasVeOutput( prev_sample=sample_prev, derivative=derivative, pred_original_sample=pred_original_sample ) def step_correct( self, model_output: torch.Tensor, sigma_hat: float, sigma_prev: float, sample_hat: torch.Tensor, sample_prev: torch.Tensor, derivative: torch.Tensor, return_dict: bool = True, ) -> Union[KarrasVeOutput, Tuple]: """ Corrects the predicted sample based on the `model_output` of the network. Args: model_output (`torch.Tensor`): The direct output from learned diffusion model. sigma_hat (`float`): TODO sigma_prev (`float`): TODO sample_hat (`torch.Tensor`): TODO sample_prev (`torch.Tensor`): TODO derivative (`torch.Tensor`): TODO return_dict (`bool`, *optional*, defaults to `True`): Whether or not to return a [`~schedulers.scheduling_ddpm.DDPMSchedulerOutput`] or `tuple`. Returns: prev_sample (TODO): updated sample in the diffusion chain. derivative (TODO): TODO """ pred_original_sample = sample_prev + sigma_prev * model_output derivative_corr = (sample_prev - pred_original_sample) / sigma_prev sample_prev = sample_hat + (sigma_prev - sigma_hat) * (0.5 * derivative + 0.5 * derivative_corr) if not return_dict: return (sample_prev, derivative) return KarrasVeOutput( prev_sample=sample_prev, derivative=derivative, pred_original_sample=pred_original_sample ) def add_noise(self, original_samples, noise, timesteps): raise NotImplementedError()
diffusers/src/diffusers/schedulers/deprecated/scheduling_karras_ve.py/0
{ "file_path": "diffusers/src/diffusers/schedulers/deprecated/scheduling_karras_ve.py", "repo_id": "diffusers", "token_count": 4063 }
155
# Copyright 2024 Microsoft and The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from dataclasses import dataclass from typing import Optional, Tuple, Union import numpy as np import torch import torch.nn.functional as F from ..configuration_utils import ConfigMixin, register_to_config from ..utils import BaseOutput from .scheduling_utils import SchedulerMixin @dataclass class VQDiffusionSchedulerOutput(BaseOutput): """ Output class for the scheduler's step function output. Args: prev_sample (`torch.LongTensor` of shape `(batch size, num latent pixels)`): Computed sample x_{t-1} of previous timestep. `prev_sample` should be used as next model input in the denoising loop. """ prev_sample: torch.LongTensor def index_to_log_onehot(x: torch.LongTensor, num_classes: int) -> torch.Tensor: """ Convert batch of vector of class indices into batch of log onehot vectors Args: x (`torch.LongTensor` of shape `(batch size, vector length)`): Batch of class indices num_classes (`int`): number of classes to be used for the onehot vectors Returns: `torch.Tensor` of shape `(batch size, num classes, vector length)`: Log onehot vectors """ x_onehot = F.one_hot(x, num_classes) x_onehot = x_onehot.permute(0, 2, 1) log_x = torch.log(x_onehot.float().clamp(min=1e-30)) return log_x def gumbel_noised(logits: torch.Tensor, generator: Optional[torch.Generator]) -> torch.Tensor: """ Apply gumbel noise to `logits` """ uniform = torch.rand(logits.shape, device=logits.device, generator=generator) gumbel_noise = -torch.log(-torch.log(uniform + 1e-30) + 1e-30) noised = gumbel_noise + logits return noised def alpha_schedules(num_diffusion_timesteps: int, alpha_cum_start=0.99999, alpha_cum_end=0.000009): """ Cumulative and non-cumulative alpha schedules. See section 4.1. """ att = ( np.arange(0, num_diffusion_timesteps) / (num_diffusion_timesteps - 1) * (alpha_cum_end - alpha_cum_start) + alpha_cum_start ) att = np.concatenate(([1], att)) at = att[1:] / att[:-1] att = np.concatenate((att[1:], [1])) return at, att def gamma_schedules(num_diffusion_timesteps: int, gamma_cum_start=0.000009, gamma_cum_end=0.99999): """ Cumulative and non-cumulative gamma schedules. See section 4.1. """ ctt = ( np.arange(0, num_diffusion_timesteps) / (num_diffusion_timesteps - 1) * (gamma_cum_end - gamma_cum_start) + gamma_cum_start ) ctt = np.concatenate(([0], ctt)) one_minus_ctt = 1 - ctt one_minus_ct = one_minus_ctt[1:] / one_minus_ctt[:-1] ct = 1 - one_minus_ct ctt = np.concatenate((ctt[1:], [0])) return ct, ctt class VQDiffusionScheduler(SchedulerMixin, ConfigMixin): """ A scheduler for vector quantized diffusion. This model inherits from [`SchedulerMixin`] and [`ConfigMixin`]. Check the superclass documentation for the generic methods the library implements for all schedulers such as loading and saving. Args: num_vec_classes (`int`): The number of classes of the vector embeddings of the latent pixels. Includes the class for the masked latent pixel. 
        num_train_timesteps (`int`, defaults to 100):
            The number of diffusion steps to train the model.
        alpha_cum_start (`float`, defaults to 0.99999):
            The starting cumulative alpha value.
        alpha_cum_end (`float`, defaults to 0.000009):
            The ending cumulative alpha value.
        gamma_cum_start (`float`, defaults to 0.000009):
            The starting cumulative gamma value.
        gamma_cum_end (`float`, defaults to 0.99999):
            The ending cumulative gamma value.
    """

    order = 1

    @register_to_config
    def __init__(
        self,
        num_vec_classes: int,
        num_train_timesteps: int = 100,
        alpha_cum_start: float = 0.99999,
        alpha_cum_end: float = 0.000009,
        gamma_cum_start: float = 0.000009,
        gamma_cum_end: float = 0.99999,
    ):
        self.num_embed = num_vec_classes

        # By convention, the index for the mask class is the last class index
        self.mask_class = self.num_embed - 1

        at, att = alpha_schedules(num_train_timesteps, alpha_cum_start=alpha_cum_start, alpha_cum_end=alpha_cum_end)
        ct, ctt = gamma_schedules(num_train_timesteps, gamma_cum_start=gamma_cum_start, gamma_cum_end=gamma_cum_end)

        num_non_mask_classes = self.num_embed - 1
        bt = (1 - at - ct) / num_non_mask_classes
        btt = (1 - att - ctt) / num_non_mask_classes

        at = torch.tensor(at.astype("float64"))
        bt = torch.tensor(bt.astype("float64"))
        ct = torch.tensor(ct.astype("float64"))
        log_at = torch.log(at)
        log_bt = torch.log(bt)
        log_ct = torch.log(ct)

        att = torch.tensor(att.astype("float64"))
        btt = torch.tensor(btt.astype("float64"))
        ctt = torch.tensor(ctt.astype("float64"))
        log_cumprod_at = torch.log(att)
        log_cumprod_bt = torch.log(btt)
        log_cumprod_ct = torch.log(ctt)

        self.log_at = log_at.float()
        self.log_bt = log_bt.float()
        self.log_ct = log_ct.float()
        self.log_cumprod_at = log_cumprod_at.float()
        self.log_cumprod_bt = log_cumprod_bt.float()
        self.log_cumprod_ct = log_cumprod_ct.float()

        # setable values
        self.num_inference_steps = None
        self.timesteps = torch.from_numpy(np.arange(0, num_train_timesteps)[::-1].copy())

    def set_timesteps(self, num_inference_steps: int, device: Union[str, torch.device] = None):
        """
        Sets the discrete timesteps used for the diffusion chain (to be run before inference).

        Args:
            num_inference_steps (`int`):
                The number of diffusion steps used when generating samples with a pre-trained model.
            device (`str` or `torch.device`, *optional*):
                The device to which the timesteps and diffusion process parameters (alpha, beta, gamma) should be
                moved.
        """
        self.num_inference_steps = num_inference_steps
        timesteps = np.arange(0, self.num_inference_steps)[::-1].copy()
        self.timesteps = torch.from_numpy(timesteps).to(device)

        self.log_at = self.log_at.to(device)
        self.log_bt = self.log_bt.to(device)
        self.log_ct = self.log_ct.to(device)
        self.log_cumprod_at = self.log_cumprod_at.to(device)
        self.log_cumprod_bt = self.log_cumprod_bt.to(device)
        self.log_cumprod_ct = self.log_cumprod_ct.to(device)

    def step(
        self,
        model_output: torch.Tensor,
        timestep: torch.long,
        sample: torch.LongTensor,
        generator: Optional[torch.Generator] = None,
        return_dict: bool = True,
    ) -> Union[VQDiffusionSchedulerOutput, Tuple]:
        """
        Predict the sample from the previous timestep by the reverse transition distribution. See
        [`~VQDiffusionScheduler.q_posterior`] for more details about how the distribution is computed.

        Args:
            model_output (`torch.Tensor` of shape `(batch size, num classes - 1, num latent pixels)`):
                The log probabilities for the predicted classes of the initial latent pixels. Does not include a
                prediction for the masked class as the initial unnoised image cannot be masked.
            timestep (`torch.long`):
                The timestep that determines which transition matrices are used.
            sample (`torch.LongTensor` of shape `(batch size, num latent pixels)`):
                The classes of each latent pixel at time `timestep`.
            generator (`torch.Generator`, or `None`):
                A random number generator for the noise applied to `p(x_{t-1} | x_t)` before it is sampled from.
            return_dict (`bool`, *optional*, defaults to `True`):
                Whether or not to return a [`~schedulers.scheduling_vq_diffusion.VQDiffusionSchedulerOutput`] or
                `tuple`.

        Returns:
            [`~schedulers.scheduling_vq_diffusion.VQDiffusionSchedulerOutput`] or `tuple`:
                If return_dict is `True`, [`~schedulers.scheduling_vq_diffusion.VQDiffusionSchedulerOutput`] is
                returned, otherwise a tuple is returned where the first element is the sample tensor.
        """
        if timestep == 0:
            log_p_x_t_min_1 = model_output
        else:
            log_p_x_t_min_1 = self.q_posterior(model_output, sample, timestep)

        log_p_x_t_min_1 = gumbel_noised(log_p_x_t_min_1, generator)

        x_t_min_1 = log_p_x_t_min_1.argmax(dim=1)

        if not return_dict:
            return (x_t_min_1,)

        return VQDiffusionSchedulerOutput(prev_sample=x_t_min_1)

    def q_posterior(self, log_p_x_0, x_t, t):
        """
        Calculates the log probabilities for the predicted classes of the image at timestep `t-1`:

        ```
        p(x_{t-1} | x_t) = sum( q(x_t | x_{t-1}) * q(x_{t-1} | x_0) * p(x_0) / q(x_t | x_0) )
        ```

        Args:
            log_p_x_0 (`torch.Tensor` of shape `(batch size, num classes - 1, num latent pixels)`):
                The log probabilities for the predicted classes of the initial latent pixels. Does not include a
                prediction for the masked class as the initial unnoised image cannot be masked.
            x_t (`torch.LongTensor` of shape `(batch size, num latent pixels)`):
                The classes of each latent pixel at time `t`.
            t (`torch.Long`):
                The timestep that determines which transition matrix is used.

        Returns:
            `torch.Tensor` of shape `(batch size, num classes, num latent pixels)`:
                The log probabilities for the predicted classes of the image at timestep `t-1`.
        """
        log_onehot_x_t = index_to_log_onehot(x_t, self.num_embed)

        log_q_x_t_given_x_0 = self.log_Q_t_transitioning_to_known_class(
            t=t, x_t=x_t, log_onehot_x_t=log_onehot_x_t, cumulative=True
        )

        log_q_t_given_x_t_min_1 = self.log_Q_t_transitioning_to_known_class(
            t=t, x_t=x_t, log_onehot_x_t=log_onehot_x_t, cumulative=False
        )

        # p_0(x_0=C_0 | x_t) / q(x_t | x_0=C_0)          ...      p_n(x_0=C_0 | x_t) / q(x_t | x_0=C_0)
        #               .                                                      .
        #               .                                                      .
        #               .                                                      .
        # p_0(x_0=C_{k-1} | x_t) / q(x_t | x_0=C_{k-1})  ...      p_n(x_0=C_{k-1} | x_t) / q(x_t | x_0=C_{k-1})
        q = log_p_x_0 - log_q_x_t_given_x_0

        # sum_0 = p_0(x_0=C_0 | x_t) / q(x_t | x_0=C_0) + ... + p_0(x_0=C_{k-1} | x_t) / q(x_t | x_0=C_{k-1}), ... ,
        # sum_n = p_n(x_0=C_0 | x_t) / q(x_t | x_0=C_0) + ... + p_n(x_0=C_{k-1} | x_t) / q(x_t | x_0=C_{k-1})
        q_log_sum_exp = torch.logsumexp(q, dim=1, keepdim=True)

        # p_0(x_0=C_0 | x_t) / q(x_t | x_0=C_0) / sum_0          ...      p_n(x_0=C_0 | x_t) / q(x_t | x_0=C_0) / sum_n
        #                        .                                                            .
        #                        .                                                            .
        #                        .                                                            .
        # p_0(x_0=C_{k-1} | x_t) / q(x_t | x_0=C_{k-1}) / sum_0  ...      p_n(x_0=C_{k-1} | x_t) / q(x_t | x_0=C_{k-1}) / sum_n
        q = q - q_log_sum_exp

        # (p_0(x_0=C_0 | x_t) / q(x_t | x_0=C_0) / sum_0) * a_cumulative_{t-1} + b_cumulative_{t-1} ... (p_n(x_0=C_0 | x_t) / q(x_t | x_0=C_0) / sum_n) * a_cumulative_{t-1} + b_cumulative_{t-1}
        #                                     .                                                                              .
        #                                     .                                                                              .
        #                                     .                                                                              .
        # (p_0(x_0=C_{k-1} | x_t) / q(x_t | x_0=C_{k-1}) / sum_0) * a_cumulative_{t-1} + b_cumulative_{t-1} ... (p_n(x_0=C_{k-1} | x_t) / q(x_t | x_0=C_{k-1}) / sum_n) * a_cumulative_{t-1} + b_cumulative_{t-1}
        # c_cumulative_{t-1}                                                                                ...
c_cumulative_{t-1} q = self.apply_cumulative_transitions(q, t - 1) # ((p_0(x_0=C_0 | x_t) / q(x_t | x_0=C_0) / sum_0) * a_cumulative_{t-1} + b_cumulative_{t-1}) * q(x_t | x_{t-1}=C_0) * sum_0 ... ((p_n(x_0=C_0 | x_t) / q(x_t | x_0=C_0) / sum_n) * a_cumulative_{t-1} + b_cumulative_{t-1}) * q(x_t | x_{t-1}=C_0) * sum_n # . . . # . . . # . . . # ((p_0(x_0=C_{k-1} | x_t) / q(x_t | x_0=C_{k-1}) / sum_0) * a_cumulative_{t-1} + b_cumulative_{t-1}) * q(x_t | x_{t-1}=C_{k-1}) * sum_0 ... ((p_n(x_0=C_{k-1} | x_t) / q(x_t | x_0=C_{k-1}) / sum_n) * a_cumulative_{t-1} + b_cumulative_{t-1}) * q(x_t | x_{t-1}=C_{k-1}) * sum_n # c_cumulative_{t-1} * q(x_t | x_{t-1}=C_k) * sum_0 ... c_cumulative_{t-1} * q(x_t | x_{t-1}=C_k) * sum_0 log_p_x_t_min_1 = q + log_q_t_given_x_t_min_1 + q_log_sum_exp # For each column, there are two possible cases. # # Where: # - sum(p_n(x_0))) is summing over all classes for x_0 # - C_i is the class transitioning from (not to be confused with c_t and c_cumulative_t being used for gamma's) # - C_j is the class transitioning to # # 1. x_t is masked i.e. x_t = c_k # # Simplifying the expression, the column vector is: # . # . # . # (c_t / c_cumulative_t) * (a_cumulative_{t-1} * p_n(x_0 = C_i | x_t) + b_cumulative_{t-1} * sum(p_n(x_0))) # . # . # . # (c_cumulative_{t-1} / c_cumulative_t) * sum(p_n(x_0)) # # From equation (11) stated in terms of forward probabilities, the last row is trivially verified. # # For the other rows, we can state the equation as ... # # (c_t / c_cumulative_t) * [b_cumulative_{t-1} * p(x_0=c_0) + ... + (a_cumulative_{t-1} + b_cumulative_{t-1}) * p(x_0=C_i) + ... + b_cumulative_{k-1} * p(x_0=c_{k-1})] # # This verifies the other rows. # # 2. x_t is not masked # # Simplifying the expression, there are two cases for the rows of the column vector, where C_j = C_i and where C_j != C_i: # . # . # . # C_j != C_i: b_t * ((b_cumulative_{t-1} / b_cumulative_t) * p_n(x_0 = c_0) + ... + ((a_cumulative_{t-1} + b_cumulative_{t-1}) / b_cumulative_t) * p_n(x_0 = C_i) + ... + (b_cumulative_{t-1} / (a_cumulative_t + b_cumulative_t)) * p_n(c_0=C_j) + ... + (b_cumulative_{t-1} / b_cumulative_t) * p_n(x_0 = c_{k-1})) # . # . # . # C_j = C_i: (a_t + b_t) * ((b_cumulative_{t-1} / b_cumulative_t) * p_n(x_0 = c_0) + ... + ((a_cumulative_{t-1} + b_cumulative_{t-1}) / (a_cumulative_t + b_cumulative_t)) * p_n(x_0 = C_i = C_j) + ... + (b_cumulative_{t-1} / b_cumulative_t) * p_n(x_0 = c_{k-1})) # . # . # . # 0 # # The last row is trivially verified. The other rows can be verified by directly expanding equation (11) stated in terms of forward probabilities. return log_p_x_t_min_1 def log_Q_t_transitioning_to_known_class( self, *, t: torch.int, x_t: torch.LongTensor, log_onehot_x_t: torch.Tensor, cumulative: bool ): """ Calculates the log probabilities of the rows from the (cumulative or non-cumulative) transition matrix for each latent pixel in `x_t`. Args: t (`torch.Long`): The timestep that determines which transition matrix is used. x_t (`torch.LongTensor` of shape `(batch size, num latent pixels)`): The classes of each latent pixel at time `t`. log_onehot_x_t (`torch.Tensor` of shape `(batch size, num classes, num latent pixels)`): The log one-hot vectors of `x_t`. cumulative (`bool`): If cumulative is `False`, the single step transition matrix `t-1`->`t` is used. If cumulative is `True`, the cumulative transition matrix `0`->`t` is used. 
        Returns:
            `torch.Tensor` of shape `(batch size, num classes - 1, num latent pixels)`:
                Each _column_ of the returned matrix is a _row_ of log probabilities of the complete probability
                transition matrix.

                When non-cumulative, returns `self.num_embed - 1` rows because the initial latent pixel cannot be
                masked.

                Where:
                - `q_n` is the probability distribution for the forward process of the `n`th latent pixel.
                - C_0 is a class of a latent pixel embedding
                - C_k is the class of the masked latent pixel

                non-cumulative result (omitting logarithms):

                ```
                q_0(x_t | x_{t-1} = C_0) ... q_n(x_t | x_{t-1} = C_0)
                          .      .                     .
                          .               .            .
                          .                      .      .
                q_0(x_t | x_{t-1} = C_k) ... q_n(x_t | x_{t-1} = C_k)
                ```

                cumulative result (omitting logarithms):

                ```
                q_0_cumulative(x_t | x_0 = C_0)    ...  q_n_cumulative(x_t | x_0 = C_0)
                          .               .                          .
                          .                        .                 .
                          .                               .          .
                q_0_cumulative(x_t | x_0 = C_{k-1}) ... q_n_cumulative(x_t | x_0 = C_{k-1})
                ```
        """
        if cumulative:
            a = self.log_cumprod_at[t]
            b = self.log_cumprod_bt[t]
            c = self.log_cumprod_ct[t]
        else:
            a = self.log_at[t]
            b = self.log_bt[t]
            c = self.log_ct[t]

        if not cumulative:
            # The values in the onehot vector can also be used as the logprobs for transitioning
            # from masked latent pixels. If we are not calculating the cumulative transitions,
            # we need to save these vectors to be re-appended to the final matrix so the values
            # aren't overwritten.
            #
            # `P(x_t!=mask|x_{t-1=mask}) = 0` and 0 will be the value of the last row of the onehot vector
            # if x_t is not masked
            #
            # `P(x_t=mask|x_{t-1=mask}) = 1` and 1 will be the value of the last row of the onehot vector
            # if x_t is masked
            log_onehot_x_t_transitioning_from_masked = log_onehot_x_t[:, -1, :].unsqueeze(1)

        # `index_to_log_onehot` will add onehot vectors for masked pixels,
        # so the default one hot matrix has one too many rows. See the doc string
        # for an explanation of the dimensionality of the returned matrix.
        log_onehot_x_t = log_onehot_x_t[:, :-1, :]

        # this is a cheeky trick to produce the transition probabilities using log one-hot vectors.
        #
        # Don't worry about what values this sets in the columns that mark transitions
        # to masked latent pixels. They are overwritten later with the `mask_class_mask`.
        #
        # Looking at the below logspace formula in non-logspace, each value will evaluate to either
        # `1 * a + b = a + b` where `log_Q_t` has the one hot value in the column
        # or
        # `0 * a + b = b` where `log_Q_t` has the 0 values in the column.
        #
        # See equation 7 for more details.
        log_Q_t = (log_onehot_x_t + a).logaddexp(b)

        # The whole column of each masked pixel is `c`
        mask_class_mask = x_t == self.mask_class
        mask_class_mask = mask_class_mask.unsqueeze(1).expand(-1, self.num_embed - 1, -1)
        log_Q_t[mask_class_mask] = c

        if not cumulative:
            log_Q_t = torch.cat((log_Q_t, log_onehot_x_t_transitioning_from_masked), dim=1)

        return log_Q_t

    def apply_cumulative_transitions(self, q, t):
        bsz = q.shape[0]
        a = self.log_cumprod_at[t]
        b = self.log_cumprod_bt[t]
        c = self.log_cumprod_ct[t]

        num_latent_pixels = q.shape[2]
        c = c.expand(bsz, 1, num_latent_pixels)

        q = (q + a).logaddexp(b)
        q = torch.cat((q, c), dim=1)

        return q
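
# An end-to-end denoising sketch (not part of the original file). `transformer`
# is a placeholder model that must return log-probabilities of shape
# (batch, num_vec_classes - 1, num_latent_pixels), matching what `step` expects
# as `model_output`; the class count and pixel count below are illustrative.
def _vq_diffusion_sampling_sketch(transformer, num_vec_classes=4097, num_latent_pixels=1024):
    scheduler = VQDiffusionScheduler(num_vec_classes=num_vec_classes)
    scheduler.set_timesteps(100)

    # start from the fully masked state: every latent pixel is the mask class
    sample = torch.full((1, num_latent_pixels), scheduler.mask_class, dtype=torch.long)

    for t in scheduler.timesteps:
        log_p_x_0 = transformer(sample, t)  # placeholder model call
        sample = scheduler.step(log_p_x_0, t, sample).prev_sample
    return sample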
diffusers/src/diffusers/schedulers/scheduling_vq_diffusion.py/0
{ "file_path": "diffusers/src/diffusers/schedulers/scheduling_vq_diffusion.py", "repo_id": "diffusers", "token_count": 12476 }
156
# This file is autogenerated by the command `make fix-copies`, do not edit. from ..utils import DummyObject, requires_backends class OnnxStableDiffusionImg2ImgPipeline(metaclass=DummyObject): _backends = ["torch", "transformers", "onnx"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch", "transformers", "onnx"]) @classmethod def from_config(cls, *args, **kwargs): requires_backends(cls, ["torch", "transformers", "onnx"]) @classmethod def from_pretrained(cls, *args, **kwargs): requires_backends(cls, ["torch", "transformers", "onnx"]) class OnnxStableDiffusionInpaintPipeline(metaclass=DummyObject): _backends = ["torch", "transformers", "onnx"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch", "transformers", "onnx"]) @classmethod def from_config(cls, *args, **kwargs): requires_backends(cls, ["torch", "transformers", "onnx"]) @classmethod def from_pretrained(cls, *args, **kwargs): requires_backends(cls, ["torch", "transformers", "onnx"]) class OnnxStableDiffusionInpaintPipelineLegacy(metaclass=DummyObject): _backends = ["torch", "transformers", "onnx"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch", "transformers", "onnx"]) @classmethod def from_config(cls, *args, **kwargs): requires_backends(cls, ["torch", "transformers", "onnx"]) @classmethod def from_pretrained(cls, *args, **kwargs): requires_backends(cls, ["torch", "transformers", "onnx"]) class OnnxStableDiffusionPipeline(metaclass=DummyObject): _backends = ["torch", "transformers", "onnx"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch", "transformers", "onnx"]) @classmethod def from_config(cls, *args, **kwargs): requires_backends(cls, ["torch", "transformers", "onnx"]) @classmethod def from_pretrained(cls, *args, **kwargs): requires_backends(cls, ["torch", "transformers", "onnx"]) class OnnxStableDiffusionUpscalePipeline(metaclass=DummyObject): _backends = ["torch", "transformers", "onnx"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch", "transformers", "onnx"]) @classmethod def from_config(cls, *args, **kwargs): requires_backends(cls, ["torch", "transformers", "onnx"]) @classmethod def from_pretrained(cls, *args, **kwargs): requires_backends(cls, ["torch", "transformers", "onnx"]) class StableDiffusionOnnxPipeline(metaclass=DummyObject): _backends = ["torch", "transformers", "onnx"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch", "transformers", "onnx"]) @classmethod def from_config(cls, *args, **kwargs): requires_backends(cls, ["torch", "transformers", "onnx"]) @classmethod def from_pretrained(cls, *args, **kwargs): requires_backends(cls, ["torch", "transformers", "onnx"])
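
# Illustrative behavior (not part of the autogenerated file): using one of
# these placeholders fails fast with an install hint rather than a confusing
# AttributeError. A minimal sketch with a made-up class:
class _DemoOnnxOnlyObject(metaclass=DummyObject):
    _backends = ["onnx"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["onnx"])

# If onnxruntime is missing, `_DemoOnnxOnlyObject()` raises an ImportError
# telling the user exactly which backend to install.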
diffusers/src/diffusers/utils/dummy_torch_and_transformers_and_onnx_objects.py/0
{ "file_path": "diffusers/src/diffusers/utils/dummy_torch_and_transformers_and_onnx_objects.py", "repo_id": "diffusers", "token_count": 1270 }
157
# Copyright 2024 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
PyTorch utilities: Utilities related to PyTorch
"""

from typing import List, Optional, Tuple, Union

from . import logging
from .import_utils import is_torch_available, is_torch_version


if is_torch_available():
    import torch
    from torch.fft import fftn, fftshift, ifftn, ifftshift

logger = logging.get_logger(__name__)  # pylint: disable=invalid-name

try:
    from torch._dynamo import allow_in_graph as maybe_allow_in_graph
except (ImportError, ModuleNotFoundError):

    def maybe_allow_in_graph(cls):
        return cls


def randn_tensor(
    shape: Union[Tuple, List],
    generator: Optional[Union[List["torch.Generator"], "torch.Generator"]] = None,
    device: Optional["torch.device"] = None,
    dtype: Optional["torch.dtype"] = None,
    layout: Optional["torch.layout"] = None,
):
    """A helper function to create random tensors on the desired `device` with the desired `dtype`. When passing a
    list of generators, you can seed each batch element individually. If CPU generators are passed, the tensor is
    always created on the CPU.
    """
    # the device on which the tensor is created defaults to `device`
    rand_device = device
    batch_size = shape[0]

    layout = layout or torch.strided
    device = device or torch.device("cpu")

    if generator is not None:
        gen_device_type = generator.device.type if not isinstance(generator, list) else generator[0].device.type
        if gen_device_type != device.type and gen_device_type == "cpu":
            rand_device = "cpu"
            if device != "mps":
                logger.info(
                    f"The passed generator was created on 'cpu' even though a tensor on {device} was expected."
                    f" Tensors will be created on 'cpu' and then moved to {device}. Note that one can probably"
                    f" slightly speed up this function by passing a generator that was created on the {device} device."
                )
        elif gen_device_type != device.type and gen_device_type == "cuda":
            raise ValueError(f"Cannot generate a {device} tensor from a generator of type {gen_device_type}.")

    # make sure generator list of length 1 is treated like a non-list
    if isinstance(generator, list) and len(generator) == 1:
        generator = generator[0]

    if isinstance(generator, list):
        shape = (1,) + shape[1:]
        latents = [
            torch.randn(shape, generator=generator[i], device=rand_device, dtype=dtype, layout=layout)
            for i in range(batch_size)
        ]
        latents = torch.cat(latents, dim=0).to(device)
    else:
        latents = torch.randn(shape, generator=generator, device=rand_device, dtype=dtype, layout=layout).to(device)

    return latents


def is_compiled_module(module) -> bool:
    """Check whether the module was compiled with torch.compile()"""
    if is_torch_version("<", "2.0.0") or not hasattr(torch, "_dynamo"):
        return False
    return isinstance(module, torch._dynamo.eval_frame.OptimizedModule)


def fourier_filter(x_in: "torch.Tensor", threshold: int, scale: int) -> "torch.Tensor":
    """Fourier filter as introduced in FreeU (https://arxiv.org/abs/2309.11497).
    This version of the method comes from here:
    https://github.com/huggingface/diffusers/pull/5164#issuecomment-1732638706
    """
    x = x_in
    B, C, H, W = x.shape

    # Non-power of 2 images must be float32
    if (W & (W - 1)) != 0 or (H & (H - 1)) != 0:
        x = x.to(dtype=torch.float32)

    # FFT
    x_freq = fftn(x, dim=(-2, -1))
    x_freq = fftshift(x_freq, dim=(-2, -1))

    B, C, H, W = x_freq.shape
    mask = torch.ones((B, C, H, W), device=x.device)

    crow, ccol = H // 2, W // 2
    mask[..., crow - threshold : crow + threshold, ccol - threshold : ccol + threshold] = scale
    x_freq = x_freq * mask

    # IFFT
    x_freq = ifftshift(x_freq, dim=(-2, -1))
    x_filtered = ifftn(x_freq, dim=(-2, -1)).real

    return x_filtered.to(dtype=x_in.dtype)


def apply_freeu(
    resolution_idx: int, hidden_states: "torch.Tensor", res_hidden_states: "torch.Tensor", **freeu_kwargs
) -> Tuple["torch.Tensor", "torch.Tensor"]:
    """Applies the FreeU mechanism as introduced in https://arxiv.org/abs/2309.11497. Adapted from the official code
    repository: https://github.com/ChenyangSi/FreeU.

    Args:
        resolution_idx (`int`): Integer denoting the UNet block where FreeU is being applied.
        hidden_states (`torch.Tensor`): Inputs to the underlying block.
        res_hidden_states (`torch.Tensor`): Features from the skip block corresponding to the underlying block.
        s1 (`float`): Scaling factor for stage 1 to attenuate the contributions of the skip features.
        s2 (`float`): Scaling factor for stage 2 to attenuate the contributions of the skip features.
        b1 (`float`): Scaling factor for stage 1 to amplify the contributions of backbone features.
        b2 (`float`): Scaling factor for stage 2 to amplify the contributions of backbone features.
    """
    if resolution_idx == 0:
        num_half_channels = hidden_states.shape[1] // 2
        hidden_states[:, :num_half_channels] = hidden_states[:, :num_half_channels] * freeu_kwargs["b1"]
        res_hidden_states = fourier_filter(res_hidden_states, threshold=1, scale=freeu_kwargs["s1"])
    if resolution_idx == 1:
        num_half_channels = hidden_states.shape[1] // 2
        hidden_states[:, :num_half_channels] = hidden_states[:, :num_half_channels] * freeu_kwargs["b2"]
        res_hidden_states = fourier_filter(res_hidden_states, threshold=1, scale=freeu_kwargs["s2"])

    return hidden_states, res_hidden_states
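
# A small usage sketch (not part of the original file): per-sample seeding with
# a list of generators, which is what the list branch of `randn_tensor` enables.
def _randn_tensor_demo():
    generators = [torch.Generator().manual_seed(i) for i in range(4)]
    latents = randn_tensor((4, 4, 64, 64), generator=generators, device=torch.device("cpu"))
    # Element i depends only on seed i, so an individual sample is reproducible
    # regardless of how large the rest of the batch is.
    single = randn_tensor((1, 4, 64, 64), generator=torch.Generator().manual_seed(2))
    assert torch.equal(latents[2:3], single)
    return latents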
diffusers/src/diffusers/utils/torch_utils.py/0
{ "file_path": "diffusers/src/diffusers/utils/torch_utils.py", "repo_id": "diffusers", "token_count": 2338 }
158
import unittest from diffusers import FlaxAutoencoderKL from diffusers.utils import is_flax_available from diffusers.utils.testing_utils import require_flax from ..test_modeling_common_flax import FlaxModelTesterMixin if is_flax_available(): import jax @require_flax class FlaxAutoencoderKLTests(FlaxModelTesterMixin, unittest.TestCase): model_class = FlaxAutoencoderKL @property def dummy_input(self): batch_size = 4 num_channels = 3 sizes = (32, 32) prng_key = jax.random.PRNGKey(0) image = jax.random.uniform(prng_key, ((batch_size, num_channels) + sizes)) return {"sample": image, "prng_key": prng_key} def prepare_init_args_and_inputs_for_common(self): init_dict = { "block_out_channels": [32, 64], "in_channels": 3, "out_channels": 3, "down_block_types": ["DownEncoderBlock2D", "DownEncoderBlock2D"], "up_block_types": ["UpDecoderBlock2D", "UpDecoderBlock2D"], "latent_channels": 4, } inputs_dict = self.dummy_input return init_dict, inputs_dict
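# --- Editorial sketch (not part of this test file): running the tiny Flax VAE by
# hand with the same init args. The `init_weights`/`apply` calls follow the usual
# diffusers Flax model pattern and are an assumption here, not something this
# file itself guarantees.
import jax

from diffusers import FlaxAutoencoderKL

model = FlaxAutoencoderKL(
    block_out_channels=[32, 64],
    in_channels=3,
    out_channels=3,
    down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"],
    up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"],
    latent_channels=4,
)
params = model.init_weights(jax.random.PRNGKey(0))
sample = jax.random.uniform(jax.random.PRNGKey(1), (4, 3, 32, 32))
out = model.apply({"params": params}, sample)  # deterministic reconstruction pass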
diffusers/tests/models/autoencoders/test_models_vae_flax.py/0
{ "file_path": "diffusers/tests/models/autoencoders/test_models_vae_flax.py", "repo_id": "diffusers", "token_count": 513 }
159
# coding=utf-8 # Copyright 2024 HuggingFace Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import unittest import torch from diffusers import LuminaNextDiT2DModel from diffusers.utils.testing_utils import ( enable_full_determinism, torch_device, ) from ..test_modeling_common import ModelTesterMixin enable_full_determinism() class LuminaNextDiT2DModelTransformerTests(ModelTesterMixin, unittest.TestCase): model_class = LuminaNextDiT2DModel main_input_name = "hidden_states" @property def dummy_input(self): """ Args: None Returns: Dict: Dictionary of dummy input tensors """ batch_size = 2 # N num_channels = 4 # C height = width = 16 # H, W embedding_dim = 32 # D sequence_length = 16 # L hidden_states = torch.randn((batch_size, num_channels, height, width)).to(torch_device) encoder_hidden_states = torch.randn((batch_size, sequence_length, embedding_dim)).to(torch_device) timestep = torch.rand(size=(batch_size,)).to(torch_device) encoder_mask = torch.randn(size=(batch_size, sequence_length)).to(torch_device) image_rotary_emb = torch.randn((384, 384, 4)).to(torch_device) return { "hidden_states": hidden_states, "encoder_hidden_states": encoder_hidden_states, "timestep": timestep, "encoder_mask": encoder_mask, "image_rotary_emb": image_rotary_emb, "cross_attention_kwargs": {}, } @property def input_shape(self): """ Args: None Returns: Tuple: (int, int, int) """ return (4, 16, 16) @property def output_shape(self): """ Args: None Returns: Tuple: (int, int, int) """ return (4, 16, 16) def prepare_init_args_and_inputs_for_common(self): """ Args: None Returns: Tuple: (Dict, Dict) """ init_dict = { "sample_size": 16, "patch_size": 2, "in_channels": 4, "hidden_size": 24, "num_layers": 2, "num_attention_heads": 3, "num_kv_heads": 1, "multiple_of": 16, "ffn_dim_multiplier": None, "norm_eps": 1e-5, "learn_sigma": False, "qk_norm": True, "cross_attention_dim": 32, "scaling_factor": 1.0, } inputs_dict = self.dummy_input return init_dict, inputs_dict
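# --- Editorial sketch (not part of this test file): roughly what the
# `enable_full_determinism()` call above pins down. A simplified assumption;
# see `diffusers.utils.testing_utils` for the real helper.
import os

import torch


def naive_full_determinism(seed: int = 0) -> None:
    torch.manual_seed(seed)  # seeds the CPU and CUDA RNG streams
    torch.use_deterministic_algorithms(True)  # fail loudly on nondeterministic ops
    torch.backends.cudnn.deterministic = True
    torch.backends.cudnn.benchmark = False
    # cuBLAS needs this workspace config for deterministic matmuls on CUDA >= 10.2.
    os.environ["CUBLAS_WORKSPACE_CONFIG"] = ":4096:8"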
diffusers/tests/models/transformers/test_models_transformer_lumina.py/0
{ "file_path": "diffusers/tests/models/transformers/test_models_transformer_lumina.py", "repo_id": "diffusers", "token_count": 1467 }
160
# coding=utf-8 # Copyright 2024 HuggingFace Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import json import tempfile import unittest from pathlib import Path from diffusers import ( DDIMScheduler, DDPMScheduler, DPMSolverMultistepScheduler, EulerAncestralDiscreteScheduler, EulerDiscreteScheduler, PNDMScheduler, logging, ) from diffusers.configuration_utils import ConfigMixin, register_to_config from diffusers.utils.testing_utils import CaptureLogger class SampleObject(ConfigMixin): config_name = "config.json" @register_to_config def __init__( self, a=2, b=5, c=(2, 5), d="for diffusion", e=[1, 3], ): pass class SampleObject2(ConfigMixin): config_name = "config.json" @register_to_config def __init__( self, a=2, b=5, c=(2, 5), d="for diffusion", f=[1, 3], ): pass class SampleObject3(ConfigMixin): config_name = "config.json" @register_to_config def __init__( self, a=2, b=5, c=(2, 5), d="for diffusion", e=[1, 3], f=[1, 3], ): pass class SampleObject4(ConfigMixin): config_name = "config.json" @register_to_config def __init__( self, a=2, b=5, c=(2, 5), d="for diffusion", e=[1, 5], f=[5, 4], ): pass class SampleObjectPaths(ConfigMixin): config_name = "config.json" @register_to_config def __init__(self, test_file_1=Path("foo/bar"), test_file_2=Path("foo bar\\bar")): pass class ConfigTester(unittest.TestCase): def test_load_not_from_mixin(self): with self.assertRaises(ValueError): ConfigMixin.load_config("dummy_path") def test_register_to_config(self): obj = SampleObject() config = obj.config assert config["a"] == 2 assert config["b"] == 5 assert config["c"] == (2, 5) assert config["d"] == "for diffusion" assert config["e"] == [1, 3] # init ignore private arguments obj = SampleObject(_name_or_path="lalala") config = obj.config assert config["a"] == 2 assert config["b"] == 5 assert config["c"] == (2, 5) assert config["d"] == "for diffusion" assert config["e"] == [1, 3] # can override default obj = SampleObject(c=6) config = obj.config assert config["a"] == 2 assert config["b"] == 5 assert config["c"] == 6 assert config["d"] == "for diffusion" assert config["e"] == [1, 3] # can use positional arguments. 
obj = SampleObject(1, c=6) config = obj.config assert config["a"] == 1 assert config["b"] == 5 assert config["c"] == 6 assert config["d"] == "for diffusion" assert config["e"] == [1, 3] def test_save_load(self): obj = SampleObject() config = obj.config assert config["a"] == 2 assert config["b"] == 5 assert config["c"] == (2, 5) assert config["d"] == "for diffusion" assert config["e"] == [1, 3] with tempfile.TemporaryDirectory() as tmpdirname: obj.save_config(tmpdirname) new_obj = SampleObject.from_config(SampleObject.load_config(tmpdirname)) new_config = new_obj.config # unfreeze configs config = dict(config) new_config = dict(new_config) assert config.pop("c") == (2, 5) # instantiated as tuple assert new_config.pop("c") == [2, 5] # saved & loaded as list because of json config.pop("_use_default_values") assert config == new_config def test_load_ddim_from_pndm(self): logger = logging.get_logger("diffusers.configuration_utils") # 30 for warning logger.setLevel(30) with CaptureLogger(logger) as cap_logger: ddim = DDIMScheduler.from_pretrained( "hf-internal-testing/tiny-stable-diffusion-torch", subfolder="scheduler" ) assert ddim.__class__ == DDIMScheduler # no warning should be thrown assert cap_logger.out == "" def test_load_euler_from_pndm(self): logger = logging.get_logger("diffusers.configuration_utils") # 30 for warning logger.setLevel(30) with CaptureLogger(logger) as cap_logger: euler = EulerDiscreteScheduler.from_pretrained( "hf-internal-testing/tiny-stable-diffusion-torch", subfolder="scheduler" ) assert euler.__class__ == EulerDiscreteScheduler # no warning should be thrown assert cap_logger.out == "" def test_load_euler_ancestral_from_pndm(self): logger = logging.get_logger("diffusers.configuration_utils") # 30 for warning logger.setLevel(30) with CaptureLogger(logger) as cap_logger: euler = EulerAncestralDiscreteScheduler.from_pretrained( "hf-internal-testing/tiny-stable-diffusion-torch", subfolder="scheduler" ) assert euler.__class__ == EulerAncestralDiscreteScheduler # no warning should be thrown assert cap_logger.out == "" def test_load_pndm(self): logger = logging.get_logger("diffusers.configuration_utils") # 30 for warning logger.setLevel(30) with CaptureLogger(logger) as cap_logger: pndm = PNDMScheduler.from_pretrained( "hf-internal-testing/tiny-stable-diffusion-torch", subfolder="scheduler" ) assert pndm.__class__ == PNDMScheduler # no warning should be thrown assert cap_logger.out == "" def test_overwrite_config_on_load(self): logger = logging.get_logger("diffusers.configuration_utils") # 30 for warning logger.setLevel(30) with CaptureLogger(logger) as cap_logger: ddpm = DDPMScheduler.from_pretrained( "hf-internal-testing/tiny-stable-diffusion-torch", subfolder="scheduler", prediction_type="sample", beta_end=8, ) with CaptureLogger(logger) as cap_logger_2: ddpm_2 = DDPMScheduler.from_pretrained("google/ddpm-celebahq-256", beta_start=88) assert ddpm.__class__ == DDPMScheduler assert ddpm.config.prediction_type == "sample" assert ddpm.config.beta_end == 8 assert ddpm_2.config.beta_start == 88 # no warning should be thrown assert cap_logger.out == "" assert cap_logger_2.out == "" def test_load_dpmsolver(self): logger = logging.get_logger("diffusers.configuration_utils") # 30 for warning logger.setLevel(30) with CaptureLogger(logger) as cap_logger: dpm = DPMSolverMultistepScheduler.from_pretrained( "hf-internal-testing/tiny-stable-diffusion-torch", subfolder="scheduler" ) assert dpm.__class__ == DPMSolverMultistepScheduler # no warning should be thrown assert cap_logger.out 
== "" def test_use_default_values(self): # let's first save a config that should be in the form # a=2, # b=5, # c=(2, 5), # d="for diffusion", # e=[1, 3], config = SampleObject() config_dict = {k: v for k, v in config.config.items() if not k.startswith("_")} # make sure that default config has all keys in `_use_default_values` assert set(config_dict.keys()) == set(config.config._use_default_values) with tempfile.TemporaryDirectory() as tmpdirname: config.save_config(tmpdirname) # now loading it with SampleObject2 should put f into `_use_default_values` config = SampleObject2.from_config(SampleObject2.load_config(tmpdirname)) assert "f" in config.config._use_default_values assert config.config.f == [1, 3] # now loading the config, should **NOT** use [1, 3] for `f`, but the default [1, 4] value # **BECAUSE** it is part of `config.config._use_default_values` new_config = SampleObject4.from_config(config.config) assert new_config.config.f == [5, 4] config.config._use_default_values.pop() new_config_2 = SampleObject4.from_config(config.config) assert new_config_2.config.f == [1, 3] # Nevertheless "e" should still be correctly loaded to [1, 3] from SampleObject2 instead of defaulting to [1, 5] assert new_config_2.config.e == [1, 3] def test_check_path_types(self): # Verify that we get a string returned from a WindowsPath or PosixPath (depending on system) config = SampleObjectPaths() json_string = config.to_json_string() result = json.loads(json_string) assert result["test_file_1"] == config.config.test_file_1.as_posix() assert result["test_file_2"] == config.config.test_file_2.as_posix()
diffusers/tests/others/test_config.py/0
{ "file_path": "diffusers/tests/others/test_config.py", "repo_id": "diffusers", "token_count": 4259 }
161
# coding=utf-8 # Copyright 2024 HuggingFace Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import gc import random import unittest import numpy as np import torch from diffusers import DDIMScheduler, KandinskyV22Pipeline, KandinskyV22PriorPipeline, UNet2DConditionModel, VQModel from diffusers.utils.testing_utils import ( enable_full_determinism, floats_tensor, load_numpy, numpy_cosine_similarity_distance, require_torch_gpu, slow, ) from ..test_pipelines_common import PipelineTesterMixin enable_full_determinism() class Dummies: @property def text_embedder_hidden_size(self): return 32 @property def time_input_dim(self): return 32 @property def block_out_channels_0(self): return self.time_input_dim @property def time_embed_dim(self): return self.time_input_dim * 4 @property def cross_attention_dim(self): return 32 @property def dummy_unet(self): torch.manual_seed(0) model_kwargs = { "in_channels": 4, # Out channels is double in channels because predicts mean and variance "out_channels": 8, "addition_embed_type": "image", "down_block_types": ("ResnetDownsampleBlock2D", "SimpleCrossAttnDownBlock2D"), "up_block_types": ("SimpleCrossAttnUpBlock2D", "ResnetUpsampleBlock2D"), "mid_block_type": "UNetMidBlock2DSimpleCrossAttn", "block_out_channels": (self.block_out_channels_0, self.block_out_channels_0 * 2), "layers_per_block": 1, "encoder_hid_dim": self.text_embedder_hidden_size, "encoder_hid_dim_type": "image_proj", "cross_attention_dim": self.cross_attention_dim, "attention_head_dim": 4, "resnet_time_scale_shift": "scale_shift", "class_embed_type": None, } model = UNet2DConditionModel(**model_kwargs) return model @property def dummy_movq_kwargs(self): return { "block_out_channels": [32, 64], "down_block_types": ["DownEncoderBlock2D", "AttnDownEncoderBlock2D"], "in_channels": 3, "latent_channels": 4, "layers_per_block": 1, "norm_num_groups": 8, "norm_type": "spatial", "num_vq_embeddings": 12, "out_channels": 3, "up_block_types": [ "AttnUpDecoderBlock2D", "UpDecoderBlock2D", ], "vq_embed_dim": 4, } @property def dummy_movq(self): torch.manual_seed(0) model = VQModel(**self.dummy_movq_kwargs) return model def get_dummy_components(self): unet = self.dummy_unet movq = self.dummy_movq scheduler = DDIMScheduler( num_train_timesteps=1000, beta_schedule="linear", beta_start=0.00085, beta_end=0.012, clip_sample=False, set_alpha_to_one=False, steps_offset=1, prediction_type="epsilon", thresholding=False, ) components = { "unet": unet, "scheduler": scheduler, "movq": movq, } return components def get_dummy_inputs(self, device, seed=0): image_embeds = floats_tensor((1, self.text_embedder_hidden_size), rng=random.Random(seed)).to(device) negative_image_embeds = floats_tensor((1, self.text_embedder_hidden_size), rng=random.Random(seed + 1)).to( device ) if str(device).startswith("mps"): generator = torch.manual_seed(seed) else: generator = torch.Generator(device=device).manual_seed(seed) inputs = { "image_embeds": image_embeds, "negative_image_embeds": negative_image_embeds, "generator": generator, "height": 64, "width": 
64, "guidance_scale": 4.0, "num_inference_steps": 2, "output_type": "np", } return inputs class KandinskyV22PipelineFastTests(PipelineTesterMixin, unittest.TestCase): pipeline_class = KandinskyV22Pipeline params = [ "image_embeds", "negative_image_embeds", ] batch_params = ["image_embeds", "negative_image_embeds"] required_optional_params = [ "generator", "height", "width", "latents", "guidance_scale", "num_inference_steps", "return_dict", "guidance_scale", "num_images_per_prompt", "output_type", "return_dict", ] callback_cfg_params = ["image_embds"] test_xformers_attention = False def get_dummy_inputs(self, device, seed=0): dummies = Dummies() return dummies.get_dummy_inputs(device=device, seed=seed) def get_dummy_components(self): dummies = Dummies() return dummies.get_dummy_components() def test_kandinsky(self): device = "cpu" components = self.get_dummy_components() pipe = self.pipeline_class(**components) pipe = pipe.to(device) pipe.set_progress_bar_config(disable=None) output = pipe(**self.get_dummy_inputs(device)) image = output.images image_from_tuple = pipe( **self.get_dummy_inputs(device), return_dict=False, )[0] image_slice = image[0, -3:, -3:, -1] image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1] assert image.shape == (1, 64, 64, 3) expected_slice = np.array([0.3420, 0.9505, 0.3919, 1.0000, 0.5188, 0.3109, 0.6139, 0.5624, 0.6811]) assert ( np.abs(image_slice.flatten() - expected_slice).max() < 1e-2 ), f" expected_slice {expected_slice}, but got {image_slice.flatten()}" assert ( np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2 ), f" expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}" def test_float16_inference(self): super().test_float16_inference(expected_max_diff=1e-1) @slow @require_torch_gpu class KandinskyV22PipelineIntegrationTests(unittest.TestCase): def setUp(self): # clean up the VRAM before each test super().setUp() gc.collect() torch.cuda.empty_cache() def tearDown(self): # clean up the VRAM after each test super().tearDown() gc.collect() torch.cuda.empty_cache() def test_kandinsky_text2img(self): expected_image = load_numpy( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main" "/kandinskyv22/kandinskyv22_text2img_cat_fp16.npy" ) pipe_prior = KandinskyV22PriorPipeline.from_pretrained( "kandinsky-community/kandinsky-2-2-prior", torch_dtype=torch.float16 ) pipe_prior.enable_model_cpu_offload() pipeline = KandinskyV22Pipeline.from_pretrained( "kandinsky-community/kandinsky-2-2-decoder", torch_dtype=torch.float16 ) pipeline.enable_model_cpu_offload() pipeline.set_progress_bar_config(disable=None) prompt = "red cat, 4k photo" generator = torch.Generator(device="cpu").manual_seed(0) image_emb, zero_image_emb = pipe_prior( prompt, generator=generator, num_inference_steps=3, negative_prompt="", ).to_tuple() generator = torch.Generator(device="cpu").manual_seed(0) output = pipeline( image_embeds=image_emb, negative_image_embeds=zero_image_emb, generator=generator, num_inference_steps=3, output_type="np", ) image = output.images[0] assert image.shape == (512, 512, 3) max_diff = numpy_cosine_similarity_distance(expected_image.flatten(), image.flatten()) assert max_diff < 1e-4
diffusers/tests/pipelines/kandinsky2_2/test_kandinsky.py/0
{ "file_path": "diffusers/tests/pipelines/kandinsky2_2/test_kandinsky.py", "repo_id": "diffusers", "token_count": 4070 }
162
# coding=utf-8 # Copyright 2024 HuggingFace Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import unittest import numpy as np import torch from transformers import CLIPTextConfig, CLIPTextModelWithProjection, CLIPTokenizer from diffusers import DDPMWuerstchenScheduler, StableCascadeCombinedPipeline from diffusers.models import StableCascadeUNet from diffusers.pipelines.wuerstchen import PaellaVQModel from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu, torch_device from ..test_pipelines_common import PipelineTesterMixin enable_full_determinism() class StableCascadeCombinedPipelineFastTests(PipelineTesterMixin, unittest.TestCase): pipeline_class = StableCascadeCombinedPipeline params = ["prompt"] batch_params = ["prompt", "negative_prompt"] required_optional_params = [ "generator", "height", "width", "latents", "prior_guidance_scale", "decoder_guidance_scale", "negative_prompt", "num_inference_steps", "return_dict", "prior_num_inference_steps", "output_type", ] test_xformers_attention = True @property def text_embedder_hidden_size(self): return 32 @property def dummy_prior(self): torch.manual_seed(0) model_kwargs = { "conditioning_dim": 128, "block_out_channels": (128, 128), "num_attention_heads": (2, 2), "down_num_layers_per_block": (1, 1), "up_num_layers_per_block": (1, 1), "clip_image_in_channels": 768, "switch_level": (False,), "clip_text_in_channels": self.text_embedder_hidden_size, "clip_text_pooled_in_channels": self.text_embedder_hidden_size, } model = StableCascadeUNet(**model_kwargs) return model.eval() @property def dummy_tokenizer(self): tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip") return tokenizer @property def dummy_text_encoder(self): torch.manual_seed(0) config = CLIPTextConfig( bos_token_id=0, eos_token_id=2, projection_dim=self.text_embedder_hidden_size, hidden_size=self.text_embedder_hidden_size, intermediate_size=37, layer_norm_eps=1e-05, num_attention_heads=4, num_hidden_layers=5, pad_token_id=1, vocab_size=1000, ) return CLIPTextModelWithProjection(config).eval() @property def dummy_vqgan(self): torch.manual_seed(0) model_kwargs = { "bottleneck_blocks": 1, "num_vq_embeddings": 2, } model = PaellaVQModel(**model_kwargs) return model.eval() @property def dummy_decoder(self): torch.manual_seed(0) model_kwargs = { "in_channels": 4, "out_channels": 4, "conditioning_dim": 128, "block_out_channels": (16, 32, 64, 128), "num_attention_heads": (-1, -1, 1, 2), "down_num_layers_per_block": (1, 1, 1, 1), "up_num_layers_per_block": (1, 1, 1, 1), "down_blocks_repeat_mappers": (1, 1, 1, 1), "up_blocks_repeat_mappers": (3, 3, 2, 2), "block_types_per_layer": ( ("SDCascadeResBlock", "SDCascadeTimestepBlock"), ("SDCascadeResBlock", "SDCascadeTimestepBlock"), ("SDCascadeResBlock", "SDCascadeTimestepBlock", "SDCascadeAttnBlock"), ("SDCascadeResBlock", "SDCascadeTimestepBlock", "SDCascadeAttnBlock"), ), "switch_level": None, "clip_text_pooled_in_channels": 32, "dropout": (0.1, 0.1, 0.1, 0.1), } model = 
StableCascadeUNet(**model_kwargs) return model.eval() def get_dummy_components(self): prior = self.dummy_prior scheduler = DDPMWuerstchenScheduler() tokenizer = self.dummy_tokenizer text_encoder = self.dummy_text_encoder decoder = self.dummy_decoder vqgan = self.dummy_vqgan prior_text_encoder = self.dummy_text_encoder prior_tokenizer = self.dummy_tokenizer components = { "text_encoder": text_encoder, "tokenizer": tokenizer, "decoder": decoder, "scheduler": scheduler, "vqgan": vqgan, "prior_text_encoder": prior_text_encoder, "prior_tokenizer": prior_tokenizer, "prior_prior": prior, "prior_scheduler": scheduler, "prior_feature_extractor": None, "prior_image_encoder": None, } return components def get_dummy_inputs(self, device, seed=0): if str(device).startswith("mps"): generator = torch.manual_seed(seed) else: generator = torch.Generator(device=device).manual_seed(seed) inputs = { "prompt": "horse", "generator": generator, "prior_guidance_scale": 4.0, "decoder_guidance_scale": 4.0, "num_inference_steps": 2, "prior_num_inference_steps": 2, "output_type": "np", "height": 128, "width": 128, } return inputs def test_stable_cascade(self): device = "cpu" components = self.get_dummy_components() pipe = self.pipeline_class(**components) pipe = pipe.to(device) pipe.set_progress_bar_config(disable=None) output = pipe(**self.get_dummy_inputs(device)) image = output.images image_from_tuple = pipe(**self.get_dummy_inputs(device), return_dict=False)[0] image_slice = image[0, -3:, -3:, -1] image_from_tuple_slice = image_from_tuple[-3:, -3:, -1] assert image.shape == (1, 128, 128, 3) expected_slice = np.array([0.0, 1.0, 0.0, 1.0, 1.0, 0.0, 1.0, 1.0, 0.0]) assert ( np.abs(image_slice.flatten() - expected_slice).max() < 1e-2 ), f" expected_slice {expected_slice}, but got {image_slice.flatten()}" assert ( np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2 ), f" expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}" @require_torch_gpu def test_offloads(self): pipes = [] components = self.get_dummy_components() sd_pipe = self.pipeline_class(**components).to(torch_device) pipes.append(sd_pipe) components = self.get_dummy_components() sd_pipe = self.pipeline_class(**components) sd_pipe.enable_sequential_cpu_offload() pipes.append(sd_pipe) components = self.get_dummy_components() sd_pipe = self.pipeline_class(**components) sd_pipe.enable_model_cpu_offload() pipes.append(sd_pipe) image_slices = [] for pipe in pipes: inputs = self.get_dummy_inputs(torch_device) image = pipe(**inputs).images image_slices.append(image[0, -3:, -3:, -1].flatten()) assert np.abs(image_slices[0] - image_slices[1]).max() < 1e-3 assert np.abs(image_slices[0] - image_slices[2]).max() < 1e-3 def test_inference_batch_single_identical(self): super().test_inference_batch_single_identical(expected_max_diff=2e-2) @unittest.skip(reason="fp16 not supported") def test_float16_inference(self): super().test_float16_inference() @unittest.skip(reason="no callback test for combined pipeline") def test_callback_inputs(self): super().test_callback_inputs() def test_stable_cascade_combined_prompt_embeds(self): device = "cpu" components = self.get_dummy_components() pipe = StableCascadeCombinedPipeline(**components) pipe.set_progress_bar_config(disable=None) prompt = "A photograph of a shiba inu, wearing a hat" ( prompt_embeds, prompt_embeds_pooled, negative_prompt_embeds, negative_prompt_embeds_pooled, ) = pipe.prior_pipe.encode_prompt(device, 1, 1, False, prompt=prompt) generator = torch.Generator(device=device) 
output_prompt = pipe( prompt=prompt, num_inference_steps=1, prior_num_inference_steps=1, output_type="np", generator=generator.manual_seed(0), ) output_prompt_embeds = pipe( prompt=None, prompt_embeds=prompt_embeds, prompt_embeds_pooled=prompt_embeds_pooled, negative_prompt_embeds=negative_prompt_embeds, negative_prompt_embeds_pooled=negative_prompt_embeds_pooled, num_inference_steps=1, prior_num_inference_steps=1, output_type="np", generator=generator.manual_seed(0), ) assert np.abs(output_prompt.images - output_prompt_embeds.images).max() < 1e-5
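# --- Editorial sketch (not part of this test file): unittest.TestCase accepts the
# name of a real test method, so the fixtures above can be driven by hand.
from diffusers import StableCascadeCombinedPipeline

t = StableCascadeCombinedPipelineFastTests("test_stable_cascade")
pipe = StableCascadeCombinedPipeline(**t.get_dummy_components())
out = pipe(**t.get_dummy_inputs(device="cpu"))
assert out.images.shape == (1, 128, 128, 3)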
diffusers/tests/pipelines/stable_cascade/test_stable_cascade_combined.py/0
{ "file_path": "diffusers/tests/pipelines/stable_cascade/test_stable_cascade_combined.py", "repo_id": "diffusers", "token_count": 4571 }
163
# coding=utf-8 # Copyright 2024 HuggingFace Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import gc import random import tempfile import unittest import numpy as np import torch from PIL import Image from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer from diffusers import ( AutoencoderKL, DDIMInverseScheduler, DDIMScheduler, DPMSolverMultistepInverseScheduler, DPMSolverMultistepScheduler, StableDiffusionDiffEditPipeline, UNet2DConditionModel, ) from diffusers.utils.testing_utils import ( enable_full_determinism, floats_tensor, load_image, nightly, numpy_cosine_similarity_distance, require_torch_gpu, torch_device, ) from ..pipeline_params import TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS, TEXT_GUIDED_IMAGE_INPAINTING_PARAMS from ..test_pipelines_common import PipelineFromPipeTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin enable_full_determinism() class StableDiffusionDiffEditPipelineFastTests( PipelineLatentTesterMixin, PipelineTesterMixin, PipelineFromPipeTesterMixin, unittest.TestCase ): pipeline_class = StableDiffusionDiffEditPipeline params = TEXT_GUIDED_IMAGE_INPAINTING_PARAMS - {"height", "width", "image"} | {"image_latents"} batch_params = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS - {"image"} | {"image_latents"} image_params = frozenset( [] ) # TO-DO: update image_params once pipeline is refactored with VaeImageProcessor.preprocess image_latents_params = frozenset([]) def get_dummy_components(self): torch.manual_seed(0) unet = UNet2DConditionModel( block_out_channels=(32, 64), layers_per_block=2, sample_size=32, in_channels=4, out_channels=4, down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"), up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"), cross_attention_dim=32, # SD2-specific config below attention_head_dim=(2, 4), use_linear_projection=True, ) scheduler = DDIMScheduler( beta_start=0.00085, beta_end=0.012, beta_schedule="scaled_linear", clip_sample=False, set_alpha_to_one=False, ) inverse_scheduler = DDIMInverseScheduler( beta_start=0.00085, beta_end=0.012, beta_schedule="scaled_linear", clip_sample=False, set_alpha_to_zero=False, ) torch.manual_seed(0) vae = AutoencoderKL( block_out_channels=[32, 64], in_channels=3, out_channels=3, down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"], up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"], latent_channels=4, sample_size=128, ) torch.manual_seed(0) text_encoder_config = CLIPTextConfig( bos_token_id=0, eos_token_id=2, hidden_size=32, intermediate_size=37, layer_norm_eps=1e-05, num_attention_heads=4, num_hidden_layers=5, pad_token_id=1, vocab_size=1000, # SD2-specific config below hidden_act="gelu", projection_dim=512, ) text_encoder = CLIPTextModel(text_encoder_config) tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip") components = { "unet": unet, "scheduler": scheduler, "inverse_scheduler": inverse_scheduler, "vae": vae, "text_encoder": text_encoder, "tokenizer": tokenizer, "safety_checker": None, "feature_extractor": None, } return components def 
get_dummy_inputs(self, device, seed=0): mask = floats_tensor((1, 16, 16), rng=random.Random(seed)).to(device) latents = floats_tensor((1, 2, 4, 16, 16), rng=random.Random(seed)).to(device) if str(device).startswith("mps"): generator = torch.manual_seed(seed) else: generator = torch.Generator(device=device).manual_seed(seed) inputs = { "prompt": "a dog and a newt", "mask_image": mask, "image_latents": latents, "generator": generator, "num_inference_steps": 2, "inpaint_strength": 1.0, "guidance_scale": 6.0, "output_type": "np", } return inputs def get_dummy_mask_inputs(self, device, seed=0): image = floats_tensor((1, 3, 32, 32), rng=random.Random(seed)).to(device) image = image.cpu().permute(0, 2, 3, 1)[0] image = Image.fromarray(np.uint8(image)).convert("RGB") if str(device).startswith("mps"): generator = torch.manual_seed(seed) else: generator = torch.Generator(device=device).manual_seed(seed) inputs = { "image": image, "source_prompt": "a cat and a frog", "target_prompt": "a dog and a newt", "generator": generator, "num_inference_steps": 2, "num_maps_per_mask": 2, "mask_encode_strength": 1.0, "guidance_scale": 6.0, "output_type": "np", } return inputs def get_dummy_inversion_inputs(self, device, seed=0): image = floats_tensor((1, 3, 32, 32), rng=random.Random(seed)).to(device) image = image.cpu().permute(0, 2, 3, 1)[0] image = Image.fromarray(np.uint8(image)).convert("RGB") if str(device).startswith("mps"): generator = torch.manual_seed(seed) else: generator = torch.Generator(device=device).manual_seed(seed) inputs = { "image": image, "prompt": "a cat and a frog", "generator": generator, "num_inference_steps": 2, "inpaint_strength": 1.0, "guidance_scale": 6.0, "decode_latents": True, "output_type": "np", } return inputs def test_save_load_optional_components(self): if not hasattr(self.pipeline_class, "_optional_components"): return components = self.get_dummy_components() pipe = self.pipeline_class(**components) pipe.to(torch_device) pipe.set_progress_bar_config(disable=None) # set all optional components to None and update pipeline config accordingly for optional_component in pipe._optional_components: setattr(pipe, optional_component, None) pipe.register_modules(**{optional_component: None for optional_component in pipe._optional_components}) inputs = self.get_dummy_inputs(torch_device) output = pipe(**inputs)[0] with tempfile.TemporaryDirectory() as tmpdir: pipe.save_pretrained(tmpdir) pipe_loaded = self.pipeline_class.from_pretrained(tmpdir) pipe_loaded.to(torch_device) pipe_loaded.set_progress_bar_config(disable=None) for optional_component in pipe._optional_components: self.assertTrue( getattr(pipe_loaded, optional_component) is None, f"`{optional_component}` did not stay set to None after loading.", ) inputs = self.get_dummy_inputs(torch_device) output_loaded = pipe_loaded(**inputs)[0] max_diff = np.abs(output - output_loaded).max() self.assertLess(max_diff, 1e-4) def test_mask(self): device = "cpu" components = self.get_dummy_components() pipe = self.pipeline_class(**components) pipe.to(device) pipe.set_progress_bar_config(disable=None) inputs = self.get_dummy_mask_inputs(device) mask = pipe.generate_mask(**inputs) mask_slice = mask[0, -3:, -3:] self.assertEqual(mask.shape, (1, 16, 16)) expected_slice = np.array([0] * 9) max_diff = np.abs(mask_slice.flatten() - expected_slice).max() self.assertLessEqual(max_diff, 1e-3) self.assertEqual(mask[0, -3, -4], 0) def test_inversion(self): device = "cpu" components = self.get_dummy_components() pipe = self.pipeline_class(**components) 
pipe.to(device) pipe.set_progress_bar_config(disable=None) inputs = self.get_dummy_inversion_inputs(device) image = pipe.invert(**inputs).images image_slice = image[0, -1, -3:, -3:] self.assertEqual(image.shape, (2, 32, 32, 3)) expected_slice = np.array( [0.5160, 0.5115, 0.5060, 0.5456, 0.4704, 0.5060, 0.5019, 0.4405, 0.4726], ) max_diff = np.abs(image_slice.flatten() - expected_slice).max() self.assertLessEqual(max_diff, 1e-3) def test_inference_batch_single_identical(self): super().test_inference_batch_single_identical(expected_max_diff=5e-3) def test_inversion_dpm(self): device = "cpu" components = self.get_dummy_components() scheduler_args = {"beta_start": 0.00085, "beta_end": 0.012, "beta_schedule": "scaled_linear"} components["scheduler"] = DPMSolverMultistepScheduler(**scheduler_args) components["inverse_scheduler"] = DPMSolverMultistepInverseScheduler(**scheduler_args) pipe = self.pipeline_class(**components) pipe.to(device) pipe.set_progress_bar_config(disable=None) inputs = self.get_dummy_inversion_inputs(device) image = pipe.invert(**inputs).images image_slice = image[0, -1, -3:, -3:] self.assertEqual(image.shape, (2, 32, 32, 3)) expected_slice = np.array( [0.5305, 0.4673, 0.5314, 0.5308, 0.4886, 0.5279, 0.5142, 0.4724, 0.4892], ) max_diff = np.abs(image_slice.flatten() - expected_slice).max() self.assertLessEqual(max_diff, 1e-3) @require_torch_gpu @nightly class StableDiffusionDiffEditPipelineIntegrationTests(unittest.TestCase): def setUp(self): super().setUp() gc.collect() torch.cuda.empty_cache() def tearDown(self): super().tearDown() gc.collect() torch.cuda.empty_cache() @classmethod def setUpClass(cls): raw_image = load_image( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/diffedit/fruit.png" ) raw_image = raw_image.convert("RGB").resize((256, 256)) cls.raw_image = raw_image def test_stable_diffusion_diffedit_full(self): generator = torch.manual_seed(0) pipe = StableDiffusionDiffEditPipeline.from_pretrained( "stabilityai/stable-diffusion-2-1-base", safety_checker=None, torch_dtype=torch.float16 ) pipe.scheduler = DDIMScheduler.from_config(pipe.scheduler.config) pipe.scheduler.clip_sample = True pipe.inverse_scheduler = DDIMInverseScheduler.from_config(pipe.scheduler.config) pipe.enable_model_cpu_offload() pipe.set_progress_bar_config(disable=None) source_prompt = "a bowl of fruit" target_prompt = "a bowl of pears" mask_image = pipe.generate_mask( image=self.raw_image, source_prompt=source_prompt, target_prompt=target_prompt, generator=generator, ) inv_latents = pipe.invert( prompt=source_prompt, image=self.raw_image, inpaint_strength=0.7, generator=generator, num_inference_steps=5, ).latents image = pipe( prompt=target_prompt, mask_image=mask_image, image_latents=inv_latents, generator=generator, negative_prompt=source_prompt, inpaint_strength=0.7, num_inference_steps=5, output_type="np", ).images[0] expected_image = ( np.array( load_image( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main" "/diffedit/pears.png" ).resize((256, 256)) ) / 255 ) assert numpy_cosine_similarity_distance(expected_image.flatten(), image.flatten()) < 2e-1 @nightly @require_torch_gpu class StableDiffusionDiffEditPipelineNightlyTests(unittest.TestCase): def setUp(self): super().setUp() gc.collect() torch.cuda.empty_cache() def tearDown(self): super().tearDown() gc.collect() torch.cuda.empty_cache() @classmethod def setUpClass(cls): raw_image = load_image( 
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/diffedit/fruit.png" ) raw_image = raw_image.convert("RGB").resize((768, 768)) cls.raw_image = raw_image def test_stable_diffusion_diffedit_dpm(self): generator = torch.manual_seed(0) pipe = StableDiffusionDiffEditPipeline.from_pretrained( "stabilityai/stable-diffusion-2-1", safety_checker=None, torch_dtype=torch.float16 ) pipe.scheduler = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config) pipe.inverse_scheduler = DPMSolverMultistepInverseScheduler.from_config(pipe.scheduler.config) pipe.enable_model_cpu_offload() pipe.set_progress_bar_config(disable=None) source_prompt = "a bowl of fruit" target_prompt = "a bowl of pears" mask_image = pipe.generate_mask( image=self.raw_image, source_prompt=source_prompt, target_prompt=target_prompt, generator=generator, ) inv_latents = pipe.invert( prompt=source_prompt, image=self.raw_image, inpaint_strength=0.7, generator=generator, num_inference_steps=25, ).latents image = pipe( prompt=target_prompt, mask_image=mask_image, image_latents=inv_latents, generator=generator, negative_prompt=source_prompt, inpaint_strength=0.7, num_inference_steps=25, output_type="np", ).images[0] expected_image = ( np.array( load_image( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main" "/diffedit/pears.png" ).resize((768, 768)) ) / 255 ) assert np.abs((expected_image - image).max()) < 5e-1
diffusers/tests/pipelines/stable_diffusion_2/test_stable_diffusion_diffedit.py/0
{ "file_path": "diffusers/tests/pipelines/stable_diffusion_2/test_stable_diffusion_diffedit.py", "repo_id": "diffusers", "token_count": 7367 }
164
# coding=utf-8 # Copyright 2024 HuggingFace Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import unittest import numpy as np import torch from transformers import ( CLIPProcessor, CLIPTextConfig, CLIPTextModel, CLIPTokenizer, CLIPVisionConfig, CLIPVisionModelWithProjection, ) from diffusers import ( AutoencoderKL, DDIMScheduler, EulerAncestralDiscreteScheduler, StableDiffusionGLIGENTextImagePipeline, UNet2DConditionModel, ) from diffusers.pipelines.stable_diffusion import CLIPImageProjection from diffusers.utils import load_image from diffusers.utils.testing_utils import enable_full_determinism, torch_device from ..pipeline_params import ( TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS, ) from ..test_pipelines_common import ( PipelineFromPipeTesterMixin, PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin, ) enable_full_determinism() class GligenTextImagePipelineFastTests( PipelineLatentTesterMixin, PipelineKarrasSchedulerTesterMixin, PipelineTesterMixin, PipelineFromPipeTesterMixin, unittest.TestCase, ): pipeline_class = StableDiffusionGLIGENTextImagePipeline params = TEXT_TO_IMAGE_PARAMS | {"gligen_phrases", "gligen_images", "gligen_boxes"} batch_params = TEXT_TO_IMAGE_BATCH_PARAMS image_params = TEXT_TO_IMAGE_IMAGE_PARAMS image_latents_params = TEXT_TO_IMAGE_IMAGE_PARAMS def get_dummy_components(self): torch.manual_seed(0) unet = UNet2DConditionModel( block_out_channels=(32, 64), layers_per_block=2, sample_size=32, in_channels=4, out_channels=4, down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"), up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"), cross_attention_dim=32, attention_type="gated-text-image", ) # unet.position_net = PositionNet(32,32) scheduler = DDIMScheduler( beta_start=0.00085, beta_end=0.012, beta_schedule="scaled_linear", clip_sample=False, set_alpha_to_one=False, ) torch.manual_seed(0) vae = AutoencoderKL( block_out_channels=[32, 64], in_channels=3, out_channels=3, down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"], up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"], latent_channels=4, sample_size=128, ) torch.manual_seed(0) text_encoder_config = CLIPTextConfig( bos_token_id=0, eos_token_id=2, hidden_size=32, intermediate_size=37, layer_norm_eps=1e-05, num_attention_heads=4, num_hidden_layers=5, pad_token_id=1, vocab_size=1000, ) text_encoder = CLIPTextModel(text_encoder_config) tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip") image_encoder_config = CLIPVisionConfig( hidden_size=32, projection_dim=32, intermediate_size=37, layer_norm_eps=1e-05, num_attention_heads=4, num_hidden_layers=5, ) image_encoder = CLIPVisionModelWithProjection(image_encoder_config) processor = CLIPProcessor.from_pretrained("openai/clip-vit-large-patch14") image_project = CLIPImageProjection(hidden_size=32) components = { "unet": unet, "scheduler": scheduler, "vae": vae, "text_encoder": text_encoder, "tokenizer": tokenizer, "safety_checker": None, "feature_extractor": None, 
"image_encoder": image_encoder, "image_project": image_project, "processor": processor, } return components def get_dummy_inputs(self, device, seed=0): if str(device).startswith("mps"): generator = torch.manual_seed(seed) else: generator = torch.Generator(device=device).manual_seed(seed) gligen_images = load_image( "https://hf.co/datasets/huggingface/documentation-images/resolve/main/diffusers/gligen/livingroom_modern.png" ) inputs = { "prompt": "A modern livingroom", "generator": generator, "num_inference_steps": 2, "guidance_scale": 6.0, "gligen_phrases": ["a birthday cake"], "gligen_images": [gligen_images], "gligen_boxes": [[0.2676, 0.6088, 0.4773, 0.7183]], "output_type": "np", } return inputs def test_dict_tuple_outputs_equivalent(self): expected_slice = None if torch_device == "cpu": expected_slice = np.array([0.5052, 0.5546, 0.4567, 0.4770, 0.5195, 0.4085, 0.5026, 0.4909, 0.4495]) super().test_dict_tuple_outputs_equivalent(expected_slice=expected_slice) def test_stable_diffusion_gligen_text_image_default_case(self): device = "cpu" # ensure determinism for the device-dependent torch.Generator components = self.get_dummy_components() sd_pipe = StableDiffusionGLIGENTextImagePipeline(**components) sd_pipe = sd_pipe.to(device) sd_pipe.set_progress_bar_config(disable=None) inputs = self.get_dummy_inputs(device) image = sd_pipe(**inputs).images image_slice = image[0, -3:, -3:, -1] assert image.shape == (1, 64, 64, 3) expected_slice = np.array([0.5069, 0.5561, 0.4577, 0.4792, 0.5203, 0.4089, 0.5039, 0.4919, 0.4499]) assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2 def test_stable_diffusion_gligen_k_euler_ancestral(self): device = "cpu" # ensure determinism for the device-dependent torch.Generator components = self.get_dummy_components() sd_pipe = StableDiffusionGLIGENTextImagePipeline(**components) sd_pipe.scheduler = EulerAncestralDiscreteScheduler.from_config(sd_pipe.scheduler.config) sd_pipe = sd_pipe.to(device) sd_pipe.set_progress_bar_config(disable=None) inputs = self.get_dummy_inputs(device) image = sd_pipe(**inputs).images image_slice = image[0, -3:, -3:, -1] assert image.shape == (1, 64, 64, 3) expected_slice = np.array([0.425, 0.494, 0.429, 0.469, 0.525, 0.417, 0.533, 0.5, 0.47]) assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2 def test_attention_slicing_forward_pass(self): super().test_attention_slicing_forward_pass(expected_max_diff=3e-3) def test_inference_batch_single_identical(self): super().test_inference_batch_single_identical(batch_size=3, expected_max_diff=3e-3)
diffusers/tests/pipelines/stable_diffusion_gligen_text_image/test_stable_diffusion_gligen_text_image.py/0
{ "file_path": "diffusers/tests/pipelines/stable_diffusion_gligen_text_image/test_stable_diffusion_gligen_text_image.py", "repo_id": "diffusers", "token_count": 3436 }
165
# coding=utf-8 # Copyright 2024 HuggingFace Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import gc import random import unittest import numpy as np import torch from transformers import ( CLIPImageProcessor, CLIPTextConfig, CLIPTextModel, CLIPTextModelWithProjection, CLIPTokenizer, CLIPVisionConfig, CLIPVisionModelWithProjection, ) from diffusers import ( AutoencoderKL, AutoencoderTiny, EDMDPMSolverMultistepScheduler, EulerDiscreteScheduler, LCMScheduler, StableDiffusionXLImg2ImgPipeline, UNet2DConditionModel, ) from diffusers.utils.testing_utils import ( enable_full_determinism, floats_tensor, load_image, require_torch_gpu, slow, torch_device, ) from ..pipeline_params import ( IMAGE_TO_IMAGE_IMAGE_PARAMS, TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS, TEXT_GUIDED_IMAGE_VARIATION_PARAMS, TEXT_TO_IMAGE_CALLBACK_CFG_PARAMS, ) from ..test_pipelines_common import ( IPAdapterTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin, SDXLOptionalComponentsTesterMixin, ) enable_full_determinism() class StableDiffusionXLImg2ImgPipelineFastTests( IPAdapterTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin, unittest.TestCase ): pipeline_class = StableDiffusionXLImg2ImgPipeline params = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {"height", "width"} required_optional_params = PipelineTesterMixin.required_optional_params - {"latents"} batch_params = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS image_params = IMAGE_TO_IMAGE_IMAGE_PARAMS image_latents_params = IMAGE_TO_IMAGE_IMAGE_PARAMS callback_cfg_params = TEXT_TO_IMAGE_CALLBACK_CFG_PARAMS.union( {"add_text_embeds", "add_time_ids", "add_neg_time_ids"} ) def get_dummy_components(self, skip_first_text_encoder=False, time_cond_proj_dim=None): torch.manual_seed(0) unet = UNet2DConditionModel( block_out_channels=(32, 64), layers_per_block=2, sample_size=32, in_channels=4, out_channels=4, time_cond_proj_dim=time_cond_proj_dim, down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"), up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"), # SD2-specific config below attention_head_dim=(2, 4), use_linear_projection=True, addition_embed_type="text_time", addition_time_embed_dim=8, transformer_layers_per_block=(1, 2), projection_class_embeddings_input_dim=72, # 5 * 8 + 32 cross_attention_dim=64 if not skip_first_text_encoder else 32, ) scheduler = EulerDiscreteScheduler( beta_start=0.00085, beta_end=0.012, steps_offset=1, beta_schedule="scaled_linear", timestep_spacing="leading", ) torch.manual_seed(0) vae = AutoencoderKL( block_out_channels=[32, 64], in_channels=3, out_channels=3, down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"], up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"], latent_channels=4, sample_size=128, ) torch.manual_seed(0) image_encoder_config = CLIPVisionConfig( hidden_size=32, image_size=224, projection_dim=32, intermediate_size=37, num_attention_heads=4, num_channels=3, num_hidden_layers=5, patch_size=14, ) image_encoder = CLIPVisionModelWithProjection(image_encoder_config) feature_extractor = CLIPImageProcessor( crop_size=224, 
do_center_crop=True, do_normalize=True, do_resize=True, image_mean=[0.48145466, 0.4578275, 0.40821073], image_std=[0.26862954, 0.26130258, 0.27577711], resample=3, size=224, ) torch.manual_seed(0) text_encoder_config = CLIPTextConfig( bos_token_id=0, eos_token_id=2, hidden_size=32, intermediate_size=37, layer_norm_eps=1e-05, num_attention_heads=4, num_hidden_layers=5, pad_token_id=1, vocab_size=1000, # SD2-specific config below hidden_act="gelu", projection_dim=32, ) text_encoder = CLIPTextModel(text_encoder_config) tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip") text_encoder_2 = CLIPTextModelWithProjection(text_encoder_config) tokenizer_2 = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip") components = { "unet": unet, "scheduler": scheduler, "vae": vae, "text_encoder": text_encoder if not skip_first_text_encoder else None, "tokenizer": tokenizer if not skip_first_text_encoder else None, "text_encoder_2": text_encoder_2, "tokenizer_2": tokenizer_2, "requires_aesthetics_score": True, "image_encoder": image_encoder, "feature_extractor": feature_extractor, } return components def get_dummy_tiny_autoencoder(self): return AutoencoderTiny(in_channels=3, out_channels=3, latent_channels=4) def test_components_function(self): init_components = self.get_dummy_components() init_components.pop("requires_aesthetics_score") pipe = self.pipeline_class(**init_components) self.assertTrue(hasattr(pipe, "components")) self.assertTrue(set(pipe.components.keys()) == set(init_components.keys())) def get_dummy_inputs(self, device, seed=0): image = floats_tensor((1, 3, 32, 32), rng=random.Random(seed)).to(device) image = image / 2 + 0.5 if str(device).startswith("mps"): generator = torch.manual_seed(seed) else: generator = torch.Generator(device=device).manual_seed(seed) inputs = { "prompt": "A painting of a squirrel eating a burger", "image": image, "generator": generator, "num_inference_steps": 2, "guidance_scale": 5.0, "output_type": "np", "strength": 0.8, } return inputs def test_stable_diffusion_xl_img2img_euler(self): device = "cpu" # ensure determinism for the device-dependent torch.Generator components = self.get_dummy_components() sd_pipe = StableDiffusionXLImg2ImgPipeline(**components) sd_pipe = sd_pipe.to(device) sd_pipe.set_progress_bar_config(disable=None) inputs = self.get_dummy_inputs(device) image = sd_pipe(**inputs).images image_slice = image[0, -3:, -3:, -1] assert image.shape == (1, 32, 32, 3) expected_slice = np.array([0.4664, 0.4886, 0.4403, 0.6902, 0.5592, 0.4534, 0.5931, 0.5951, 0.5224]) assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2 def test_stable_diffusion_xl_img2img_euler_lcm(self): device = "cpu" # ensure determinism for the device-dependent torch.Generator components = self.get_dummy_components(time_cond_proj_dim=256) sd_pipe = StableDiffusionXLImg2ImgPipeline(**components) sd_pipe.scheduler = LCMScheduler.from_config(sd_pipe.config) sd_pipe = sd_pipe.to(device) sd_pipe.set_progress_bar_config(disable=None) inputs = self.get_dummy_inputs(device) image = sd_pipe(**inputs).images image_slice = image[0, -3:, -3:, -1] assert image.shape == (1, 32, 32, 3) expected_slice = np.array([0.5604, 0.4352, 0.4717, 0.5844, 0.5101, 0.6704, 0.6290, 0.5460, 0.5286]) assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2 def test_stable_diffusion_xl_img2img_euler_lcm_custom_timesteps(self): device = "cpu" # ensure determinism for the device-dependent torch.Generator components = 
self.get_dummy_components(time_cond_proj_dim=256) sd_pipe = StableDiffusionXLImg2ImgPipeline(**components) sd_pipe.scheduler = LCMScheduler.from_config(sd_pipe.config) sd_pipe = sd_pipe.to(device) sd_pipe.set_progress_bar_config(disable=None) inputs = self.get_dummy_inputs(device) del inputs["num_inference_steps"] inputs["timesteps"] = [999, 499] image = sd_pipe(**inputs).images image_slice = image[0, -3:, -3:, -1] assert image.shape == (1, 32, 32, 3) expected_slice = np.array([0.5604, 0.4352, 0.4717, 0.5844, 0.5101, 0.6704, 0.6290, 0.5460, 0.5286]) assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2 def test_attention_slicing_forward_pass(self): super().test_attention_slicing_forward_pass(expected_max_diff=3e-3) def test_inference_batch_single_identical(self): super().test_inference_batch_single_identical(expected_max_diff=3e-3) # TODO(Patrick, Sayak) - skip for now as this requires more refiner tests def test_save_load_optional_components(self): pass def test_stable_diffusion_xl_img2img_negative_prompt_embeds(self): components = self.get_dummy_components() sd_pipe = StableDiffusionXLImg2ImgPipeline(**components) sd_pipe = sd_pipe.to(torch_device) sd_pipe.set_progress_bar_config(disable=None) # forward without prompt embeds generator_device = "cpu" inputs = self.get_dummy_inputs(generator_device) negative_prompt = 3 * ["this is a negative prompt"] inputs["negative_prompt"] = negative_prompt inputs["prompt"] = 3 * [inputs["prompt"]] output = sd_pipe(**inputs) image_slice_1 = output.images[0, -3:, -3:, -1] # forward with prompt embeds generator_device = "cpu" inputs = self.get_dummy_inputs(generator_device) negative_prompt = 3 * ["this is a negative prompt"] prompt = 3 * [inputs.pop("prompt")] ( prompt_embeds, negative_prompt_embeds, pooled_prompt_embeds, negative_pooled_prompt_embeds, ) = sd_pipe.encode_prompt(prompt, negative_prompt=negative_prompt) output = sd_pipe( **inputs, prompt_embeds=prompt_embeds, negative_prompt_embeds=negative_prompt_embeds, pooled_prompt_embeds=pooled_prompt_embeds, negative_pooled_prompt_embeds=negative_pooled_prompt_embeds, ) image_slice_2 = output.images[0, -3:, -3:, -1] # make sure that it's equal assert np.abs(image_slice_1.flatten() - image_slice_2.flatten()).max() < 1e-4 def test_ip_adapter_single(self): expected_pipe_slice = None if torch_device == "cpu": expected_pipe_slice = np.array([0.5133, 0.4626, 0.4970, 0.6273, 0.5160, 0.6891, 0.6639, 0.5892, 0.5709]) return super().test_ip_adapter_single(expected_pipe_slice=expected_pipe_slice) def test_stable_diffusion_xl_img2img_tiny_autoencoder(self): device = "cpu" # ensure determinism for the device-dependent torch.Generator components = self.get_dummy_components() sd_pipe = StableDiffusionXLImg2ImgPipeline(**components) sd_pipe.vae = self.get_dummy_tiny_autoencoder() sd_pipe = sd_pipe.to(device) sd_pipe.set_progress_bar_config(disable=None) inputs = self.get_dummy_inputs(device) image = sd_pipe(**inputs).images image_slice = image[0, -3:, -3:, -1].flatten() assert image.shape == (1, 32, 32, 3) expected_slice = np.array([0.0, 0.0, 0.0106, 0.0, 0.0, 0.0087, 0.0052, 0.0062, 0.0177]) assert np.allclose(image_slice, expected_slice, atol=1e-4, rtol=1e-4) @require_torch_gpu def test_stable_diffusion_xl_offloads(self): pipes = [] components = self.get_dummy_components() sd_pipe = StableDiffusionXLImg2ImgPipeline(**components).to(torch_device) pipes.append(sd_pipe) components = self.get_dummy_components() sd_pipe = StableDiffusionXLImg2ImgPipeline(**components)
sd_pipe.enable_model_cpu_offload() pipes.append(sd_pipe) components = self.get_dummy_components() sd_pipe = StableDiffusionXLImg2ImgPipeline(**components) sd_pipe.enable_sequential_cpu_offload() pipes.append(sd_pipe) image_slices = [] for pipe in pipes: pipe.unet.set_default_attn_processor() generator_device = "cpu" inputs = self.get_dummy_inputs(generator_device) image = pipe(**inputs).images image_slices.append(image[0, -3:, -3:, -1].flatten()) assert np.abs(image_slices[0] - image_slices[1]).max() < 1e-3 assert np.abs(image_slices[0] - image_slices[2]).max() < 1e-3 def test_stable_diffusion_xl_multi_prompts(self): components = self.get_dummy_components() sd_pipe = self.pipeline_class(**components).to(torch_device) # forward with single prompt generator_device = "cpu" inputs = self.get_dummy_inputs(generator_device) inputs["num_inference_steps"] = 5 output = sd_pipe(**inputs) image_slice_1 = output.images[0, -3:, -3:, -1] # forward with same prompt duplicated generator_device = "cpu" inputs = self.get_dummy_inputs(generator_device) inputs["num_inference_steps"] = 5 inputs["prompt_2"] = inputs["prompt"] output = sd_pipe(**inputs) image_slice_2 = output.images[0, -3:, -3:, -1] # ensure the results are equal assert np.abs(image_slice_1.flatten() - image_slice_2.flatten()).max() < 1e-4 # forward with different prompt generator_device = "cpu" inputs = self.get_dummy_inputs(generator_device) inputs["num_inference_steps"] = 5 inputs["prompt_2"] = "different prompt" output = sd_pipe(**inputs) image_slice_3 = output.images[0, -3:, -3:, -1] # ensure the results are not equal assert np.abs(image_slice_1.flatten() - image_slice_3.flatten()).max() > 1e-4 # manually set a negative_prompt generator_device = "cpu" inputs = self.get_dummy_inputs(generator_device) inputs["num_inference_steps"] = 5 inputs["negative_prompt"] = "negative prompt" output = sd_pipe(**inputs) image_slice_1 = output.images[0, -3:, -3:, -1] # forward with same negative_prompt duplicated generator_device = "cpu" inputs = self.get_dummy_inputs(generator_device) inputs["num_inference_steps"] = 5 inputs["negative_prompt"] = "negative prompt" inputs["negative_prompt_2"] = inputs["negative_prompt"] output = sd_pipe(**inputs) image_slice_2 = output.images[0, -3:, -3:, -1] # ensure the results are equal assert np.abs(image_slice_1.flatten() - image_slice_2.flatten()).max() < 1e-4 # forward with different negative_prompt generator_device = "cpu" inputs = self.get_dummy_inputs(generator_device) inputs["num_inference_steps"] = 5 inputs["negative_prompt"] = "negative prompt" inputs["negative_prompt_2"] = "different negative prompt" output = sd_pipe(**inputs) image_slice_3 = output.images[0, -3:, -3:, -1] # ensure the results are not equal assert np.abs(image_slice_1.flatten() - image_slice_3.flatten()).max() > 1e-4 def test_stable_diffusion_xl_img2img_negative_conditions(self): device = "cpu" # ensure determinism for the device-dependent torch.Generator components = self.get_dummy_components() sd_pipe = self.pipeline_class(**components) sd_pipe = sd_pipe.to(device) sd_pipe.set_progress_bar_config(disable=None) inputs = self.get_dummy_inputs(device) image = sd_pipe(**inputs).images image_slice_with_no_neg_conditions = image[0, -3:, -3:, -1] image = sd_pipe( **inputs, negative_original_size=(512, 512), negative_crops_coords_top_left=( 0, 0, ), negative_target_size=(1024, 1024), ).images image_slice_with_neg_conditions = image[0, -3:, -3:, -1] assert ( np.abs(image_slice_with_no_neg_conditions.flatten() - 
image_slice_with_neg_conditions.flatten()).max() > 1e-4 ) def test_pipeline_interrupt(self): components = self.get_dummy_components() sd_pipe = StableDiffusionXLImg2ImgPipeline(**components) sd_pipe = sd_pipe.to(torch_device) sd_pipe.set_progress_bar_config(disable=None) inputs = self.get_dummy_inputs(torch_device) prompt = "hey" num_inference_steps = 5 # store intermediate latents from the generation process class PipelineState: def __init__(self): self.state = [] def apply(self, pipe, i, t, callback_kwargs): self.state.append(callback_kwargs["latents"]) return callback_kwargs pipe_state = PipelineState() sd_pipe( prompt, image=inputs["image"], strength=0.8, num_inference_steps=num_inference_steps, output_type="np", generator=torch.Generator("cpu").manual_seed(0), callback_on_step_end=pipe_state.apply, ).images # interrupt generation at step index interrupt_step_idx = 1 def callback_on_step_end(pipe, i, t, callback_kwargs): if i == interrupt_step_idx: pipe._interrupt = True return callback_kwargs output_interrupted = sd_pipe( prompt, image=inputs["image"], strength=0.8, num_inference_steps=num_inference_steps, output_type="latent", generator=torch.Generator("cpu").manual_seed(0), callback_on_step_end=callback_on_step_end, ).images # fetch intermediate latents at the interrupted step # from the completed generation process intermediate_latent = pipe_state.state[interrupt_step_idx] # compare the intermediate latent to the output of the interrupted process # they should be the same assert torch.allclose(intermediate_latent, output_interrupted, atol=1e-4) class StableDiffusionXLImg2ImgRefinerOnlyPipelineFastTests( PipelineLatentTesterMixin, PipelineTesterMixin, SDXLOptionalComponentsTesterMixin, unittest.TestCase ): pipeline_class = StableDiffusionXLImg2ImgPipeline params = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {"height", "width"} required_optional_params = PipelineTesterMixin.required_optional_params - {"latents"} batch_params = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS image_params = IMAGE_TO_IMAGE_IMAGE_PARAMS image_latents_params = IMAGE_TO_IMAGE_IMAGE_PARAMS def get_dummy_components(self): torch.manual_seed(0) unet = UNet2DConditionModel( block_out_channels=(32, 64), layers_per_block=2, sample_size=32, in_channels=4, out_channels=4, down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"), up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"), # SD2-specific config below attention_head_dim=(2, 4), use_linear_projection=True, addition_embed_type="text_time", addition_time_embed_dim=8, transformer_layers_per_block=(1, 2), projection_class_embeddings_input_dim=72, # 5 * 8 + 32 cross_attention_dim=32, ) scheduler = EulerDiscreteScheduler( beta_start=0.00085, beta_end=0.012, steps_offset=1, beta_schedule="scaled_linear", timestep_spacing="leading", ) torch.manual_seed(0) vae = AutoencoderKL( block_out_channels=[32, 64], in_channels=3, out_channels=3, down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"], up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"], latent_channels=4, sample_size=128, ) torch.manual_seed(0) text_encoder_config = CLIPTextConfig( bos_token_id=0, eos_token_id=2, hidden_size=32, intermediate_size=37, layer_norm_eps=1e-05, num_attention_heads=4, num_hidden_layers=5, pad_token_id=1, vocab_size=1000, # SD2-specific config below hidden_act="gelu", projection_dim=32, ) text_encoder_2 = CLIPTextModelWithProjection(text_encoder_config) tokenizer_2 = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip") components = { "unet": unet, "scheduler": scheduler, 
"vae": vae, "tokenizer": None, "text_encoder": None, "text_encoder_2": text_encoder_2, "tokenizer_2": tokenizer_2, "requires_aesthetics_score": True, "image_encoder": None, "feature_extractor": None, } return components def test_components_function(self): init_components = self.get_dummy_components() init_components.pop("requires_aesthetics_score") pipe = self.pipeline_class(**init_components) self.assertTrue(hasattr(pipe, "components")) self.assertTrue(set(pipe.components.keys()) == set(init_components.keys())) def get_dummy_inputs(self, device, seed=0): image = floats_tensor((1, 3, 32, 32), rng=random.Random(seed)).to(device) image = image / 2 + 0.5 if str(device).startswith("mps"): generator = torch.manual_seed(seed) else: generator = torch.Generator(device=device).manual_seed(seed) inputs = { "prompt": "A painting of a squirrel eating a burger", "image": image, "generator": generator, "num_inference_steps": 2, "guidance_scale": 5.0, "output_type": "np", "strength": 0.8, } return inputs def test_stable_diffusion_xl_img2img_euler(self): device = "cpu" # ensure determinism for the device-dependent torch.Generator components = self.get_dummy_components() sd_pipe = StableDiffusionXLImg2ImgPipeline(**components) sd_pipe = sd_pipe.to(device) sd_pipe.set_progress_bar_config(disable=None) inputs = self.get_dummy_inputs(device) image = sd_pipe(**inputs).images image_slice = image[0, -3:, -3:, -1] assert image.shape == (1, 32, 32, 3) expected_slice = np.array([0.4745, 0.4924, 0.4338, 0.6468, 0.5547, 0.4419, 0.5646, 0.5897, 0.5146]) assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2 @require_torch_gpu def test_stable_diffusion_xl_offloads(self): pipes = [] components = self.get_dummy_components() sd_pipe = StableDiffusionXLImg2ImgPipeline(**components).to(torch_device) pipes.append(sd_pipe) components = self.get_dummy_components() sd_pipe = StableDiffusionXLImg2ImgPipeline(**components) sd_pipe.enable_model_cpu_offload() pipes.append(sd_pipe) components = self.get_dummy_components() sd_pipe = StableDiffusionXLImg2ImgPipeline(**components) sd_pipe.enable_sequential_cpu_offload() pipes.append(sd_pipe) image_slices = [] for pipe in pipes: pipe.unet.set_default_attn_processor() generator_device = "cpu" inputs = self.get_dummy_inputs(generator_device) image = pipe(**inputs).images image_slices.append(image[0, -3:, -3:, -1].flatten()) assert np.abs(image_slices[0] - image_slices[1]).max() < 1e-3 assert np.abs(image_slices[0] - image_slices[2]).max() < 1e-3 def test_stable_diffusion_xl_img2img_negative_conditions(self): device = "cpu" # ensure determinism for the device-dependent torch.Generator components = self.get_dummy_components() sd_pipe = self.pipeline_class(**components) sd_pipe = sd_pipe.to(device) sd_pipe.set_progress_bar_config(disable=None) inputs = self.get_dummy_inputs(device) image = sd_pipe(**inputs).images image_slice_with_no_neg_conditions = image[0, -3:, -3:, -1] image = sd_pipe( **inputs, negative_original_size=(512, 512), negative_crops_coords_top_left=( 0, 0, ), negative_target_size=(1024, 1024), ).images image_slice_with_neg_conditions = image[0, -3:, -3:, -1] assert ( np.abs(image_slice_with_no_neg_conditions.flatten() - image_slice_with_neg_conditions.flatten()).max() > 1e-4 ) def test_stable_diffusion_xl_img2img_negative_prompt_embeds(self): components = self.get_dummy_components() sd_pipe = StableDiffusionXLImg2ImgPipeline(**components) sd_pipe = sd_pipe.to(torch_device) sd_pipe = sd_pipe.to(torch_device) sd_pipe.set_progress_bar_config(disable=None) # forward 
without prompt embeds generator_device = "cpu" inputs = self.get_dummy_inputs(generator_device) negative_prompt = 3 * ["this is a negative prompt"] inputs["negative_prompt"] = negative_prompt inputs["prompt"] = 3 * [inputs["prompt"]] output = sd_pipe(**inputs) image_slice_1 = output.images[0, -3:, -3:, -1] # forward with prompt embeds generator_device = "cpu" inputs = self.get_dummy_inputs(generator_device) negative_prompt = 3 * ["this is a negative prompt"] prompt = 3 * [inputs.pop("prompt")] ( prompt_embeds, negative_prompt_embeds, pooled_prompt_embeds, negative_pooled_prompt_embeds, ) = sd_pipe.encode_prompt(prompt, negative_prompt=negative_prompt) output = sd_pipe( **inputs, prompt_embeds=prompt_embeds, negative_prompt_embeds=negative_prompt_embeds, pooled_prompt_embeds=pooled_prompt_embeds, negative_pooled_prompt_embeds=negative_pooled_prompt_embeds, ) image_slice_2 = output.images[0, -3:, -3:, -1] # make sure that it's equal assert np.abs(image_slice_1.flatten() - image_slice_2.flatten()).max() < 1e-4 def test_stable_diffusion_xl_img2img_prompt_embeds_only(self): components = self.get_dummy_components() sd_pipe = StableDiffusionXLImg2ImgPipeline(**components) sd_pipe = sd_pipe.to(torch_device) sd_pipe.set_progress_bar_config(disable=None) # forward without prompt embeds generator_device = "cpu" inputs = self.get_dummy_inputs(generator_device) inputs["prompt"] = 3 * [inputs["prompt"]] output = sd_pipe(**inputs) image_slice_1 = output.images[0, -3:, -3:, -1] # forward with prompt embeds generator_device = "cpu" inputs = self.get_dummy_inputs(generator_device) prompt = 3 * [inputs.pop("prompt")] ( prompt_embeds, _, pooled_prompt_embeds, _, ) = sd_pipe.encode_prompt(prompt) output = sd_pipe( **inputs, prompt_embeds=prompt_embeds, pooled_prompt_embeds=pooled_prompt_embeds, ) image_slice_2 = output.images[0, -3:, -3:, -1] # make sure that it's equal assert np.abs(image_slice_1.flatten() - image_slice_2.flatten()).max() < 1e-4 def test_attention_slicing_forward_pass(self): super().test_attention_slicing_forward_pass(expected_max_diff=3e-3) def test_inference_batch_single_identical(self): super().test_inference_batch_single_identical(expected_max_diff=3e-3) def test_save_load_optional_components(self): self._test_save_load_optional_components() @slow class StableDiffusionXLImg2ImgPipelineIntegrationTests(unittest.TestCase): def setUp(self): super().setUp() gc.collect() torch.cuda.empty_cache() def tearDown(self): super().tearDown() gc.collect() torch.cuda.empty_cache() def test_stable_diffusion_xl_img2img_playground(self): torch.manual_seed(0) model_path = "playgroundai/playground-v2.5-1024px-aesthetic" sd_pipe = StableDiffusionXLImg2ImgPipeline.from_pretrained( model_path, torch_dtype=torch.float16, variant="fp16", add_watermarker=False ) sd_pipe.enable_model_cpu_offload() sd_pipe.scheduler = EDMDPMSolverMultistepScheduler.from_config( sd_pipe.scheduler.config, use_karras_sigmas=True ) sd_pipe.set_progress_bar_config(disable=None) prompt = "a photo of an astronaut riding a horse on mars" url = "https://huggingface.co/datasets/patrickvonplaten/images/resolve/main/aa_xl/000000009.png" init_image = load_image(url).convert("RGB") image = sd_pipe( prompt, num_inference_steps=30, guidance_scale=8.0, image=init_image, height=1024, width=1024, output_type="np", ).images image_slice = image[0, -3:, -3:, -1] assert image.shape == (1, 1024, 1024, 3) expected_slice = np.array([0.3519, 0.3149, 0.3364, 0.3505, 0.3402, 0.3371, 0.3554, 0.3495, 0.3333]) assert np.abs(image_slice.flatten() - 
expected_slice).max() < 1e-2
diffusers/tests/pipelines/stable_diffusion_xl/test_stable_diffusion_xl_img2img.py/0
{ "file_path": "diffusers/tests/pipelines/stable_diffusion_xl/test_stable_diffusion_xl_img2img.py", "repo_id": "diffusers", "token_count": 14626 }
166
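The tests above exercise `StableDiffusionXLImg2ImgPipeline` end to end. For orientation, here is a minimal sketch of the img2img call pattern being tested. The checkpoint name is an assumed public SDXL refiner repo, not something the test file pins; the image URL is the one used by the integration test above.

```python
import torch
from diffusers import StableDiffusionXLImg2ImgPipeline
from diffusers.utils import load_image

# Assumed checkpoint; the integration test above uses a Playground model with the same call pattern.
pipe = StableDiffusionXLImg2ImgPipeline.from_pretrained(
    "stabilityai/stable-diffusion-xl-refiner-1.0", torch_dtype=torch.float16, variant="fp16"
)
pipe.enable_model_cpu_offload()

init_image = load_image(
    "https://huggingface.co/datasets/patrickvonplaten/images/resolve/main/aa_xl/000000009.png"
).convert("RGB")

image = pipe(
    "a photo of an astronaut riding a horse on mars",
    image=init_image,
    strength=0.8,  # fraction of the denoising schedule actually run on the init image
    num_inference_steps=30,
    guidance_scale=8.0,
).images[0]
image.save("sdxl_img2img.png")
```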
import torch from diffusers import DDIMScheduler from .test_schedulers import SchedulerCommonTest class DDIMSchedulerTest(SchedulerCommonTest): scheduler_classes = (DDIMScheduler,) forward_default_kwargs = (("eta", 0.0), ("num_inference_steps", 50)) def get_scheduler_config(self, **kwargs): config = { "num_train_timesteps": 1000, "beta_start": 0.0001, "beta_end": 0.02, "beta_schedule": "linear", "clip_sample": True, } config.update(**kwargs) return config def full_loop(self, **config): scheduler_class = self.scheduler_classes[0] scheduler_config = self.get_scheduler_config(**config) scheduler = scheduler_class(**scheduler_config) num_inference_steps, eta = 10, 0.0 model = self.dummy_model() sample = self.dummy_sample_deter scheduler.set_timesteps(num_inference_steps) for t in scheduler.timesteps: residual = model(sample, t) sample = scheduler.step(residual, t, sample, eta).prev_sample return sample def test_timesteps(self): for timesteps in [100, 500, 1000]: self.check_over_configs(num_train_timesteps=timesteps) def test_steps_offset(self): for steps_offset in [0, 1]: self.check_over_configs(steps_offset=steps_offset) scheduler_class = self.scheduler_classes[0] scheduler_config = self.get_scheduler_config(steps_offset=1) scheduler = scheduler_class(**scheduler_config) scheduler.set_timesteps(5) assert torch.equal(scheduler.timesteps, torch.LongTensor([801, 601, 401, 201, 1])) def test_betas(self): for beta_start, beta_end in zip([0.0001, 0.001, 0.01, 0.1], [0.002, 0.02, 0.2, 2]): self.check_over_configs(beta_start=beta_start, beta_end=beta_end) def test_schedules(self): for schedule in ["linear", "squaredcos_cap_v2"]: self.check_over_configs(beta_schedule=schedule) def test_prediction_type(self): for prediction_type in ["epsilon", "v_prediction"]: self.check_over_configs(prediction_type=prediction_type) def test_clip_sample(self): for clip_sample in [True, False]: self.check_over_configs(clip_sample=clip_sample) def test_timestep_spacing(self): for timestep_spacing in ["trailing", "leading"]: self.check_over_configs(timestep_spacing=timestep_spacing) def test_rescale_betas_zero_snr(self): for rescale_betas_zero_snr in [True, False]: self.check_over_configs(rescale_betas_zero_snr=rescale_betas_zero_snr) def test_thresholding(self): self.check_over_configs(thresholding=False) for threshold in [0.5, 1.0, 2.0]: for prediction_type in ["epsilon", "v_prediction"]: self.check_over_configs( thresholding=True, prediction_type=prediction_type, sample_max_value=threshold, ) def test_time_indices(self): for t in [1, 10, 49]: self.check_over_forward(time_step=t) def test_inference_steps(self): for t, num_inference_steps in zip([1, 10, 50], [10, 50, 500]): self.check_over_forward(time_step=t, num_inference_steps=num_inference_steps) def test_eta(self): for t, eta in zip([1, 10, 49], [0.0, 0.5, 1.0]): self.check_over_forward(time_step=t, eta=eta) def test_variance(self): scheduler_class = self.scheduler_classes[0] scheduler_config = self.get_scheduler_config() scheduler = scheduler_class(**scheduler_config) assert torch.sum(torch.abs(scheduler._get_variance(0, 0) - 0.0)) < 1e-5 assert torch.sum(torch.abs(scheduler._get_variance(420, 400) - 0.14771)) < 1e-5 assert torch.sum(torch.abs(scheduler._get_variance(980, 960) - 0.32460)) < 1e-5 assert torch.sum(torch.abs(scheduler._get_variance(0, 0) - 0.0)) < 1e-5 assert torch.sum(torch.abs(scheduler._get_variance(487, 486) - 0.00979)) < 1e-5 assert torch.sum(torch.abs(scheduler._get_variance(999, 998) - 0.02)) < 1e-5 def test_full_loop_no_noise(self): sample = 
self.full_loop()
        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 172.0067) < 1e-2
        assert abs(result_mean.item() - 0.223967) < 1e-3

    def test_full_loop_with_v_prediction(self):
        sample = self.full_loop(prediction_type="v_prediction")

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 52.5302) < 1e-2
        assert abs(result_mean.item() - 0.0684) < 1e-3

    def test_full_loop_with_set_alpha_to_one(self):
        # We specify different beta, so that the first alpha is 0.99
        sample = self.full_loop(set_alpha_to_one=True, beta_start=0.01)
        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 149.8295) < 1e-2
        assert abs(result_mean.item() - 0.1951) < 1e-3

    def test_full_loop_with_no_set_alpha_to_one(self):
        # We specify different beta, so that the first alpha is 0.99
        sample = self.full_loop(set_alpha_to_one=False, beta_start=0.01)
        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 149.0784) < 1e-2
        assert abs(result_mean.item() - 0.1941) < 1e-3

    def test_full_loop_with_noise(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        num_inference_steps, eta = 10, 0.0
        t_start = 8

        model = self.dummy_model()
        sample = self.dummy_sample_deter
        scheduler.set_timesteps(num_inference_steps)

        # add noise
        noise = self.dummy_noise_deter
        timesteps = scheduler.timesteps[t_start * scheduler.order :]
        sample = scheduler.add_noise(sample, noise, timesteps[:1])

        for t in timesteps:
            residual = model(sample, t)
            sample = scheduler.step(residual, t, sample, eta).prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 354.5418) < 1e-2, f"expected result sum 354.5418, but got {result_sum}"
        assert abs(result_mean.item() - 0.4616) < 1e-3, f"expected result mean 0.4616, but got {result_mean}"
diffusers/tests/schedulers/test_scheduler_ddim.py/0
{ "file_path": "diffusers/tests/schedulers/test_scheduler_ddim.py", "repo_id": "diffusers", "token_count": 3127 }
167
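The `full_loop` helper above is a compressed version of the standard DDIM sampling loop. A standalone sketch, with a randomly initialized `UNet2DModel` standing in for the test's dummy model (model size and tensor shapes are illustrative assumptions):

```python
import torch
from diffusers import DDIMScheduler, UNet2DModel

# Tiny, randomly initialized UNet in place of the test's dummy model.
unet = UNet2DModel(sample_size=32, in_channels=3, out_channels=3)
scheduler = DDIMScheduler(num_train_timesteps=1000, beta_schedule="linear", clip_sample=True)

sample = torch.randn(1, 3, 32, 32)
scheduler.set_timesteps(10)
for t in scheduler.timesteps:
    with torch.no_grad():
        residual = unet(sample, t).sample
    # eta=0.0 makes the update deterministic, matching full_loop above
    sample = scheduler.step(residual, t, sample, eta=0.0).prev_sample
```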
import tempfile import torch from diffusers import IPNDMScheduler from .test_schedulers import SchedulerCommonTest class IPNDMSchedulerTest(SchedulerCommonTest): scheduler_classes = (IPNDMScheduler,) forward_default_kwargs = (("num_inference_steps", 50),) def get_scheduler_config(self, **kwargs): config = {"num_train_timesteps": 1000} config.update(**kwargs) return config def check_over_configs(self, time_step=0, **config): kwargs = dict(self.forward_default_kwargs) num_inference_steps = kwargs.pop("num_inference_steps", None) sample = self.dummy_sample residual = 0.1 * sample dummy_past_residuals = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05] for scheduler_class in self.scheduler_classes: scheduler_config = self.get_scheduler_config(**config) scheduler = scheduler_class(**scheduler_config) scheduler.set_timesteps(num_inference_steps) # copy over dummy past residuals scheduler.ets = dummy_past_residuals[:] if time_step is None: time_step = scheduler.timesteps[len(scheduler.timesteps) // 2] with tempfile.TemporaryDirectory() as tmpdirname: scheduler.save_config(tmpdirname) new_scheduler = scheduler_class.from_pretrained(tmpdirname) new_scheduler.set_timesteps(num_inference_steps) # copy over dummy past residuals new_scheduler.ets = dummy_past_residuals[:] output = scheduler.step(residual, time_step, sample, **kwargs).prev_sample new_output = new_scheduler.step(residual, time_step, sample, **kwargs).prev_sample assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical" output = scheduler.step(residual, time_step, sample, **kwargs).prev_sample new_output = new_scheduler.step(residual, time_step, sample, **kwargs).prev_sample assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical" def test_from_save_pretrained(self): pass def check_over_forward(self, time_step=0, **forward_kwargs): kwargs = dict(self.forward_default_kwargs) num_inference_steps = kwargs.pop("num_inference_steps", None) sample = self.dummy_sample residual = 0.1 * sample dummy_past_residuals = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05] for scheduler_class in self.scheduler_classes: scheduler_config = self.get_scheduler_config() scheduler = scheduler_class(**scheduler_config) scheduler.set_timesteps(num_inference_steps) # copy over dummy past residuals (must be after setting timesteps) scheduler.ets = dummy_past_residuals[:] if time_step is None: time_step = scheduler.timesteps[len(scheduler.timesteps) // 2] with tempfile.TemporaryDirectory() as tmpdirname: scheduler.save_config(tmpdirname) new_scheduler = scheduler_class.from_pretrained(tmpdirname) # copy over dummy past residuals new_scheduler.set_timesteps(num_inference_steps) # copy over dummy past residual (must be after setting timesteps) new_scheduler.ets = dummy_past_residuals[:] output = scheduler.step(residual, time_step, sample, **kwargs).prev_sample new_output = new_scheduler.step(residual, time_step, sample, **kwargs).prev_sample assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical" output = scheduler.step(residual, time_step, sample, **kwargs).prev_sample new_output = new_scheduler.step(residual, time_step, sample, **kwargs).prev_sample assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical" def full_loop(self, **config): scheduler_class = self.scheduler_classes[0] scheduler_config = self.get_scheduler_config(**config) scheduler = scheduler_class(**scheduler_config) 
num_inference_steps = 10 model = self.dummy_model() sample = self.dummy_sample_deter scheduler.set_timesteps(num_inference_steps) for i, t in enumerate(scheduler.timesteps): residual = model(sample, t) sample = scheduler.step(residual, t, sample).prev_sample scheduler._step_index = None for i, t in enumerate(scheduler.timesteps): residual = model(sample, t) sample = scheduler.step(residual, t, sample).prev_sample return sample def test_step_shape(self): kwargs = dict(self.forward_default_kwargs) num_inference_steps = kwargs.pop("num_inference_steps", None) for scheduler_class in self.scheduler_classes: scheduler_config = self.get_scheduler_config() scheduler = scheduler_class(**scheduler_config) sample = self.dummy_sample residual = 0.1 * sample if num_inference_steps is not None and hasattr(scheduler, "set_timesteps"): scheduler.set_timesteps(num_inference_steps) elif num_inference_steps is not None and not hasattr(scheduler, "set_timesteps"): kwargs["num_inference_steps"] = num_inference_steps # copy over dummy past residuals (must be done after set_timesteps) dummy_past_residuals = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05] scheduler.ets = dummy_past_residuals[:] time_step_0 = scheduler.timesteps[5] time_step_1 = scheduler.timesteps[6] output_0 = scheduler.step(residual, time_step_0, sample, **kwargs).prev_sample output_1 = scheduler.step(residual, time_step_1, sample, **kwargs).prev_sample self.assertEqual(output_0.shape, sample.shape) self.assertEqual(output_0.shape, output_1.shape) output_0 = scheduler.step(residual, time_step_0, sample, **kwargs).prev_sample output_1 = scheduler.step(residual, time_step_1, sample, **kwargs).prev_sample self.assertEqual(output_0.shape, sample.shape) self.assertEqual(output_0.shape, output_1.shape) def test_timesteps(self): for timesteps in [100, 1000]: self.check_over_configs(num_train_timesteps=timesteps, time_step=None) def test_inference_steps(self): for t, num_inference_steps in zip([1, 5, 10], [10, 50, 100]): self.check_over_forward(num_inference_steps=num_inference_steps, time_step=None) def test_full_loop_no_noise(self): sample = self.full_loop() result_mean = torch.mean(torch.abs(sample)) assert abs(result_mean.item() - 2540529) < 10
diffusers/tests/schedulers/test_scheduler_ipndm.py/0
{ "file_path": "diffusers/tests/schedulers/test_scheduler_ipndm.py", "repo_id": "diffusers", "token_count": 3120 }
168
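The `check_over_configs`/`check_over_forward` logic above hinges on one detail: the IPNDM multistep history (`ets`) is runtime state, not config, so it survives neither `save_config` nor `from_pretrained` and has to be re-seeded by hand before comparing step outputs. A minimal round-trip sketch of the serialization path being tested:

```python
import tempfile
from diffusers import IPNDMScheduler

scheduler = IPNDMScheduler(num_train_timesteps=1000)
scheduler.set_timesteps(50)

with tempfile.TemporaryDirectory() as tmpdir:
    scheduler.save_config(tmpdir)
    reloaded = IPNDMScheduler.from_pretrained(tmpdir)
    reloaded.set_timesteps(50)
    # The past-residual buffer (`ets`) is not serialized, which is why the
    # tests above copy dummy_past_residuals onto both schedulers before stepping.
```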
# coding=utf-8 # Copyright 2024 HuggingFace Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import unittest from diffusers import ( MotionAdapter, ) from diffusers.utils.testing_utils import ( enable_full_determinism, ) enable_full_determinism() class MotionAdapterSingleFileTests(unittest.TestCase): model_class = MotionAdapter def test_single_file_components_version_v1_5(self): ckpt_path = "https://huggingface.co/guoyww/animatediff/blob/main/mm_sd_v15.ckpt" repo_id = "guoyww/animatediff-motion-adapter-v1-5" model = self.model_class.from_pretrained(repo_id) model_single_file = self.model_class.from_single_file(ckpt_path) PARAMS_TO_IGNORE = ["torch_dtype", "_name_or_path", "_use_default_values", "_diffusers_version"] for param_name, param_value in model_single_file.config.items(): if param_name in PARAMS_TO_IGNORE: continue assert ( model.config[param_name] == param_value ), f"{param_name} differs between pretrained loading and single file loading" def test_single_file_components_version_v1_5_2(self): ckpt_path = "https://huggingface.co/guoyww/animatediff/blob/main/mm_sd_v15_v2.ckpt" repo_id = "guoyww/animatediff-motion-adapter-v1-5-2" model = self.model_class.from_pretrained(repo_id) model_single_file = self.model_class.from_single_file(ckpt_path) PARAMS_TO_IGNORE = ["torch_dtype", "_name_or_path", "_use_default_values", "_diffusers_version"] for param_name, param_value in model_single_file.config.items(): if param_name in PARAMS_TO_IGNORE: continue assert ( model.config[param_name] == param_value ), f"{param_name} differs between pretrained loading and single file loading" def test_single_file_components_version_v1_5_3(self): ckpt_path = "https://huggingface.co/guoyww/animatediff/blob/main/v3_sd15_mm.ckpt" repo_id = "guoyww/animatediff-motion-adapter-v1-5-3" model = self.model_class.from_pretrained(repo_id) model_single_file = self.model_class.from_single_file(ckpt_path) PARAMS_TO_IGNORE = ["torch_dtype", "_name_or_path", "_use_default_values", "_diffusers_version"] for param_name, param_value in model_single_file.config.items(): if param_name in PARAMS_TO_IGNORE: continue assert ( model.config[param_name] == param_value ), f"{param_name} differs between pretrained loading and single file loading" def test_single_file_components_version_sdxl_beta(self): ckpt_path = "https://huggingface.co/guoyww/animatediff/blob/main/mm_sdxl_v10_beta.ckpt" repo_id = "guoyww/animatediff-motion-adapter-sdxl-beta" model = self.model_class.from_pretrained(repo_id) model_single_file = self.model_class.from_single_file(ckpt_path) PARAMS_TO_IGNORE = ["torch_dtype", "_name_or_path", "_use_default_values", "_diffusers_version"] for param_name, param_value in model_single_file.config.items(): if param_name in PARAMS_TO_IGNORE: continue assert ( model.config[param_name] == param_value ), f"{param_name} differs between pretrained loading and single file loading"
diffusers/tests/single_file/test_model_motion_adapter_single_file.py/0
{ "file_path": "diffusers/tests/single_file/test_model_motion_adapter_single_file.py", "repo_id": "diffusers", "token_count": 1629 }
169
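Each test above compares the diffusers-format repo against the original single-file checkpoint it was converted from. The pattern, extracted into a standalone sketch (both identifiers come straight from the test file):

```python
from diffusers import MotionAdapter

adapter = MotionAdapter.from_pretrained("guoyww/animatediff-motion-adapter-v1-5-2")
adapter_single_file = MotionAdapter.from_single_file(
    "https://huggingface.co/guoyww/animatediff/blob/main/mm_sd_v15_v2.ckpt"
)

# Loading-path bookkeeping keys differ by construction, so skip them.
skip = {"torch_dtype", "_name_or_path", "_use_default_values", "_diffusers_version"}
for name, value in adapter_single_file.config.items():
    if name not in skip:
        assert adapter.config[name] == value, f"{name} differs between loading paths"
```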
# coding=utf-8 # Copyright 2024 The HuggingFace Inc. team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import argparse import glob import os import re import subprocess # All paths are set with the intent you should run this script from the root of the repo with the command # python utils/check_copies.py DIFFUSERS_PATH = "src/diffusers" REPO_PATH = "." def _should_continue(line, indent): return line.startswith(indent) or len(line) <= 1 or re.search(r"^\s*\)(\s*->.*:|:)\s*$", line) is not None def find_code_in_diffusers(object_name): """Find and return the code source code of `object_name`.""" parts = object_name.split(".") i = 0 # First let's find the module where our object lives. module = parts[i] while i < len(parts) and not os.path.isfile(os.path.join(DIFFUSERS_PATH, f"{module}.py")): i += 1 if i < len(parts): module = os.path.join(module, parts[i]) if i >= len(parts): raise ValueError(f"`object_name` should begin with the name of a module of diffusers but got {object_name}.") with open( os.path.join(DIFFUSERS_PATH, f"{module}.py"), "r", encoding="utf-8", newline="\n", ) as f: lines = f.readlines() # Now let's find the class / func in the code! indent = "" line_index = 0 for name in parts[i + 1 :]: while ( line_index < len(lines) and re.search(rf"^{indent}(class|def)\s+{name}(\(|\:)", lines[line_index]) is None ): line_index += 1 indent += " " line_index += 1 if line_index >= len(lines): raise ValueError(f" {object_name} does not match any function or class in {module}.") # We found the beginning of the class / func, now let's find the end (when the indent diminishes). start_index = line_index while line_index < len(lines) and _should_continue(lines[line_index], indent): line_index += 1 # Clean up empty lines at the end (if any). while len(lines[line_index - 1]) <= 1: line_index -= 1 code_lines = lines[start_index:line_index] return "".join(code_lines) _re_copy_warning = re.compile(r"^(\s*)#\s*Copied from\s+diffusers\.(\S+\.\S+)\s*($|\S.*$)") _re_replace_pattern = re.compile(r"^\s*(\S+)->(\S+)(\s+.*|$)") _re_fill_pattern = re.compile(r"<FILL\s+[^>]*>") def get_indent(code): lines = code.split("\n") idx = 0 while idx < len(lines) and len(lines[idx]) == 0: idx += 1 if idx < len(lines): return re.search(r"^(\s*)\S", lines[idx]).groups()[0] return "" def run_ruff(code): command = ["ruff", "format", "-", "--config", "pyproject.toml", "--silent"] process = subprocess.Popen(command, stdout=subprocess.PIPE, stderr=subprocess.PIPE, stdin=subprocess.PIPE) stdout, _ = process.communicate(input=code.encode()) return stdout.decode() def stylify(code: str) -> str: """ Applies the ruff part of our `make style` command to some code. This formats the code using `ruff format`. As `ruff` does not provide a python api this cannot be done on the fly. Args: code (`str`): The code to format. Returns: `str`: The formatted code. 
""" has_indent = len(get_indent(code)) > 0 if has_indent: code = f"class Bla:\n{code}" formatted_code = run_ruff(code) return formatted_code[len("class Bla:\n") :] if has_indent else formatted_code def is_copy_consistent(filename, overwrite=False): """ Check if the code commented as a copy in `filename` matches the original. Return the differences or overwrites the content depending on `overwrite`. """ with open(filename, "r", encoding="utf-8", newline="\n") as f: lines = f.readlines() diffs = [] line_index = 0 # Not a for loop cause `lines` is going to change (if `overwrite=True`). while line_index < len(lines): search = _re_copy_warning.search(lines[line_index]) if search is None: line_index += 1 continue # There is some copied code here, let's retrieve the original. indent, object_name, replace_pattern = search.groups() theoretical_code = find_code_in_diffusers(object_name) theoretical_indent = get_indent(theoretical_code) start_index = line_index + 1 if indent == theoretical_indent else line_index + 2 indent = theoretical_indent line_index = start_index # Loop to check the observed code, stop when indentation diminishes or if we see a End copy comment. should_continue = True while line_index < len(lines) and should_continue: line_index += 1 if line_index >= len(lines): break line = lines[line_index] should_continue = _should_continue(line, indent) and re.search(f"^{indent}# End copy", line) is None # Clean up empty lines at the end (if any). while len(lines[line_index - 1]) <= 1: line_index -= 1 observed_code_lines = lines[start_index:line_index] observed_code = "".join(observed_code_lines) # Remove any nested `Copied from` comments to avoid circular copies theoretical_code = [line for line in theoretical_code.split("\n") if _re_copy_warning.search(line) is None] theoretical_code = "\n".join(theoretical_code) # Before comparing, use the `replace_pattern` on the original code. if len(replace_pattern) > 0: patterns = replace_pattern.replace("with", "").split(",") patterns = [_re_replace_pattern.search(p) for p in patterns] for pattern in patterns: if pattern is None: continue obj1, obj2, option = pattern.groups() theoretical_code = re.sub(obj1, obj2, theoretical_code) if option.strip() == "all-casing": theoretical_code = re.sub(obj1.lower(), obj2.lower(), theoretical_code) theoretical_code = re.sub(obj1.upper(), obj2.upper(), theoretical_code) # stylify after replacement. To be able to do that, we need the header (class or function definition) # from the previous line theoretical_code = stylify(lines[start_index - 1] + theoretical_code) theoretical_code = theoretical_code[len(lines[start_index - 1]) :] # Test for a diff and act accordingly. if observed_code != theoretical_code: diffs.append([object_name, start_index]) if overwrite: lines = lines[:start_index] + [theoretical_code] + lines[line_index:] line_index = start_index + 1 if overwrite and len(diffs) > 0: # Warn the user a file has been modified. 
print(f"Detected changes, rewriting {filename}.") with open(filename, "w", encoding="utf-8", newline="\n") as f: f.writelines(lines) return diffs def check_copies(overwrite: bool = False): all_files = glob.glob(os.path.join(DIFFUSERS_PATH, "**/*.py"), recursive=True) diffs = [] for filename in all_files: new_diffs = is_copy_consistent(filename, overwrite) diffs += [f"- {filename}: copy does not match {d[0]} at line {d[1]}" for d in new_diffs] if not overwrite and len(diffs) > 0: diff = "\n".join(diffs) raise Exception( "Found the following copy inconsistencies:\n" + diff + "\nRun `make fix-copies` or `python utils/check_copies.py --fix_and_overwrite` to fix them." ) if __name__ == "__main__": parser = argparse.ArgumentParser() parser.add_argument( "--fix_and_overwrite", action="store_true", help="Whether to fix inconsistencies.", ) args = parser.parse_args() check_copies(args.fix_and_overwrite)
diffusers/utils/check_copies.py/0
{ "file_path": "diffusers/utils/check_copies.py", "repo_id": "diffusers", "token_count": 3397 }
170
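For context, this script enforces annotations of the following shape. The object path and substitution below are made-up examples; the rule is that the body under the comment must stay identical to the referenced source after applying each `A->B` substitution (`all-casing` also substitutes the lower- and upper-case variants), and an `# End copy` comment can terminate the checked region early:

```python
# Copied from diffusers.schedulers.scheduling_ddpm.DDPMScheduler.scale_model_input with DDPM->MyCustom
def scale_model_input(self, sample, timestep=None):
    # This body must match DDPMScheduler.scale_model_input with "DDPM"
    # rewritten to "MyCustom"; `make fix-copies` overwrites it otherwise.
    return sample
```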
# coding=utf-8 # Copyright 2021 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import argparse import os import re import packaging.version PATH_TO_EXAMPLES = "examples/" REPLACE_PATTERNS = { "examples": (re.compile(r'^check_min_version\("[^"]+"\)\s*$', re.MULTILINE), 'check_min_version("VERSION")\n'), "init": (re.compile(r'^__version__\s+=\s+"([^"]+)"\s*$', re.MULTILINE), '__version__ = "VERSION"\n'), "setup": (re.compile(r'^(\s*)version\s*=\s*"[^"]+",', re.MULTILINE), r'\1version="VERSION",'), "doc": (re.compile(r'^(\s*)release\s*=\s*"[^"]+"$', re.MULTILINE), 'release = "VERSION"\n'), } REPLACE_FILES = { "init": "src/diffusers/__init__.py", "setup": "setup.py", } README_FILE = "README.md" def update_version_in_file(fname, version, pattern): """Update the version in one file using a specific pattern.""" with open(fname, "r", encoding="utf-8", newline="\n") as f: code = f.read() re_pattern, replace = REPLACE_PATTERNS[pattern] replace = replace.replace("VERSION", version) code = re_pattern.sub(replace, code) with open(fname, "w", encoding="utf-8", newline="\n") as f: f.write(code) def update_version_in_examples(version): """Update the version in all examples files.""" for folder, directories, fnames in os.walk(PATH_TO_EXAMPLES): # Removing some of the folders with non-actively maintained examples from the walk if "research_projects" in directories: directories.remove("research_projects") if "legacy" in directories: directories.remove("legacy") for fname in fnames: if fname.endswith(".py"): update_version_in_file(os.path.join(folder, fname), version, pattern="examples") def global_version_update(version, patch=False): """Update the version in all needed files.""" for pattern, fname in REPLACE_FILES.items(): update_version_in_file(fname, version, pattern) if not patch: update_version_in_examples(version) def clean_main_ref_in_model_list(): """Replace the links from main doc tp stable doc in the model list of the README.""" # If the introduction or the conclusion of the list change, the prompts may need to be updated. _start_prompt = "๐Ÿค— Transformers currently provides the following architectures" _end_prompt = "1. Want to contribute a new model?" with open(README_FILE, "r", encoding="utf-8", newline="\n") as f: lines = f.readlines() # Find the start of the list. start_index = 0 while not lines[start_index].startswith(_start_prompt): start_index += 1 start_index += 1 index = start_index # Update the lines in the model list. 
while not lines[index].startswith(_end_prompt): if lines[index].startswith("1."): lines[index] = lines[index].replace( "https://huggingface.co/docs/diffusers/main/model_doc", "https://huggingface.co/docs/diffusers/model_doc", ) index += 1 with open(README_FILE, "w", encoding="utf-8", newline="\n") as f: f.writelines(lines) def get_version(): """Reads the current version in the __init__.""" with open(REPLACE_FILES["init"], "r") as f: code = f.read() default_version = REPLACE_PATTERNS["init"][0].search(code).groups()[0] return packaging.version.parse(default_version) def pre_release_work(patch=False): """Do all the necessary pre-release steps.""" # First let's get the default version: base version if we are in dev, bump minor otherwise. default_version = get_version() if patch and default_version.is_devrelease: raise ValueError("Can't create a patch version from the dev branch, checkout a released version!") if default_version.is_devrelease: default_version = default_version.base_version elif patch: default_version = f"{default_version.major}.{default_version.minor}.{default_version.micro + 1}" else: default_version = f"{default_version.major}.{default_version.minor + 1}.0" # Now let's ask nicely if that's the right one. version = input(f"Which version are you releasing? [{default_version}]") if len(version) == 0: version = default_version print(f"Updating version to {version}.") global_version_update(version, patch=patch) # if not patch: # print("Cleaning main README, don't forget to run `make fix-copies`.") # clean_main_ref_in_model_list() def post_release_work(): """Do all the necessary post-release steps.""" # First let's get the current version current_version = get_version() dev_version = f"{current_version.major}.{current_version.minor + 1}.0.dev0" current_version = current_version.base_version # Check with the user we got that right. version = input(f"Which version are we developing now? [{dev_version}]") if len(version) == 0: version = dev_version print(f"Updating version to {version}.") global_version_update(version) # print("Cleaning main README, don't forget to run `make fix-copies`.") # clean_main_ref_in_model_list() if __name__ == "__main__": parser = argparse.ArgumentParser() parser.add_argument("--post_release", action="store_true", help="Whether this is pre or post release.") parser.add_argument("--patch", action="store_true", help="Whether or not this is a patch release.") args = parser.parse_args() if not args.post_release: pre_release_work(patch=args.patch) elif args.patch: print("Nothing to do after a patch :-)") else: post_release_work()
diffusers/utils/release.py/0
{ "file_path": "diffusers/utils/release.py", "repo_id": "diffusers", "token_count": 2306 }
171
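The version arithmetic in `pre_release_work`/`post_release_work` reduces to a few `packaging.version` operations. A sketch with an assumed current version:

```python
import packaging.version

current = packaging.version.parse("0.27.0.dev0")

# pre-release from a dev branch: strip the .dev suffix
release = current.base_version  # "0.27.0"
# patch bump (the script refuses this when starting from a dev version)
patch = f"{current.major}.{current.minor}.{current.micro + 1}"  # "0.27.1"
# post-release: bump the minor version and re-enter dev
next_dev = f"{current.major}.{current.minor + 1}.0.dev0"  # "0.28.0.dev0"
```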
#!/usr/bin/env python # Copyright 2024 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Capture video feed from a camera as raw images.""" import argparse import datetime as dt from pathlib import Path import cv2 def display_and_save_video_stream(output_dir: Path, fps: int, width: int, height: int): now = dt.datetime.now() capture_dir = output_dir / f"{now:%Y-%m-%d}" / f"{now:%H-%M-%S}" if not capture_dir.exists(): capture_dir.mkdir(parents=True, exist_ok=True) # Opens the default webcam cap = cv2.VideoCapture(0) if not cap.isOpened(): print("Error: Could not open video stream.") return cap.set(cv2.CAP_PROP_FPS, fps) cap.set(cv2.CAP_PROP_FRAME_WIDTH, width) cap.set(cv2.CAP_PROP_FRAME_HEIGHT, height) frame_index = 0 while True: ret, frame = cap.read() if not ret: print("Error: Could not read frame.") break cv2.imshow("Video Stream", frame) cv2.imwrite(str(capture_dir / f"frame_{frame_index:06d}.png"), frame) frame_index += 1 # Break the loop on 'q' key press if cv2.waitKey(1) & 0xFF == ord("q"): break # Release the capture and destroy all windows cap.release() cv2.destroyAllWindows() if __name__ == "__main__": parser = argparse.ArgumentParser() parser.add_argument( "--output-dir", type=Path, default=Path("outputs/cam_capture/"), help="Directory where the capture images are written. A subfolder named with the current date & time will be created inside it for each capture.", ) parser.add_argument( "--fps", type=int, default=30, help="Frames Per Second of the capture.", ) parser.add_argument( "--width", type=int, default=1280, help="Width of the captured images.", ) parser.add_argument( "--height", type=int, default=720, help="Height of the captured images.", ) args = parser.parse_args() display_and_save_video_stream(**vars(args))
lerobot/benchmarks/video/capture_camera_feed.py/0
{ "file_path": "lerobot/benchmarks/video/capture_camera_feed.py", "repo_id": "lerobot", "token_count": 1061 }
172
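Assuming the module is importable, calling the capture function directly is equivalent to the CLI entry point:

```python
from pathlib import Path

# from lerobot.benchmarks.video.capture_camera_feed import display_and_save_video_stream
display_and_save_video_stream(Path("outputs/cam_capture/"), fps=30, width=1280, height=720)
# Press 'q' in the preview window to stop; frames land in outputs/cam_capture/<date>/<time>/.
```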
#!/usr/bin/env python # Copyright 2024 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ This file contains download scripts for raw datasets. Example of usage: ``` python lerobot/common/datasets/push_dataset_to_hub/_download_raw.py \ --raw-dir data/lerobot-raw/pusht_raw \ --repo-id lerobot-raw/pusht_raw ``` """ import argparse import logging import warnings from pathlib import Path from huggingface_hub import snapshot_download from lerobot.common.datasets.push_dataset_to_hub.utils import check_repo_id # {raw_repo_id: raw_format} AVAILABLE_RAW_REPO_IDS = { "lerobot-raw/aloha_mobile_cabinet_raw": "aloha_hdf5", "lerobot-raw/aloha_mobile_chair_raw": "aloha_hdf5", "lerobot-raw/aloha_mobile_elevator_raw": "aloha_hdf5", "lerobot-raw/aloha_mobile_shrimp_raw": "aloha_hdf5", "lerobot-raw/aloha_mobile_wash_pan_raw": "aloha_hdf5", "lerobot-raw/aloha_mobile_wipe_wine_raw": "aloha_hdf5", "lerobot-raw/aloha_sim_insertion_human_raw": "aloha_hdf5", "lerobot-raw/aloha_sim_insertion_scripted_raw": "aloha_hdf5", "lerobot-raw/aloha_sim_transfer_cube_human_raw": "aloha_hdf5", "lerobot-raw/aloha_sim_transfer_cube_scripted_raw": "aloha_hdf5", "lerobot-raw/aloha_static_battery_raw": "aloha_hdf5", "lerobot-raw/aloha_static_candy_raw": "aloha_hdf5", "lerobot-raw/aloha_static_coffee_new_raw": "aloha_hdf5", "lerobot-raw/aloha_static_coffee_raw": "aloha_hdf5", "lerobot-raw/aloha_static_cups_open_raw": "aloha_hdf5", "lerobot-raw/aloha_static_fork_pick_up_raw": "aloha_hdf5", "lerobot-raw/aloha_static_pingpong_test_raw": "aloha_hdf5", "lerobot-raw/aloha_static_pro_pencil_raw": "aloha_hdf5", "lerobot-raw/aloha_static_screw_driver_raw": "aloha_hdf5", "lerobot-raw/aloha_static_tape_raw": "aloha_hdf5", "lerobot-raw/aloha_static_thread_velcro_raw": "aloha_hdf5", "lerobot-raw/aloha_static_towel_raw": "aloha_hdf5", "lerobot-raw/aloha_static_vinh_cup_left_raw": "aloha_hdf5", "lerobot-raw/aloha_static_vinh_cup_raw": "aloha_hdf5", "lerobot-raw/aloha_static_ziploc_slide_raw": "aloha_hdf5", "lerobot-raw/umi_cup_in_the_wild_raw": "umi_zarr", "lerobot-raw/pusht_raw": "pusht_zarr", "lerobot-raw/unitreeh1_fold_clothes_raw": "aloha_hdf5", "lerobot-raw/unitreeh1_rearrange_objects_raw": "aloha_hdf5", "lerobot-raw/unitreeh1_two_robot_greeting_raw": "aloha_hdf5", "lerobot-raw/unitreeh1_warehouse_raw": "aloha_hdf5", "lerobot-raw/xarm_lift_medium_raw": "xarm_pkl", "lerobot-raw/xarm_lift_medium_replay_raw": "xarm_pkl", "lerobot-raw/xarm_push_medium_raw": "xarm_pkl", "lerobot-raw/xarm_push_medium_replay_raw": "xarm_pkl", "lerobot-raw/fractal20220817_data_raw": "openx_rlds.fractal20220817_data", "lerobot-raw/kuka_raw": "openx_rlds.kuka", "lerobot-raw/bridge_openx_raw": "openx_rlds.bridge_openx", "lerobot-raw/taco_play_raw": "openx_rlds.taco_play", "lerobot-raw/jaco_play_raw": "openx_rlds.jaco_play", "lerobot-raw/berkeley_cable_routing_raw": "openx_rlds.berkeley_cable_routing", "lerobot-raw/roboturk_raw": "openx_rlds.roboturk", "lerobot-raw/nyu_door_opening_surprising_effectiveness_raw": 
"openx_rlds.nyu_door_opening_surprising_effectiveness", "lerobot-raw/viola_raw": "openx_rlds.viola", "lerobot-raw/berkeley_autolab_ur5_raw": "openx_rlds.berkeley_autolab_ur5", "lerobot-raw/toto_raw": "openx_rlds.toto", "lerobot-raw/language_table_raw": "openx_rlds.language_table", "lerobot-raw/columbia_cairlab_pusht_real_raw": "openx_rlds.columbia_cairlab_pusht_real", "lerobot-raw/stanford_kuka_multimodal_dataset_raw": "openx_rlds.stanford_kuka_multimodal_dataset", "lerobot-raw/nyu_rot_dataset_raw": "openx_rlds.nyu_rot_dataset", "lerobot-raw/io_ai_tech_raw": "openx_rlds.io_ai_tech", "lerobot-raw/stanford_hydra_dataset_raw": "openx_rlds.stanford_hydra_dataset", "lerobot-raw/austin_buds_dataset_raw": "openx_rlds.austin_buds_dataset", "lerobot-raw/nyu_franka_play_dataset_raw": "openx_rlds.nyu_franka_play_dataset", "lerobot-raw/maniskill_dataset_raw": "openx_rlds.maniskill_dataset", "lerobot-raw/furniture_bench_dataset_raw": "openx_rlds.furniture_bench_dataset", "lerobot-raw/cmu_franka_exploration_dataset_raw": "openx_rlds.cmu_franka_exploration_dataset", "lerobot-raw/ucsd_kitchen_dataset_raw": "openx_rlds.ucsd_kitchen_dataset", "lerobot-raw/ucsd_pick_and_place_dataset_raw": "openx_rlds.ucsd_pick_and_place_dataset", "lerobot-raw/spoc_raw": "openx_rlds.spoc", "lerobot-raw/austin_sailor_dataset_raw": "openx_rlds.austin_sailor_dataset", "lerobot-raw/austin_sirius_dataset_raw": "openx_rlds.austin_sirius_dataset", "lerobot-raw/bc_z_raw": "openx_rlds.bc_z", "lerobot-raw/utokyo_pr2_opening_fridge_raw": "openx_rlds.utokyo_pr2_opening_fridge", "lerobot-raw/utokyo_pr2_tabletop_manipulation_raw": "openx_rlds.utokyo_pr2_tabletop_manipulation", "lerobot-raw/utokyo_xarm_pick_and_place_raw": "openx_rlds.utokyo_xarm_pick_and_place", "lerobot-raw/utokyo_xarm_bimanual_raw": "openx_rlds.utokyo_xarm_bimanual", "lerobot-raw/utokyo_saytap_raw": "openx_rlds.utokyo_saytap", "lerobot-raw/robo_net_raw": "openx_rlds.robo_net", "lerobot-raw/robo_set_raw": "openx_rlds.robo_set", "lerobot-raw/berkeley_mvp_raw": "openx_rlds.berkeley_mvp", "lerobot-raw/berkeley_rpt_raw": "openx_rlds.berkeley_rpt", "lerobot-raw/kaist_nonprehensile_raw": "openx_rlds.kaist_nonprehensile", "lerobot-raw/stanford_mask_vit_raw": "openx_rlds.stanford_mask_vit", "lerobot-raw/tokyo_u_lsmo_raw": "openx_rlds.tokyo_u_lsmo", "lerobot-raw/dlr_sara_pour_raw": "openx_rlds.dlr_sara_pour", "lerobot-raw/dlr_sara_grid_clamp_raw": "openx_rlds.dlr_sara_grid_clamp", "lerobot-raw/dlr_edan_shared_control_raw": "openx_rlds.dlr_edan_shared_control", "lerobot-raw/asu_table_top_raw": "openx_rlds.asu_table_top", "lerobot-raw/stanford_robocook_raw": "openx_rlds.stanford_robocook", "lerobot-raw/imperialcollege_sawyer_wrist_cam_raw": "openx_rlds.imperialcollege_sawyer_wrist_cam", "lerobot-raw/iamlab_cmu_pickup_insert_raw": "openx_rlds.iamlab_cmu_pickup_insert", "lerobot-raw/uiuc_d3field_raw": "openx_rlds.uiuc_d3field", "lerobot-raw/utaustin_mutex_raw": "openx_rlds.utaustin_mutex", "lerobot-raw/berkeley_fanuc_manipulation_raw": "openx_rlds.berkeley_fanuc_manipulation", "lerobot-raw/cmu_playing_with_food_raw": "openx_rlds.cmu_playing_with_food", "lerobot-raw/cmu_play_fusion_raw": "openx_rlds.cmu_play_fusion", "lerobot-raw/cmu_stretch_raw": "openx_rlds.cmu_stretch", "lerobot-raw/berkeley_gnm_recon_raw": "openx_rlds.berkeley_gnm_recon", "lerobot-raw/berkeley_gnm_cory_hall_raw": "openx_rlds.berkeley_gnm_cory_hall", "lerobot-raw/berkeley_gnm_sac_son_raw": "openx_rlds.berkeley_gnm_sac_son", "lerobot-raw/droid_raw": "openx_rlds.droid", "lerobot-raw/droid_100_raw": 
"openx_rlds.droid100", "lerobot-raw/fmb_raw": "openx_rlds.fmb", "lerobot-raw/dobbe_raw": "openx_rlds.dobbe", "lerobot-raw/usc_cloth_sim_raw": "openx_rlds.usc_cloth_sim", "lerobot-raw/plex_robosuite_raw": "openx_rlds.plex_robosuite", "lerobot-raw/conq_hose_manipulation_raw": "openx_rlds.conq_hose_manipulation", "lerobot-raw/vima_raw": "openx_rlds.vima", "lerobot-raw/robot_vqa_raw": "openx_rlds.robot_vqa", "lerobot-raw/mimic_play_raw": "openx_rlds.mimic_play", "lerobot-raw/tidybot_raw": "openx_rlds.tidybot", "lerobot-raw/eth_agent_affordances_raw": "openx_rlds.eth_agent_affordances", } def download_raw(raw_dir: Path, repo_id: str): check_repo_id(repo_id) user_id, dataset_id = repo_id.split("/") if not dataset_id.endswith("_raw"): warnings.warn( f"""`dataset_id` ({dataset_id}) doesn't end with '_raw' (e.g. 'lerobot/pusht_raw'). Following this naming convention by renaming your repository is advised, but not mandatory.""", stacklevel=1, ) # Send warning if raw_dir isn't well formated if raw_dir.parts[-2] != user_id or raw_dir.parts[-1] != dataset_id: warnings.warn( f"""`raw_dir` ({raw_dir}) doesn't contain a community or user id `/` the name of the dataset that match the `repo_id` (e.g. 'data/lerobot/pusht_raw'). Following this naming convention is advised, but not mandatory.""", stacklevel=1, ) raw_dir.mkdir(parents=True, exist_ok=True) logging.info(f"Start downloading from huggingface.co/{user_id} for {dataset_id}") snapshot_download(repo_id, repo_type="dataset", local_dir=raw_dir) logging.info(f"Finish downloading from huggingface.co/{user_id} for {dataset_id}") def download_all_raw_datasets(data_dir: Path | None = None): if data_dir is None: data_dir = Path("data") for repo_id in AVAILABLE_RAW_REPO_IDS: raw_dir = data_dir / repo_id download_raw(raw_dir, repo_id) def main(): parser = argparse.ArgumentParser( description=f"""A script to download raw datasets from Hugging Face hub to a local directory. Here is a non exhaustive list of available repositories to use in `--repo-id`: {list(AVAILABLE_RAW_REPO_IDS.keys())}""", ) parser.add_argument( "--raw-dir", type=Path, required=True, help="Directory containing input raw datasets (e.g. `data/aloha_mobile_chair_raw` or `data/pusht_raw).", ) parser.add_argument( "--repo-id", type=str, required=True, help="""Repositery identifier on Hugging Face: a community or a user name `/` the name of the dataset (e.g. `lerobot/pusht_raw`, `cadene/aloha_sim_insertion_human_raw`).""", ) args = parser.parse_args() download_raw(**vars(args)) if __name__ == "__main__": main()
lerobot/lerobot/common/datasets/push_dataset_to_hub/_download_raw.py/0
{ "file_path": "lerobot/lerobot/common/datasets/push_dataset_to_hub/_download_raw.py", "repo_id": "lerobot", "token_count": 4911 }
173
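Assuming the module is importable, the CLI example from the docstring maps to a single call:

```python
from pathlib import Path

# from lerobot.common.datasets.push_dataset_to_hub._download_raw import download_raw
download_raw(raw_dir=Path("data/lerobot-raw/pusht_raw"), repo_id="lerobot-raw/pusht_raw")
```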
#!/usr/bin/env python # Copyright 2024 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import collections from typing import Any, Callable, Dict, Sequence import torch from torchvision.transforms import v2 from torchvision.transforms.v2 import Transform from torchvision.transforms.v2 import functional as F # noqa: N812 class RandomSubsetApply(Transform): """Apply a random subset of N transformations from a list of transformations. Args: transforms: list of transformations. p: represents the multinomial probabilities (with no replacement) used for sampling the transform. If the sum of the weights is not 1, they will be normalized. If ``None`` (default), all transforms have the same probability. n_subset: number of transformations to apply. If ``None``, all transforms are applied. Must be in [1, len(transforms)]. random_order: apply transformations in a random order. """ def __init__( self, transforms: Sequence[Callable], p: list[float] | None = None, n_subset: int | None = None, random_order: bool = False, ) -> None: super().__init__() if not isinstance(transforms, Sequence): raise TypeError("Argument transforms should be a sequence of callables") if p is None: p = [1] * len(transforms) elif len(p) != len(transforms): raise ValueError( f"Length of p doesn't match the number of transforms: {len(p)} != {len(transforms)}" ) if n_subset is None: n_subset = len(transforms) elif not isinstance(n_subset, int): raise TypeError("n_subset should be an int or None") elif not (1 <= n_subset <= len(transforms)): raise ValueError(f"n_subset should be in the interval [1, {len(transforms)}]") self.transforms = transforms total = sum(p) self.p = [prob / total for prob in p] self.n_subset = n_subset self.random_order = random_order def forward(self, *inputs: Any) -> Any: needs_unpacking = len(inputs) > 1 selected_indices = torch.multinomial(torch.tensor(self.p), self.n_subset) if not self.random_order: selected_indices = selected_indices.sort().values selected_transforms = [self.transforms[i] for i in selected_indices] for transform in selected_transforms: outputs = transform(*inputs) inputs = outputs if needs_unpacking else (outputs,) return outputs def extra_repr(self) -> str: return ( f"transforms={self.transforms}, " f"p={self.p}, " f"n_subset={self.n_subset}, " f"random_order={self.random_order}" ) class SharpnessJitter(Transform): """Randomly change the sharpness of an image or video. Similar to a v2.RandomAdjustSharpness with p=1 and a sharpness_factor sampled randomly. While v2.RandomAdjustSharpness applies โ€” with a given probability โ€” a fixed sharpness_factor to an image, SharpnessJitter applies a random sharpness_factor each time. This is to have a more diverse set of augmentations as a result. A sharpness_factor of 0 gives a blurred image, 1 gives the original image while 2 increases the sharpness by a factor of 2. If the input is a :class:`torch.Tensor`, it is expected to have [..., 1 or 3, H, W] shape, where ... means an arbitrary number of leading dimensions. 
Args: sharpness: How much to jitter sharpness. sharpness_factor is chosen uniformly from [max(0, 1 - sharpness), 1 + sharpness] or the given [min, max]. Should be non negative numbers. """ def __init__(self, sharpness: float | Sequence[float]) -> None: super().__init__() self.sharpness = self._check_input(sharpness) def _check_input(self, sharpness): if isinstance(sharpness, (int, float)): if sharpness < 0: raise ValueError("If sharpness is a single number, it must be non negative.") sharpness = [1.0 - sharpness, 1.0 + sharpness] sharpness[0] = max(sharpness[0], 0.0) elif isinstance(sharpness, collections.abc.Sequence) and len(sharpness) == 2: sharpness = [float(v) for v in sharpness] else: raise TypeError(f"{sharpness=} should be a single number or a sequence with length 2.") if not 0.0 <= sharpness[0] <= sharpness[1]: raise ValueError(f"sharpnesss values should be between (0., inf), but got {sharpness}.") return float(sharpness[0]), float(sharpness[1]) def _generate_value(self, left: float, right: float) -> float: return torch.empty(1).uniform_(left, right).item() def _transform(self, inpt: Any, params: Dict[str, Any]) -> Any: sharpness_factor = self._generate_value(self.sharpness[0], self.sharpness[1]) return self._call_kernel(F.adjust_sharpness, inpt, sharpness_factor=sharpness_factor) def get_image_transforms( brightness_weight: float = 1.0, brightness_min_max: tuple[float, float] | None = None, contrast_weight: float = 1.0, contrast_min_max: tuple[float, float] | None = None, saturation_weight: float = 1.0, saturation_min_max: tuple[float, float] | None = None, hue_weight: float = 1.0, hue_min_max: tuple[float, float] | None = None, sharpness_weight: float = 1.0, sharpness_min_max: tuple[float, float] | None = None, max_num_transforms: int | None = None, random_order: bool = False, ): def check_value(name, weight, min_max): if min_max is not None: if len(min_max) != 2: raise ValueError( f"`{name}_min_max` is expected to be a tuple of 2 dimensions, but {min_max} provided." ) if weight < 0.0: raise ValueError( f"`{name}_weight` is expected to be 0 or positive, but is negative ({weight})." ) check_value("brightness", brightness_weight, brightness_min_max) check_value("contrast", contrast_weight, contrast_min_max) check_value("saturation", saturation_weight, saturation_min_max) check_value("hue", hue_weight, hue_min_max) check_value("sharpness", sharpness_weight, sharpness_min_max) weights = [] transforms = [] if brightness_min_max is not None and brightness_weight > 0.0: weights.append(brightness_weight) transforms.append(v2.ColorJitter(brightness=brightness_min_max)) if contrast_min_max is not None and contrast_weight > 0.0: weights.append(contrast_weight) transforms.append(v2.ColorJitter(contrast=contrast_min_max)) if saturation_min_max is not None and saturation_weight > 0.0: weights.append(saturation_weight) transforms.append(v2.ColorJitter(saturation=saturation_min_max)) if hue_min_max is not None and hue_weight > 0.0: weights.append(hue_weight) transforms.append(v2.ColorJitter(hue=hue_min_max)) if sharpness_min_max is not None and sharpness_weight > 0.0: weights.append(sharpness_weight) transforms.append(SharpnessJitter(sharpness=sharpness_min_max)) n_subset = len(transforms) if max_num_transforms is not None: n_subset = min(n_subset, max_num_transforms) if n_subset == 0: return v2.Identity() else: # TODO(rcadene, aliberts): add v2.ToDtype float16? return RandomSubsetApply(transforms, p=weights, n_subset=n_subset, random_order=random_order)
lerobot/lerobot/common/datasets/transforms.py/0
{ "file_path": "lerobot/lerobot/common/datasets/transforms.py", "repo_id": "lerobot", "token_count": 3213 }
174
#!/usr/bin/env python

# Copyright 2024 Seungjae Lee and Yibin Wang and Haritheja Etukuru
# and H. Jin Kim and Nur Muhammad Mahi Shafiullah and Lerrel Pinto
# and The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from dataclasses import dataclass, field


@dataclass
class VQBeTConfig:
    """Configuration class for VQ-BeT.

    Defaults are configured for training with PushT providing proprioceptive and single camera observations.

    The parameters you will most likely need to change are the ones which depend on the environment / sensors.
    Those are: `input_shapes` and `output_shapes`.

    Notes on the inputs and outputs:
        - "observation.state" is required as an input key.
        - At least one key starting with "observation.image" is required as an input.
        - If there are multiple keys beginning with "observation.image" they are treated as multiple camera
          views. Right now we only support all images having the same shape.
        - "action" is required as an output key.

    Args:
        n_obs_steps: Number of environment steps worth of observations to pass to the policy (takes the
            current step and additional steps going back).
        n_action_pred_token: Total number of current token and future tokens that VQ-BeT predicts.
        action_chunk_size: Action chunk size of each action prediction token.
        input_shapes: A dictionary defining the shapes of the input data for the policy. The key represents
            the input data name, and the value is a list indicating the dimensions of the corresponding data.
            For example, "observation.image" refers to an input from a camera with dimensions [3, 96, 96],
            indicating it has three color channels and 96x96 resolution. Importantly, `input_shapes` doesn't
            include the batch dimension or temporal dimension.
        output_shapes: A dictionary defining the shapes of the output data for the policy. The key represents
            the output data name, and the value is a list indicating the dimensions of the corresponding data.
            For example, "action" refers to an output shape of [14], indicating 14-dimensional actions.
            Importantly, `output_shapes` doesn't include the batch dimension or temporal dimension.
        input_normalization_modes: A dictionary with key representing the modality (e.g. "observation.state"),
            and the value specifies the normalization mode to apply. The two available modes are "mean_std"
            which subtracts the mean and divides by the standard deviation and "min_max" which rescales to a
            [-1, 1] range.
        output_normalization_modes: Similar dictionary as `input_normalization_modes`, but to unnormalize to
            the original scale. Note that this is also used for normalizing the training targets.
        vision_backbone: Name of the torchvision resnet backbone to use for encoding images.
        crop_shape: (H, W) shape to crop images to as a preprocessing step for the vision backbone. Must fit
            within the image size. If None, no cropping is done.
        crop_is_random: Whether the crop should be random at training time (it's always a center crop in eval
            mode).
        pretrained_backbone_weights: Pretrained weights from torchvision to initialize the backbone.
            `None` means no pretrained weights.
        use_group_norm: Whether to replace batch normalization with group normalization in the backbone.
            The group sizes are set to be about 16 (to be precise, feature_dim // 16).
        spatial_softmax_num_keypoints: Number of keypoints for SpatialSoftmax.
        n_vqvae_training_steps: Number of optimization steps for training Residual VQ.
        vqvae_n_embed: Number of embedding vectors in the RVQ dictionary (each layer).
        vqvae_embedding_dim: Dimension of each embedding vector in the RVQ dictionary.
        vqvae_enc_hidden_dim: Size of hidden dimensions of the Encoder / Decoder part of the Residual VQ-VAE.
        gpt_block_size: Max block size of minGPT (should be larger than the number of input tokens).
        gpt_input_dim: Size of the input dimension of GPT. This is also used as the dimension of observation
            features.
        gpt_output_dim: Size of the output dimension of GPT. This is also used as the input dimension of the
            offset / bin prediction heads.
        gpt_n_layer: Number of layers of GPT.
        gpt_n_head: Number of heads of GPT.
        gpt_hidden_dim: Size of hidden dimensions of GPT.
        dropout: Dropout rate for GPT.
        mlp_hidden_dim: Size of hidden dimensions of the offset head / bin prediction head parts of VQ-BeT.
        offset_loss_weight: A constant that is multiplied to the offset loss.
        primary_code_loss_weight: A constant that is multiplied to the primary code prediction loss.
        secondary_code_loss_weight: A constant that is multiplied to the secondary code prediction loss.
        bet_softmax_temperature: Sampling temperature of code for rollout with VQ-BeT.
        sequentially_select: Whether to select the primary / secondary codes sequentially (pick the primary
            code, then select the secondary code) or at the same time.
    """

    # Inputs / output structure.
    n_obs_steps: int = 5
    n_action_pred_token: int = 3
    action_chunk_size: int = 5

    input_shapes: dict[str, list[int]] = field(
        default_factory=lambda: {
            "observation.image": [3, 96, 96],
            "observation.state": [2],
        }
    )
    output_shapes: dict[str, list[int]] = field(
        default_factory=lambda: {
            "action": [2],
        }
    )

    # Normalization / Unnormalization
    input_normalization_modes: dict[str, str] = field(
        default_factory=lambda: {
            "observation.image": "mean_std",
            "observation.state": "min_max",
        }
    )
    output_normalization_modes: dict[str, str] = field(default_factory=lambda: {"action": "min_max"})

    # Architecture / modeling.
    # Vision backbone.
    vision_backbone: str = "resnet18"
    crop_shape: tuple[int, int] | None = (84, 84)
    crop_is_random: bool = True
    pretrained_backbone_weights: str | None = None
    use_group_norm: bool = True
    spatial_softmax_num_keypoints: int = 32
    # VQ-VAE
    n_vqvae_training_steps: int = 20000
    vqvae_n_embed: int = 16
    vqvae_embedding_dim: int = 256
    vqvae_enc_hidden_dim: int = 128
    # VQ-BeT
    gpt_block_size: int = 500
    gpt_input_dim: int = 512
    gpt_output_dim: int = 512
    gpt_n_layer: int = 8
    gpt_n_head: int = 8
    gpt_hidden_dim: int = 512
    dropout: float = 0.1
    mlp_hidden_dim: int = 1024
    offset_loss_weight: float = 10000.0
    primary_code_loss_weight: float = 5.0
    secondary_code_loss_weight: float = 0.5
    bet_softmax_temperature: float = 0.1
    sequentially_select: bool = False

    def __post_init__(self):
        """Input validation (not exhaustive)."""
        if not self.vision_backbone.startswith("resnet"):
            raise ValueError(
                f"`vision_backbone` must be one of the ResNet variants. Got {self.vision_backbone}."
            )

        image_keys = {k for k in self.input_shapes if k.startswith("observation.image")}

        if self.crop_shape is not None:
            for image_key in image_keys:
                if (
                    self.crop_shape[0] > self.input_shapes[image_key][1]
                    or self.crop_shape[1] > self.input_shapes[image_key][2]
                ):
                    raise ValueError(
                        f"`crop_shape` should fit within `input_shapes[{image_key}]`. Got {self.crop_shape} "
                        f"for `crop_shape` and {self.input_shapes[image_key]} for "
                        f"`input_shapes[{image_key}]`."
                    )

        # Check that all input images have the same shape.
        first_image_key = next(iter(image_keys))
        for image_key in image_keys:
            if self.input_shapes[image_key] != self.input_shapes[first_image_key]:
                raise ValueError(
                    f"`input_shapes[{image_key}]` does not match `input_shapes[{first_image_key}]`, but we "
                    "expect all image shapes to match."
                )
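A small instantiation sketch (the 128x128 camera and the 7-dim state/action are made-up values, not PushT defaults; `__post_init__` above validates the crop against the image shape):

```python
from lerobot.common.policies.vqbet.configuration_vqbet import VQBeTConfig

config = VQBeTConfig(
    input_shapes={
        "observation.image": [3, 128, 128],  # hypothetical single camera
        "observation.state": [7],            # hypothetical 7-dim proprioception
    },
    output_shapes={"action": [7]},
    input_normalization_modes={
        "observation.image": "mean_std",
        "observation.state": "min_max",
    },
    output_normalization_modes={"action": "min_max"},
    crop_shape=(112, 112),  # must fit within the 128x128 images (checked in __post_init__)
)
```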
lerobot/lerobot/common/policies/vqbet/configuration_vqbet.py/0
{ "file_path": "lerobot/lerobot/common/policies/vqbet/configuration_vqbet.py", "repo_id": "lerobot", "token_count": 3273 }
175
# @package _global_

fps: 50

eval:
  # `use_async_envs` specifies whether to use asynchronous environments (multiprocessing).
  # Set it to false to avoid some issues with the aloha env.
  use_async_envs: false

env:
  name: aloha
  task: AlohaInsertion-v0
  state_dim: 14
  action_dim: 14
  fps: ${fps}
  episode_length: 400
  gym:
    obs_type: pixels_agent_pos
    render_mode: rgb_array
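This file is normally composed by Hydra (note the `# @package _global_` header). A quick way to inspect the resolved values outside Hydra is a direct OmegaConf load, sketched below:

```python
from omegaconf import OmegaConf

cfg = OmegaConf.load("lerobot/configs/env/aloha.yaml")
OmegaConf.resolve(cfg)  # resolves ${fps} against the top-level `fps` key

print(cfg.env.name)            # aloha
print(cfg.env.fps)             # 50, mirrored from the root fps
print(cfg.env.episode_length)  # 400
```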
lerobot/lerobot/configs/env/aloha.yaml/0
{ "file_path": "lerobot/lerobot/configs/env/aloha.yaml", "repo_id": "lerobot", "token_count": 145 }
176
""" Utilities to control a robot. Useful to record a dataset, replay a recorded episode, run the policy on your robot and record an evaluation dataset, and to recalibrate your robot if needed. Examples of usage: - Recalibrate your robot: ```bash python lerobot/scripts/control_robot.py calibrate ``` - Unlimited teleoperation at highest frequency (~200 Hz is expected), to exit with CTRL+C: ```bash python lerobot/scripts/control_robot.py teleoperate # Remove the cameras from the robot definition. They are not used in 'teleoperate' anyway. python lerobot/scripts/control_robot.py teleoperate --robot-overrides '~cameras' ``` - Unlimited teleoperation at a limited frequency of 30 Hz, to simulate data recording frequency: ```bash python lerobot/scripts/control_robot.py teleoperate \ --fps 30 ``` - Record one episode in order to test replay: ```bash python lerobot/scripts/control_robot.py record \ --fps 30 \ --root tmp/data \ --repo-id $USER/koch_test \ --num-episodes 1 \ --run-compute-stats 0 ``` - Visualize dataset: ```bash python lerobot/scripts/visualize_dataset.py \ --root tmp/data \ --repo-id $USER/koch_test \ --episode-index 0 ``` - Replay this test episode: ```bash python lerobot/scripts/control_robot.py replay \ --fps 30 \ --root tmp/data \ --repo-id $USER/koch_test \ --episode 0 ``` - Record a full dataset in order to train a policy, with 2 seconds of warmup, 30 seconds of recording for each episode, and 10 seconds to reset the environment in between episodes: ```bash python lerobot/scripts/control_robot.py record \ --fps 30 \ --root data \ --repo-id $USER/koch_pick_place_lego \ --num-episodes 50 \ --warmup-time-s 2 \ --episode-time-s 30 \ --reset-time-s 10 ``` **NOTE**: You can use your keyboard to control data recording flow. - Tap right arrow key '->' to early exit while recording an episode and go to resseting the environment. - Tap right arrow key '->' to early exit while resetting the environment and got to recording the next episode. - Tap left arrow key '<-' to early exit and re-record the current episode. - Tap escape key 'esc' to stop the data recording. This might require a sudo permission to allow your terminal to monitor keyboard events. **NOTE**: You can resume/continue data recording by running the same data recording command twice. To avoid resuming by deleting the dataset, use `--force-override 1`. 
- Train on this dataset with the ACT policy:
```bash
DATA_DIR=data python lerobot/scripts/train.py \
    policy=act_koch_real \
    env=koch_real \
    dataset_repo_id=$USER/koch_pick_place_lego \
    hydra.run.dir=outputs/train/act_koch_real
```

- Run the pretrained policy on the robot:
```bash
python lerobot/scripts/control_robot.py record \
    --fps 30 \
    --root data \
    --repo-id $USER/eval_act_koch_real \
    --num-episodes 10 \
    --warmup-time-s 2 \
    --episode-time-s 30 \
    --reset-time-s 10 \
    -p outputs/train/act_koch_real/checkpoints/080000/pretrained_model
```
"""

import argparse
import concurrent.futures
import json
import logging
import os
import platform
import shutil
import time
import traceback
from contextlib import nullcontext
from functools import cache
from pathlib import Path

import cv2
import torch
import tqdm
from omegaconf import DictConfig
from PIL import Image
from termcolor import colored

# from safetensors.torch import load_file, save_file
from lerobot.common.datasets.compute_stats import compute_stats
from lerobot.common.datasets.lerobot_dataset import CODEBASE_VERSION, LeRobotDataset
from lerobot.common.datasets.push_dataset_to_hub.aloha_hdf5_format import to_hf_dataset
from lerobot.common.datasets.push_dataset_to_hub.utils import concatenate_episodes, get_default_encoding
from lerobot.common.datasets.utils import calculate_episode_data_index, create_branch
from lerobot.common.datasets.video_utils import encode_video_frames
from lerobot.common.policies.factory import make_policy
from lerobot.common.robot_devices.robots.factory import make_robot
from lerobot.common.robot_devices.robots.utils import Robot
from lerobot.common.utils.utils import get_safe_torch_device, init_hydra_config, init_logging, set_global_seed
from lerobot.scripts.eval import get_pretrained_policy_path
from lerobot.scripts.push_dataset_to_hub import (
    push_dataset_card_to_hub,
    push_meta_data_to_hub,
    push_videos_to_hub,
    save_meta_data,
)

########################################################################################
# Utilities
########################################################################################


def say(text, blocking=False):
    # Check if mac, linux, or windows.
    if platform.system() == "Darwin":
        cmd = f'say "{text}"'
    elif platform.system() == "Linux":
        cmd = f'spd-say "{text}"'
    elif platform.system() == "Windows":
        cmd = (
            'PowerShell -Command "Add-Type -AssemblyName System.Speech; '
            f"(New-Object System.Speech.Synthesis.SpeechSynthesizer).Speak('{text}')\""
        )

    if not blocking and platform.system() in ["Darwin", "Linux"]:
        # TODO(rcadene): Make it work for Windows
        # Use the ampersand to run command in the background
        cmd += " &"

    os.system(cmd)


def save_image(img_tensor, key, frame_index, episode_index, videos_dir):
    img = Image.fromarray(img_tensor.numpy())
    path = videos_dir / f"{key}_episode_{episode_index:06d}" / f"frame_{frame_index:06d}.png"
    path.parent.mkdir(parents=True, exist_ok=True)
    img.save(str(path), quality=100)


def busy_wait(seconds):
    # Significantly more accurate than `time.sleep`, and mandatory for our use case,
    # but it consumes CPU cycles.
    # TODO(rcadene): find an alternative: from python 3.11, time.sleep is precise
    end_time = time.perf_counter() + seconds
    while time.perf_counter() < end_time:
        pass


def none_or_int(value):
    if value == "None":
        return None
    return int(value)


def log_control_info(robot, dt_s, episode_index=None, frame_index=None, fps=None):
    log_items = []
    if episode_index is not None:
        log_items += [f"ep:{episode_index}"]
    if frame_index is not None:
        log_items += [f"frame:{frame_index}"]

    def log_dt(shortname, dt_val_s):
        nonlocal log_items
        log_items += [f"{shortname}:{dt_val_s * 1000:5.2f} ({1 / dt_val_s:3.1f}hz)"]

    # total step time displayed in milliseconds and its frequency
    log_dt("dt", dt_s)

    for name in robot.leader_arms:
        key = f"read_leader_{name}_pos_dt_s"
        if key in robot.logs:
            log_dt("dtRlead", robot.logs[key])

    for name in robot.follower_arms:
        key = f"write_follower_{name}_goal_pos_dt_s"
        if key in robot.logs:
            log_dt("dtWfoll", robot.logs[key])

        key = f"read_follower_{name}_pos_dt_s"
        if key in robot.logs:
            log_dt("dtRfoll", robot.logs[key])

    for name in robot.cameras:
        key = f"read_camera_{name}_dt_s"
        if key in robot.logs:
            log_dt(f"dtR{name}", robot.logs[key])

    info_str = " ".join(log_items)
    if fps is not None:
        actual_fps = 1 / dt_s
        if actual_fps < fps - 1:
            info_str = colored(info_str, "yellow")
    logging.info(info_str)


@cache
def is_headless():
    """Detects if python is running without a monitor."""
    try:
        import pynput  # noqa

        return False
    except Exception:
        print(
            "Error trying to import pynput. Switching to headless mode. "
            "As a result, the video stream from the cameras won't be shown, "
            "and you won't be able to change the control flow with keyboards. "
            "For more info, see traceback below.\n"
        )
        traceback.print_exc()
        print()
        return True


########################################################################################
# Control modes
########################################################################################


def calibrate(robot: Robot):
    if robot.calibration_path.exists():
        print(f"Removing '{robot.calibration_path}'")
        robot.calibration_path.unlink()

    if robot.is_connected:
        robot.disconnect()

    # Calling `connect` automatically runs calibration
    # when the calibration file is missing
    robot.connect()


def teleoperate(robot: Robot, fps: int | None = None, teleop_time_s: float | None = None):
    # TODO(rcadene): Add option to record logs
    if not robot.is_connected:
        robot.connect()

    start_teleop_t = time.perf_counter()
    while True:
        start_loop_t = time.perf_counter()
        robot.teleop_step()

        if fps is not None:
            dt_s = time.perf_counter() - start_loop_t
            busy_wait(1 / fps - dt_s)

        dt_s = time.perf_counter() - start_loop_t
        log_control_info(robot, dt_s, fps=fps)

        if teleop_time_s is not None and time.perf_counter() - start_teleop_t > teleop_time_s:
            break


def record(
    robot: Robot,
    policy: torch.nn.Module | None = None,
    hydra_cfg: DictConfig | None = None,
    fps: int | None = None,
    root="data",
    repo_id="lerobot/debug",
    warmup_time_s=2,
    episode_time_s=10,
    reset_time_s=5,
    num_episodes=50,
    video=True,
    run_compute_stats=True,
    push_to_hub=True,
    tags=None,
    num_image_writers=8,
    force_override=False,
):
    # TODO(rcadene): Add option to record logs
    # TODO(rcadene): Clean this function via decomposition in higher level functions

    _, dataset_name = repo_id.split("/")
    if dataset_name.startswith("eval_") and policy is None:
        raise ValueError(
            f"Your dataset name begins with 'eval_' ({dataset_name}) but no policy is provided ({policy})."
        )

    if not video:
        raise NotImplementedError()

    if not robot.is_connected:
        robot.connect()

    local_dir = Path(root) / repo_id
    if local_dir.exists() and force_override:
        shutil.rmtree(local_dir)

    episodes_dir = local_dir / "episodes"
    episodes_dir.mkdir(parents=True, exist_ok=True)

    videos_dir = local_dir / "videos"
    videos_dir.mkdir(parents=True, exist_ok=True)

    # Logic to resume data recording
    rec_info_path = episodes_dir / "data_recording_info.json"
    if rec_info_path.exists():
        with open(rec_info_path) as f:
            rec_info = json.load(f)
        episode_index = rec_info["last_episode_index"] + 1
    else:
        episode_index = 0

    if is_headless():
        logging.info(
            "Headless environment detected. On-screen camera display and keyboard inputs will not be available."
        )

    # Allow to exit early while recording an episode or resetting the environment,
    # by tapping the right arrow key '->'. This might require a sudo permission
    # to allow your terminal to monitor keyboard events.
    exit_early = False
    rerecord_episode = False
    stop_recording = False

    # Only import pynput if not in a headless environment
    if not is_headless():
        from pynput import keyboard

        def on_press(key):
            nonlocal exit_early, rerecord_episode, stop_recording
            try:
                if key == keyboard.Key.right:
                    print("Right arrow key pressed. Exiting loop...")
                    exit_early = True
                elif key == keyboard.Key.left:
                    print("Left arrow key pressed. Exiting loop and re-recording the last episode...")
                    rerecord_episode = True
                    exit_early = True
                elif key == keyboard.Key.esc:
                    print("Escape key pressed. Stopping data recording...")
                    stop_recording = True
                    exit_early = True
            except Exception as e:
                print(f"Error handling key press: {e}")

        listener = keyboard.Listener(on_press=on_press)
        listener.start()

    # Load policy if any
    if policy is not None:
        # Check device is available
        device = get_safe_torch_device(hydra_cfg.device, log=True)

        policy.eval()
        policy.to(device)

        torch.backends.cudnn.benchmark = True
        torch.backends.cuda.matmul.allow_tf32 = True
        set_global_seed(hydra_cfg.seed)

        # override fps using policy fps
        fps = hydra_cfg.env.fps

    # Execute a few seconds without recording data, to give time
    # to the robot devices to connect and start synchronizing.
    timestamp = 0
    start_warmup_t = time.perf_counter()
    is_warmup_print = False
    while timestamp < warmup_time_s:
        if not is_warmup_print:
            logging.info("Warming up (no data recording)")
            say("Warming up")
            is_warmup_print = True

        start_loop_t = time.perf_counter()

        if policy is None:
            observation, action = robot.teleop_step(record_data=True)
        else:
            observation = robot.capture_observation()

        if not is_headless():
            image_keys = [key for key in observation if "image" in key]
            for key in image_keys:
                cv2.imshow(key, cv2.cvtColor(observation[key].numpy(), cv2.COLOR_RGB2BGR))
            cv2.waitKey(1)

        dt_s = time.perf_counter() - start_loop_t
        busy_wait(1 / fps - dt_s)

        dt_s = time.perf_counter() - start_loop_t
        log_control_info(robot, dt_s, fps=fps)

        timestamp = time.perf_counter() - start_warmup_t

    # Save images using threads to reach high fps (30 and more).
    # Using `with` to exit smoothly if an exception is raised.
    # Capping the number of worker threads (`num_image_writers`) to avoid blocking the main thread.
    futures = []
    with concurrent.futures.ThreadPoolExecutor(max_workers=num_image_writers) as executor:
        # Start recording all episodes
        while episode_index < num_episodes:
            logging.info(f"Recording episode {episode_index}")
            say(f"Recording episode {episode_index}")
            ep_dict = {}
            frame_index = 0
            timestamp = 0
            start_episode_t = time.perf_counter()
            while timestamp < episode_time_s:
                start_loop_t = time.perf_counter()

                if policy is None:
                    observation, action = robot.teleop_step(record_data=True)
                else:
                    observation = robot.capture_observation()

                image_keys = [key for key in observation if "image" in key]
                not_image_keys = [key for key in observation if "image" not in key]

                for key in image_keys:
                    futures += [
                        executor.submit(
                            save_image, observation[key], key, frame_index, episode_index, videos_dir
                        )
                    ]

                if not is_headless():
                    image_keys = [key for key in observation if "image" in key]
                    for key in image_keys:
                        cv2.imshow(key, cv2.cvtColor(observation[key].numpy(), cv2.COLOR_RGB2BGR))
                    cv2.waitKey(1)

                for key in not_image_keys:
                    if key not in ep_dict:
                        ep_dict[key] = []
                    ep_dict[key].append(observation[key])

                if policy is not None:
                    with (
                        torch.inference_mode(),
                        torch.autocast(device_type=device.type)
                        if device.type == "cuda" and hydra_cfg.use_amp
                        else nullcontext(),
                    ):
                        # Convert to pytorch format: channel first and float32 in [0,1] with batch dimension
                        for name in observation:
                            if "image" in name:
                                observation[name] = observation[name].type(torch.float32) / 255
                                observation[name] = observation[name].permute(2, 0, 1).contiguous()
                            observation[name] = observation[name].unsqueeze(0)
                            observation[name] = observation[name].to(device)

                        # Compute the next action with the policy
                        # based on the current observation
                        action = policy.select_action(observation)

                        # Remove batch dimension
                        action = action.squeeze(0)

                        # Move to cpu, if not already the case
                        action = action.to("cpu")

                    # Order the robot to move
                    robot.send_action(action)
                    action = {"action": action}

                for key in action:
                    if key not in ep_dict:
                        ep_dict[key] = []
                    ep_dict[key].append(action[key])

                frame_index += 1

                dt_s = time.perf_counter() - start_loop_t
                busy_wait(1 / fps - dt_s)

                dt_s = time.perf_counter() - start_loop_t
                log_control_info(robot, dt_s, fps=fps)

                timestamp = time.perf_counter() - start_episode_t
                if exit_early:
                    exit_early = False
                    break

            if not stop_recording:
                # Start resetting env while the executor is finishing
                logging.info("Reset the environment")
                say("Reset the environment")

            timestamp = 0
            start_vencod_t = time.perf_counter()

            # During env reset we save the data and encode the videos
            num_frames = frame_index

            for key in image_keys:
                tmp_imgs_dir = videos_dir / f"{key}_episode_{episode_index:06d}"
                fname = f"{key}_episode_{episode_index:06d}.mp4"
                video_path = local_dir / "videos" / fname
                if video_path.exists():
                    video_path.unlink()
                # Store the reference to the video frame, even though the videos are not yet encoded
                ep_dict[key] = []
                for i in range(num_frames):
                    ep_dict[key].append({"path": f"videos/{fname}", "timestamp": i / fps})

            for key in not_image_keys:
                ep_dict[key] = torch.stack(ep_dict[key])

            for key in action:
                ep_dict[key] = torch.stack(ep_dict[key])

            ep_dict["episode_index"] = torch.tensor([episode_index] * num_frames)
            ep_dict["frame_index"] = torch.arange(0, num_frames, 1)
            ep_dict["timestamp"] = torch.arange(0, num_frames, 1) / fps

            done = torch.zeros(num_frames, dtype=torch.bool)
            done[-1] = True
            ep_dict["next.done"] = done

            ep_path = episodes_dir / f"episode_{episode_index}.pth"
            print("Saving episode dictionary...")
            torch.save(ep_dict, ep_path)

            rec_info = {
                "last_episode_index": episode_index,
            }
            with open(rec_info_path, "w") as f:
                json.dump(rec_info, f)

            is_last_episode = stop_recording or (episode_index == (num_episodes - 1))

            # Wait if necessary
            with tqdm.tqdm(total=reset_time_s, desc="Waiting") as pbar:
                while timestamp < reset_time_s and not is_last_episode:
                    time.sleep(1)
                    timestamp = time.perf_counter() - start_vencod_t
                    pbar.update(1)
                    if exit_early:
                        exit_early = False
                        break

            # Skip updating episode index which forces re-recording episode
            if rerecord_episode:
                rerecord_episode = False
                continue

            episode_index += 1

            if is_last_episode:
                logging.info("Done recording")
                say("Done recording", blocking=True)
                if not is_headless():
                    listener.stop()

                logging.info("Waiting for threads writing the images on disk to terminate...")
                for _ in tqdm.tqdm(
                    concurrent.futures.as_completed(futures), total=len(futures), desc="Writing images"
                ):
                    pass
                break

    robot.disconnect()
    if not is_headless():
        cv2.destroyAllWindows()

    num_episodes = episode_index

    logging.info("Encoding videos")
    say("Encoding videos")
    # Use ffmpeg to convert frames stored as png into mp4 videos
    for episode_index in tqdm.tqdm(range(num_episodes)):
        for key in image_keys:
            tmp_imgs_dir = videos_dir / f"{key}_episode_{episode_index:06d}"
            fname = f"{key}_episode_{episode_index:06d}.mp4"
            video_path = local_dir / "videos" / fname
            if video_path.exists():
                # Skip if video is already encoded. Could be the case when resuming data recording.
                continue
            # note: `encode_video_frames` is a blocking call. Making it asynchronous shouldn't speed up encoding,
            # since video encoding with ffmpeg is already using multithreading.
            encode_video_frames(tmp_imgs_dir, video_path, fps, overwrite=True)
            shutil.rmtree(tmp_imgs_dir)

    logging.info("Concatenating episodes")
    ep_dicts = []
    for episode_index in tqdm.tqdm(range(num_episodes)):
        ep_path = episodes_dir / f"episode_{episode_index}.pth"
        ep_dict = torch.load(ep_path)
        ep_dicts.append(ep_dict)
    data_dict = concatenate_episodes(ep_dicts)

    total_frames = data_dict["frame_index"].shape[0]
    data_dict["index"] = torch.arange(0, total_frames, 1)

    hf_dataset = to_hf_dataset(data_dict, video)
    episode_data_index = calculate_episode_data_index(hf_dataset)
    info = {
        "codebase_version": CODEBASE_VERSION,
        "fps": fps,
        "video": video,
    }
    if video:
        info["encoding"] = get_default_encoding()

    lerobot_dataset = LeRobotDataset.from_preloaded(
        repo_id=repo_id,
        hf_dataset=hf_dataset,
        episode_data_index=episode_data_index,
        info=info,
        videos_dir=videos_dir,
    )
    if run_compute_stats:
        logging.info("Computing dataset statistics")
        say("Computing dataset statistics")
        stats = compute_stats(lerobot_dataset)
        lerobot_dataset.stats = stats
    else:
        stats = {}
        logging.info("Skipping computation of the dataset statistics")

    hf_dataset = hf_dataset.with_format(None)  # to remove transforms that can't be saved
    hf_dataset.save_to_disk(str(local_dir / "train"))

    meta_data_dir = local_dir / "meta_data"
    save_meta_data(info, stats, episode_data_index, meta_data_dir)

    if push_to_hub:
        hf_dataset.push_to_hub(repo_id, revision="main")
        push_meta_data_to_hub(repo_id, meta_data_dir, revision="main")
        push_dataset_card_to_hub(repo_id, revision="main", tags=tags)
        if video:
            push_videos_to_hub(repo_id, videos_dir, revision="main")
        create_branch(repo_id, repo_type="dataset", branch=CODEBASE_VERSION)

    logging.info("Exiting")
    say("Exiting")
    return lerobot_dataset


def replay(robot: Robot, episode: int, fps: int | None = None, root="data", repo_id="lerobot/debug"):
    # TODO(rcadene): Add option to record logs
    local_dir = Path(root) / repo_id
    if not local_dir.exists():
        raise ValueError(local_dir)

    dataset = LeRobotDataset(repo_id, root=root)
    items = dataset.hf_dataset.select_columns("action")
    from_idx = dataset.episode_data_index["from"][episode].item()
    to_idx = dataset.episode_data_index["to"][episode].item()

    if not robot.is_connected:
        robot.connect()

    logging.info("Replaying episode")
    say("Replaying episode", blocking=True)
    for idx in range(from_idx, to_idx):
        start_episode_t = time.perf_counter()

        action = items[idx]["action"]
        robot.send_action(action)

        dt_s = time.perf_counter() - start_episode_t
        busy_wait(1 / fps - dt_s)

        dt_s = time.perf_counter() - start_episode_t
        log_control_info(robot, dt_s, fps=fps)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    subparsers = parser.add_subparsers(dest="mode", required=True)

    # Set common options for all the subparsers
    base_parser = argparse.ArgumentParser(add_help=False)
    base_parser.add_argument(
        "--robot-path",
        type=str,
        default="lerobot/configs/robot/koch.yaml",
        help="Path to robot yaml file used to instantiate the robot using `make_robot` factory function.",
    )
    base_parser.add_argument(
        "--robot-overrides",
        type=str,
        nargs="*",
        help="Any key=value arguments to override config values (use dots for nested=overrides)",
    )

    parser_calib = subparsers.add_parser("calibrate", parents=[base_parser])

    parser_teleop = subparsers.add_parser("teleoperate", parents=[base_parser])
    parser_teleop.add_argument(
        "--fps", type=none_or_int, default=None, help="Frames per second (set to None to disable)"
    )

    parser_record = subparsers.add_parser("record", parents=[base_parser])
    parser_record.add_argument(
        "--fps", type=none_or_int, default=None, help="Frames per second (set to None to disable)"
    )
    parser_record.add_argument(
        "--root",
        type=Path,
        default="data",
        help="Root directory where the dataset will be stored locally at '{root}/{repo_id}' (e.g. 'data/hf_username/dataset_name').",
    )
    parser_record.add_argument(
        "--repo-id",
        type=str,
        default="lerobot/test",
        help="Dataset identifier. By convention it should match '{hf_username}/{dataset_name}' (e.g. `lerobot/test`).",
    )
    parser_record.add_argument(
        "--warmup-time-s",
        type=int,
        default=10,
        help="Number of seconds before starting data collection. It allows the robot devices to warmup and synchronize.",
    )
    parser_record.add_argument(
        "--episode-time-s",
        type=int,
        default=60,
        help="Number of seconds for data recording for each episode.",
    )
    parser_record.add_argument(
        "--reset-time-s",
        type=int,
        default=60,
        help="Number of seconds for resetting the environment after each episode.",
    )
    parser_record.add_argument("--num-episodes", type=int, default=50, help="Number of episodes to record.")
    parser_record.add_argument(
        "--run-compute-stats",
        type=int,
        default=1,
        help="By default, run the computation of the data statistics at the end of data collection. Compute intensive and not required to just replay an episode.",
    )
    parser_record.add_argument(
        "--push-to-hub",
        type=int,
        default=1,
        help="Upload dataset to Hugging Face hub.",
    )
    parser_record.add_argument(
        "--tags",
        type=str,
        nargs="*",
        help="Add tags to your dataset on the hub.",
    )
    parser_record.add_argument(
        "--num-image-writers",
        type=int,
        default=8,
        help="Number of threads writing the frames as png images on disk. Don't set it too high as you might get unstable fps due to the main thread being blocked.",
    )
    parser_record.add_argument(
        "--force-override",
        type=int,
        default=0,
        help="By default, data recording is resumed. When set to 1, delete the local directory and start data recording from scratch.",
    )
    parser_record.add_argument(
        "-p",
        "--pretrained-policy-name-or-path",
        type=str,
        help=(
            "Either the repo ID of a model hosted on the Hub or a path to a directory containing weights "
            "saved using `Policy.save_pretrained`."
        ),
    )
    parser_record.add_argument(
        "--policy-overrides",
        type=str,
        nargs="*",
        help="Any key=value arguments to override config values (use dots for nested=overrides)",
    )

    parser_replay = subparsers.add_parser("replay", parents=[base_parser])
    parser_replay.add_argument(
        "--fps", type=none_or_int, default=None, help="Frames per second (set to None to disable)"
    )
    parser_replay.add_argument(
        "--root",
        type=Path,
        default="data",
        help="Root directory where the dataset will be stored locally at '{root}/{repo_id}' (e.g. 'data/hf_username/dataset_name').",
    )
    parser_replay.add_argument(
        "--repo-id",
        type=str,
        default="lerobot/test",
        help="Dataset identifier. By convention it should match '{hf_username}/{dataset_name}' (e.g. `lerobot/test`).",
    )
    parser_replay.add_argument("--episode", type=int, default=0, help="Index of the episode to replay.")

    args = parser.parse_args()

    init_logging()

    control_mode = args.mode
    robot_path = args.robot_path
    robot_overrides = args.robot_overrides
    kwargs = vars(args)
    del kwargs["mode"]
    del kwargs["robot_path"]
    del kwargs["robot_overrides"]

    robot_cfg = init_hydra_config(robot_path, robot_overrides)
    robot = make_robot(robot_cfg)

    if control_mode == "calibrate":
        calibrate(robot, **kwargs)

    elif control_mode == "teleoperate":
        teleoperate(robot, **kwargs)

    elif control_mode == "record":
        pretrained_policy_name_or_path = args.pretrained_policy_name_or_path
        policy_overrides = args.policy_overrides
        del kwargs["pretrained_policy_name_or_path"]
        del kwargs["policy_overrides"]

        policy_cfg = None
        if pretrained_policy_name_or_path is not None:
            pretrained_policy_path = get_pretrained_policy_path(pretrained_policy_name_or_path)
            policy_cfg = init_hydra_config(pretrained_policy_path / "config.yaml", policy_overrides)
            policy = make_policy(hydra_cfg=policy_cfg, pretrained_policy_name_or_path=pretrained_policy_path)
            record(robot, policy, policy_cfg, **kwargs)
        else:
            record(robot, **kwargs)

    elif control_mode == "replay":
        replay(robot, **kwargs)

    if robot.is_connected:
        # Disconnect manually to avoid a "Core dump" during process
        # termination due to camera threads not properly exiting.
        robot.disconnect()
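The fixed-rate loop pattern used throughout this script, shown in isolation (assumes the `busy_wait` helper defined above; the 30 fps target and the no-op body are placeholders):

```python
import time

fps = 30
for _ in range(100):  # stands in for the teleop/record loop
    start_loop_t = time.perf_counter()

    # ... one step of work (teleop_step, policy inference, logging) ...

    dt_s = time.perf_counter() - start_loop_t
    # Spin away the remainder of the 1/fps budget; a negative argument
    # returns immediately, so loop overruns are tolerated.
    busy_wait(1 / fps - dt_s)
```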
lerobot/lerobot/scripts/control_robot.py/0
{ "file_path": "lerobot/lerobot/scripts/control_robot.py", "repo_id": "lerobot", "token_count": 13429 }
177
version https://git-lfs.github.com/spec/v1 oid sha256:b05f933aa67d559e44f062c8428b2f85ee7b49d3bf0e0302b9b83fb7d48ed0a3 size 2904
lerobot/tests/data/lerobot/aloha_sim_insertion_human/meta_data/stats.safetensors/0
{ "file_path": "lerobot/tests/data/lerobot/aloha_sim_insertion_human/meta_data/stats.safetensors", "repo_id": "lerobot", "token_count": 71 }
178
version https://git-lfs.github.com/spec/v1 oid sha256:5fd5fe80657788d044cdc8a1baf1456c7695cc951049347a469165002a83c6c7 size 247
lerobot/tests/data/lerobot/aloha_sim_insertion_scripted/train/state.json/0
{ "file_path": "lerobot/tests/data/lerobot/aloha_sim_insertion_scripted/train/state.json", "repo_id": "lerobot", "token_count": 64 }
179
version https://git-lfs.github.com/spec/v1 oid sha256:e7ab5c2bd7d176d4d7902a600240318c2828b7d75f4a888d0887327e4eff089d size 65
lerobot/tests/data/lerobot/aloha_sim_transfer_cube_human_image/meta_data/info.json/0
{ "file_path": "lerobot/tests/data/lerobot/aloha_sim_transfer_cube_human_image/meta_data/info.json", "repo_id": "lerobot", "token_count": 66 }
180
version https://git-lfs.github.com/spec/v1 oid sha256:e2e066afefdee57f3bc534085ab7af54e62d3ab2736d42863a89deb743cd0d04 size 1075
lerobot/tests/data/lerobot/aloha_static_candy/train/dataset_info.json/0
{ "file_path": "lerobot/tests/data/lerobot/aloha_static_candy/train/dataset_info.json", "repo_id": "lerobot", "token_count": 66 }
181
version https://git-lfs.github.com/spec/v1 oid sha256:7841afb9ef99c0601448c43a20c25eb029440c73816319c67c5d7e1c5cde2445 size 136
lerobot/tests/data/lerobot/aloha_static_coffee_new/meta_data/episode_data_index.safetensors/0
{ "file_path": "lerobot/tests/data/lerobot/aloha_static_coffee_new/meta_data/episode_data_index.safetensors", "repo_id": "lerobot", "token_count": 65 }
182
version https://git-lfs.github.com/spec/v1 oid sha256:4abbdaeb8375f3cddf4b7061f6c74912b383f8e01c00217e5f14683e3105c59a size 4752
lerobot/tests/data/lerobot/aloha_static_pingpong_test/meta_data/stats.safetensors/0
{ "file_path": "lerobot/tests/data/lerobot/aloha_static_pingpong_test/meta_data/stats.safetensors", "repo_id": "lerobot", "token_count": 68 }
183
version https://git-lfs.github.com/spec/v1 oid sha256:9ee4f3c571ce6822e157e60133bee02245febee93eba5d35458d3c83345f7b87 size 136
lerobot/tests/data/lerobot/aloha_static_towel/meta_data/episode_data_index.safetensors/0
{ "file_path": "lerobot/tests/data/lerobot/aloha_static_towel/meta_data/episode_data_index.safetensors", "repo_id": "lerobot", "token_count": 62 }
184
version https://git-lfs.github.com/spec/v1 oid sha256:39a349eeba5b6f2a9cb4a5ebb33e250766130eb0c40f738c94f13926b8c2cd0c size 4208
lerobot/tests/data/lerobot/aloha_static_ziploc_slide/meta_data/stats.safetensors/0
{ "file_path": "lerobot/tests/data/lerobot/aloha_static_ziploc_slide/meta_data/stats.safetensors", "repo_id": "lerobot", "token_count": 70 }
185
version https://git-lfs.github.com/spec/v1 oid sha256:cf348d2c6a5cdca1f53f2df3ebce0c88230bf0b40870347f5a871c36b039c4de size 65
lerobot/tests/data/lerobot/pusht_image/meta_data/info.json/0
{ "file_path": "lerobot/tests/data/lerobot/pusht_image/meta_data/info.json", "repo_id": "lerobot", "token_count": 68 }
186
version https://git-lfs.github.com/spec/v1 oid sha256:96a44ecf236a7f2eb5f7ffca4dc8ac5e65f0dd2a1c55e35a55cbd364dfbe733d size 247
lerobot/tests/data/lerobot/umi_cup_in_the_wild/train/state.json/0
{ "file_path": "lerobot/tests/data/lerobot/umi_cup_in_the_wild/train/state.json", "repo_id": "lerobot", "token_count": 68 }
187
version https://git-lfs.github.com/spec/v1 oid sha256:2153fc436001739e5a8bda7b59231b1d7a5082bafb5982564822c9da04de7673 size 188
lerobot/tests/data/lerobot/xarm_lift_medium_replay/meta_data/info.json/0
{ "file_path": "lerobot/tests/data/lerobot/xarm_lift_medium_replay/meta_data/info.json", "repo_id": "lerobot", "token_count": 63 }
188
version https://git-lfs.github.com/spec/v1 oid sha256:8133fe105b6e35182c4e24d8ac092730cb8f684f6591e4f1b3a4a2adaf224c46 size 912
lerobot/tests/data/lerobot/xarm_push_medium/train/dataset_info.json/0
{ "file_path": "lerobot/tests/data/lerobot/xarm_push_medium/train/dataset_info.json", "repo_id": "lerobot", "token_count": 68 }
189
version https://git-lfs.github.com/spec/v1 oid sha256:1ddb52e362094cc1469f34f7d723e235abccd24713b962f9765b4f910e85cebd size 12936
lerobot/tests/data/lerobot/xarm_push_medium_replay_image/meta_data/episode_data_index.safetensors/0
{ "file_path": "lerobot/tests/data/lerobot/xarm_push_medium_replay_image/meta_data/episode_data_index.safetensors", "repo_id": "lerobot", "token_count": 66 }
190
version https://git-lfs.github.com/spec/v1 oid sha256:01cfe50c537e3aef0cd5947ec0b15b321b54ecb461baf7b4f2506897158eebc8 size 111338
lerobot/tests/data/save_dataset_to_safetensors/lerobot/pusht/frame_80.safetensors/0
{ "file_path": "lerobot/tests/data/save_dataset_to_safetensors/lerobot/pusht/frame_80.safetensors", "repo_id": "lerobot", "token_count": 65 }
191
version https://git-lfs.github.com/spec/v1 oid sha256:9bb9b195d32e05550af0edd5df88fcc761c829ab8c4b129ba970a723f39b46ee size 68
lerobot/tests/data/save_policy_to_safetensors/aloha_act_1000_steps/output_dict.safetensors/0
{ "file_path": "lerobot/tests/data/save_policy_to_safetensors/aloha_act_1000_steps/output_dict.safetensors", "repo_id": "lerobot", "token_count": 65 }
192
version https://git-lfs.github.com/spec/v1 oid sha256:4636751d82103a268ac7cf36f1e69f6356f356b9c40561a9fe8557bb9255e2ee size 240
lerobot/tests/data/save_policy_to_safetensors/xarm_tdmpcuse_mpc/output_dict.safetensors/0
{ "file_path": "lerobot/tests/data/save_policy_to_safetensors/xarm_tdmpcuse_mpc/output_dict.safetensors", "repo_id": "lerobot", "token_count": 63 }
193
# TODO(rcadene): measure fps in nightly?
# TODO(rcadene): test logs
# TODO(rcadene): test calibration
# TODO(rcadene): add compatibility with other motors bus

import time

import hydra
import numpy as np
import pytest

from lerobot.common.robot_devices.utils import RobotDeviceAlreadyConnectedError, RobotDeviceNotConnectedError
from lerobot.common.utils.utils import init_hydra_config
from tests.utils import KOCH_ROBOT_CONFIG_PATH, require_koch


def make_motors_bus():
    robot_cfg = init_hydra_config(KOCH_ROBOT_CONFIG_PATH)
    # Instantiating a common motors structure.
    # Here the one from Alexander Koch follower arm.
    motors_bus = hydra.utils.instantiate(robot_cfg.leader_arms.main)
    return motors_bus


@require_koch
def test_find_port(request):
    from lerobot.common.robot_devices.motors.dynamixel import find_port

    find_port()


@require_koch
def test_configure_motors_all_ids_1(request):
    # This test expects the configuration to already be correct.
    motors_bus = make_motors_bus()
    motors_bus.connect()
    motors_bus.write("Baud_Rate", [0] * len(motors_bus.motors))
    motors_bus.set_bus_baudrate(9_600)
    motors_bus.write("ID", [1] * len(motors_bus.motors))
    del motors_bus

    # Test configure
    motors_bus = make_motors_bus()
    motors_bus.connect()
    assert motors_bus.are_motors_configured()
    del motors_bus


@require_koch
def test_motors_bus(request):
    motors_bus = make_motors_bus()

    # Test that reading and writing before connecting raises an error
    with pytest.raises(RobotDeviceNotConnectedError):
        motors_bus.read("Torque_Enable")
    with pytest.raises(RobotDeviceNotConnectedError):
        motors_bus.write("Torque_Enable", 1)
    with pytest.raises(RobotDeviceNotConnectedError):
        motors_bus.disconnect()

    # Test deleting the object without connecting first
    del motors_bus

    # Test connecting
    motors_bus = make_motors_bus()
    motors_bus.connect()

    # Test connecting twice raises an error
    with pytest.raises(RobotDeviceAlreadyConnectedError):
        motors_bus.connect()

    # Test disabling torque and reading torque on all motors
    motors_bus.write("Torque_Enable", 0)
    values = motors_bus.read("Torque_Enable")
    assert isinstance(values, np.ndarray)
    assert len(values) == len(motors_bus.motors)
    assert (values == 0).all()

    # Test writing torque on a specific motor
    motors_bus.write("Torque_Enable", 1, "gripper")

    # Test reading torque from this specific motor. It is now 1
    values = motors_bus.read("Torque_Enable", "gripper")
    assert len(values) == 1
    assert values[0] == 1

    # Test reading torque from all motors. It is 1 for the specific motor,
    # and 0 on the others.
    values = motors_bus.read("Torque_Enable")
    gripper_index = motors_bus.motor_names.index("gripper")
    assert values[gripper_index] == 1
    assert values.sum() == 1  # gripper is the only motor to have torque 1

    # Test writing torque on all motors and check it is 1 for all.
    motors_bus.write("Torque_Enable", 1)
    values = motors_bus.read("Torque_Enable")
    assert (values == 1).all()

    # Test ordering the motors to move slightly (+1 step among 4096) and that this
    # move is executed and seen by the motor position sensor.
    values = motors_bus.read("Present_Position")
    motors_bus.write("Goal_Position", values + 1)
    # Give time for the motors to move to the goal position
    time.sleep(1)
    new_values = motors_bus.read("Present_Position")
    # The position readings should reflect the move away from the initial values.
    assert (new_values != values).any()
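A minimal sketch of the connect-guard contract these tests exercise, using a hypothetical stand-in device rather than the real Dynamixel bus (the class name and message strings are made up; the exception classes are the ones imported in the test above):

```python
from lerobot.common.robot_devices.utils import (
    RobotDeviceAlreadyConnectedError,
    RobotDeviceNotConnectedError,
)


class FakeMotorsBus:
    """Hypothetical device following the same connect/read contract."""

    def __init__(self):
        self.is_connected = False

    def connect(self):
        if self.is_connected:
            raise RobotDeviceAlreadyConnectedError("FakeMotorsBus is already connected.")
        self.is_connected = True

    def read(self, data_name):
        if not self.is_connected:
            raise RobotDeviceNotConnectedError("Call `connect()` before reading.")
        return 0


bus = FakeMotorsBus()
try:
    bus.read("Torque_Enable")  # raises: not connected yet
except RobotDeviceNotConnectedError:
    pass
bus.connect()
assert bus.read("Torque_Enable") == 0
```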
lerobot/tests/test_motors.py/0
{ "file_path": "lerobot/tests/test_motors.py", "repo_id": "lerobot", "token_count": 1243 }
194
{ "model_name_or_path": "./parler-tts-untrained-600M/parler-tts-untrained-600M/", "save_to_disk": "./tmp_dataset_audio/", "temporary_save_to_disk": "./audio_code_tmp/", "feature_extractor_name":"ylacombe/dac_44khZ_8kbps", "description_tokenizer_name":"google/flan-t5-base", "prompt_tokenizer_name":"google/flan-t5-base", "report_to": ["wandb"], "overwrite_output_dir": true, "output_dir": "./output_dir_training", "train_dataset_name": "blabble-io/libritts_r", "train_metadata_dataset_name": "parler-tts/libritts_r_tags_tagged_10k_generated", "train_dataset_config_name": "clean", "train_split_name": "test.clean", "eval_dataset_name": "blabble-io/libritts_r", "eval_metadata_dataset_name": "parler-tts/libritts_r_tags_tagged_10k_generated", "eval_dataset_config_name": "clean", "eval_split_name": "test.clean", "target_audio_column_name": "audio", "description_column_name": "text_description", "prompt_column_name": "text", "max_eval_samples": 48, "max_train_samples": 96, "max_duration_in_seconds": 20, "min_duration_in_seconds": 2.0, "add_audio_samples_to_wandb": true, "id_column_name": "id", "preprocessing_num_workers": 8, "do_train": true, "num_train_epochs": 50, "gradient_accumulation_steps": 1, "gradient_checkpointing": false, "per_device_train_batch_size": 4, "learning_rate": 1e-3, "adam_beta1": 0.9, "adam_beta2": 0.99, "weight_decay": 0.01, "lr_scheduler_type": "cosine", "warmup_steps": 40, "logging_steps": 2, "freeze_text_encoder": true, "do_eval": true, "predict_with_generate": true, "include_inputs_for_metrics": true, "evaluation_strategy": "steps", "eval_steps": 500, "save_steps": 5000, "per_device_eval_batch_size": 12, "audio_encoder_per_device_batch_size":24, "dtype": "bfloat16", "seed": 456, "dataloader_num_workers":8 }
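A small sketch of reading this config in Python (the training entry point typically parses it into dataclasses with an `HfArgumentParser`-style interface; plain `json` is shown here just to illustrate the structure):

```python
import json
from pathlib import Path

cfg = json.loads(Path("helpers/training_configs/librispeech_tts_r_300M_dummy.json").read_text())

print(cfg["learning_rate"])                # 0.001
print(cfg["per_device_train_batch_size"])  # 4
print(cfg["report_to"])                    # ['wandb']
```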
parler-tts/helpers/training_configs/librispeech_tts_r_300M_dummy.json/0
{ "file_path": "parler-tts/helpers/training_configs/librispeech_tts_r_300M_dummy.json", "repo_id": "parler-tts", "token_count": 913 }
195
import logging
from dataclasses import dataclass
from typing import Dict, List, Optional, Set, Union

import datasets
import numpy as np
import torch
from accelerate import Accelerator
from datasets import Dataset, IterableDataset, concatenate_datasets, interleave_datasets, load_dataset
from tqdm import tqdm
from transformers import AutoFeatureExtractor, AutoTokenizer


@dataclass
class DataCollatorEncodecWithPadding:
    """
    Data collator that will dynamically pad the inputs received to the longest sequence in the batch or
    to `max_length` if `max_length` is set and `padding=max_length`.
    """

    feature_extractor: AutoFeatureExtractor
    audio_column_name: str
    feature_extractor_input_name: Optional[str] = "input_values"
    max_length: Optional[int] = None
    padding: Optional[str] = "longest"

    def __call__(self, features: List[Dict[str, Union[List[int], torch.Tensor]]]) -> Dict[str, torch.Tensor]:
        # split inputs and labels since they have to be of different lengths and need
        # different padding methods
        audios = [feature[self.audio_column_name]["array"] for feature in features]
        len_audio = [len(audio) for audio in audios]
        if self.max_length is not None:
            audios = [audio[: min(length, self.max_length)] for audio, length in zip(audios, len_audio)]

        # since resampling has already been performed in the 'load_multiple_datasets' function,
        # a fixed sampling rate (44100 Hz) is passed to the feature_extractor.
        sampling_rate = self.feature_extractor.sampling_rate
        batch = self.feature_extractor(
            audios, sampling_rate=sampling_rate, return_tensors="pt", padding=self.padding, max_length=self.max_length
        )
        batch["len_audio"] = torch.tensor(len_audio).unsqueeze(1)
        return batch


@dataclass
class DataCollatorParlerTTSWithPadding:
    """
    Data collator that will dynamically pad the inputs received.

    Args:
        prompt_tokenizer (:class:`~transformers.AutoTokenizer`)
            The prompt_tokenizer used for processing the data.
        description_tokenizer (:class:`~transformers.AutoTokenizer`)
            The description_tokenizer used for processing the data.
        padding (:obj:`bool`, :obj:`str` or :class:`~transformers.tokenization_utils_base.PaddingStrategy`, `optional`, defaults to :obj:`True`):
            Select a strategy to pad the returned sequences (according to the model's padding side and padding index)
            among:
            * :obj:`True` or :obj:`'longest'`: Pad to the longest sequence in the batch (or no padding if only a single
              sequence is provided).
            * :obj:`'max_length'`: Pad to a maximum length specified with the argument :obj:`max_length` or to the
              maximum acceptable input length for the model if that argument is not provided.
            * :obj:`False` or :obj:`'do_not_pad'` (default): No padding (i.e., can output a batch with sequences of
              different lengths).
        pad_to_multiple_of (:obj:`int`, `optional`):
            If set will pad the sequence to a multiple of the provided value.
            This is especially useful to enable the use of Tensor Cores on NVIDIA hardware with compute capability >=
            7.5 (Volta).
""" prompt_tokenizer: AutoTokenizer description_tokenizer: AutoTokenizer padding: Union[bool, str] = "longest" pad_to_multiple_of: Optional[int] = None prompt_max_length: Optional[int] = None description_max_length: Optional[int] = None audio_max_length: Optional[int] = None def __call__(self, features: List[Dict[str, Union[List[int], torch.Tensor]]]) -> Dict[str, torch.Tensor]: # split inputs and labels since they have to be of different lengths and need # different padding methods labels = [torch.tensor(feature["labels"]).transpose(0, 1) for feature in features] # (bsz, seq_len, num_codebooks) labels = torch.nn.utils.rnn.pad_sequence(labels, batch_first=True, padding_value=-100) if self.audio_max_length is not None and self.padding == "max_length": labels = torch.nn.functional.pad( labels, pad=(0, 0, 0, max(self.audio_max_length - labels.shape[1], 0)), value=-100 ) input_ids = [{"input_ids": feature["input_ids"]} for feature in features] input_ids = self.description_tokenizer.pad( input_ids, return_tensors="pt", padding=self.padding, pad_to_multiple_of=self.pad_to_multiple_of, max_length=self.description_max_length, ) batch = {"labels": labels, **input_ids} prompt_input_ids = [{"input_ids": feature["prompt_input_ids"]} for feature in features] prompt_input_ids = self.prompt_tokenizer.pad( prompt_input_ids, return_tensors="pt", padding=self.padding, pad_to_multiple_of=self.pad_to_multiple_of, max_length=self.prompt_max_length, ) batch["prompt_input_ids"] = prompt_input_ids["input_ids"] if "attention_mask" in prompt_input_ids: batch["prompt_attention_mask"] = prompt_input_ids["attention_mask"] return batch def convert_dataset_str_to_list( dataset_names, dataset_config_names, metadata_dataset_names=None, splits=None, dataset_samples=None, default_split="train", ): if isinstance(dataset_names, str): dataset_names = dataset_names.split("+") dataset_config_names = dataset_config_names.split("+") splits = splits.split("+") if splits is not None else None dataset_samples = dataset_samples.split("+") if dataset_samples is not None else None metadata_dataset_names = metadata_dataset_names.split("+") if metadata_dataset_names is not None else None # basic checks to ensure we've got the right number of datasets/configs/splits/columns/probs if len(dataset_names) != len(dataset_config_names): raise ValueError( f"Ensure one config is passed for each dataset, got {len(dataset_names)} datasets and" f" {len(dataset_config_names)} configs." ) if splits is not None and len(splits) != len(dataset_names): raise ValueError( f"Ensure one split is passed for each dataset, got {len(dataset_names)} datasets and {len(splits)} splits." ) if metadata_dataset_names is not None and len(metadata_dataset_names) != len(dataset_names): raise ValueError( f"Ensure one metadata dataset is passed for each dataset, got {len(dataset_names)} datasets and {len(metadata_dataset_names)} metadata datasets." ) if dataset_samples is not None: if len(dataset_samples) != len(dataset_names): raise ValueError( f"Ensure one sample is passed for each dataset, got {len(dataset_names)} datasets and " f"{len(dataset_samples)} samples." 
            )
        dataset_samples = [float(ds_sample) for ds_sample in dataset_samples]
    else:
        dataset_samples = [None] * len(dataset_names)

    splits = splits if splits is not None else [default_split for _ in range(len(dataset_names))]

    dataset_names_dict = []
    for i, ds_name in enumerate(dataset_names):
        dataset_names_dict.append(
            {
                "name": ds_name,
                "config": dataset_config_names[i],
                "split": splits[i],
                "metadata_dataset_name": metadata_dataset_names[i],
                "samples": dataset_samples[i],
            }
        )
    return dataset_names_dict


def load_multiple_datasets(
    accelerator: Accelerator,
    dataset_names: Union[List, str],
    dataset_config_names: Union[List, str],
    metadata_dataset_names: Optional[str] = None,
    splits: Optional[Union[List, str]] = None,
    label_column_names: Optional[List] = None,
    stopping_strategy: Optional[str] = "first_exhausted",
    dataset_samples: Optional[Union[List, np.array]] = None,
    streaming: Optional[bool] = False,
    seed: Optional[int] = None,
    id_column_name: Optional[str] = None,
    columns_to_keep: Optional[Set[str]] = None,
    prompt_column_name: Optional[str] = None,
    sampling_rate: Optional[int] = None,
    audio_column_name: Optional[str] = None,
    logger: Optional[logging.Logger] = None,
    **kwargs,
) -> Union[Dataset, IterableDataset]:
    # Pass the optional arguments by keyword: `convert_dataset_str_to_list` takes no
    # `label_column_names` argument, so passing it positionally would shift
    # `dataset_samples` into the wrong parameter.
    dataset_names_dict = convert_dataset_str_to_list(
        dataset_names,
        dataset_config_names,
        metadata_dataset_names=metadata_dataset_names,
        splits=splits,
        dataset_samples=dataset_samples,
    )

    if dataset_samples is not None:
        dataset_samples = [ds_dict["samples"] for ds_dict in dataset_names_dict]
        probabilities = np.array(dataset_samples) / np.sum(dataset_samples)
    else:
        probabilities = None

    all_datasets = []
    # iterate over the datasets we want to interleave
    for dataset_dict in tqdm(dataset_names_dict, desc="Combining datasets..."):
        with accelerator.local_main_process_first():
            dataset = load_dataset(
                dataset_dict["name"],
                dataset_dict["config"],
                split=dataset_dict["split"],
                streaming=streaming,
                **kwargs,
            )
            dataset_features = dataset.features.keys()

            if sampling_rate is not None and audio_column_name is not None:
                # resample target audio
                dataset = dataset.cast_column(audio_column_name, datasets.features.Audio(sampling_rate=sampling_rate))

            metadata_dataset_name = dataset_dict["metadata_dataset_name"]
            if metadata_dataset_name is not None:
                logger.info(
                    f'Merging {dataset_dict["name"]} - {dataset_dict["split"]} with {metadata_dataset_name} - {dataset_dict["split"]}'
                )
                metadata_dataset = load_dataset(
                    metadata_dataset_name,
                    dataset_dict["config"],
                    split=dataset_dict["split"],
                    streaming=streaming,
                    **kwargs,
                )

                # TODO(YL): I forgot to create unique ids for MLS english.
                # To iterate faster, I bypass the original id check and do another one. - Done once because assuming it won't change next time
                # if dataset_dict["name"] == "parler-tts/mls_eng_10k":
                #     def concat_ids(book_id, speaker_id, begin_time):
                #         return {"id": f"{book_id}_{speaker_id}_{str(begin_time).replace('.', '_')}"}
                #     dataset = dataset.map(concat_ids, input_columns=["book_id", "speaker_id", "begin_time"], num_proc=24)
                #     metadata_dataset = metadata_dataset.map(concat_ids, input_columns=["book_id", "speaker_id", "begin_time"], num_proc=24)
                #     metadata_dataset = metadata_dataset.rename_column(id_column_name, f"metadata_{id_column_name}")

                if dataset_dict["name"] not in {"parler-tts/mls_eng_10k", "parler-tts/mls_eng"}:
                    if id_column_name is not None and id_column_name not in dataset.column_names:
                        raise ValueError(
                            f"id_column_name={id_column_name} but has not been found in the dataset columns "
                            f"- one of {', '.join(list(dataset.column_names))}."
                        )
                    if id_column_name is not None and id_column_name not in metadata_dataset.column_names:
                        raise ValueError(
                            f"id_column_name={id_column_name} but has not been found in the metadata dataset columns "
                            f"- one of {', '.join(list(metadata_dataset.column_names))}."
                        )
                    elif id_column_name is not None:
                        metadata_dataset = metadata_dataset.rename_column(id_column_name, f"metadata_{id_column_name}")

                metadata_columns_to_remove = set(metadata_dataset.column_names).intersection(set(dataset.column_names))

                if prompt_column_name is not None:
                    # We might have applied some transformations to the prompts (e.g. punctuation restoration),
                    # so we make sure to remove them from the original dataset.
                    if prompt_column_name in dataset.column_names:
                        logger.info(
                            f"REMOVE {prompt_column_name} from dataset {dataset_dict['name']} - {dataset_dict['split']}"
                        )
                        dataset = dataset.remove_columns(prompt_column_name)

                metadata_columns_to_remove = set(metadata_dataset.column_names).intersection(set(dataset.column_names))
                metadata_dataset = metadata_dataset.remove_columns(metadata_columns_to_remove)

                dataset = concatenate_datasets([dataset, metadata_dataset], axis=1)

                if id_column_name is not None and dataset_dict["name"] not in {
                    "parler-tts/mls_eng_10k",
                    "parler-tts/mls_eng",
                }:
                    if (
                        len(
                            dataset.filter(
                                lambda id1, id2: id1 != id2,
                                input_columns=[id_column_name, f"metadata_{id_column_name}"],
                            )
                        )
                        != 0
                    ):
                        raise ValueError(
                            f"Concatenate didn't work. Some ids don't correspond on dataset {dataset_dict['name']}"
                        )

                dataset_features = dataset.features.keys()

            if columns_to_keep is not None:
                dataset = dataset.remove_columns(set(dataset_features - columns_to_keep))
        all_datasets.append(dataset)

    if len(all_datasets) == 1:
        # we have a single dataset so just return it as is
        return all_datasets[0]

    if streaming:
        interleaved_dataset = interleave_datasets(
            all_datasets,
            stopping_strategy=stopping_strategy,
            probabilities=probabilities,
            seed=seed,
        )
    else:
        with accelerator.local_main_process_first():
            interleaved_dataset = concatenate_datasets(all_datasets)

    return interleaved_dataset
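`convert_dataset_str_to_list` is the piece that expands the '+'-separated CLI strings into per-dataset dicts. A small illustration (the dataset names are made up):

```python
pairs = convert_dataset_str_to_list(
    dataset_names="org/asr_a+org/asr_b",
    dataset_config_names="clean+other",
    metadata_dataset_names="org/meta_a+org/meta_b",
    splits="train+train",
    dataset_samples="3+1",
)
assert pairs[0] == {
    "name": "org/asr_a",
    "config": "clean",
    "split": "train",
    "metadata_dataset_name": "org/meta_a",
    "samples": 3.0,
}
```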
parler-tts/training/data.py/0
{ "file_path": "parler-tts/training/data.py", "repo_id": "parler-tts", "token_count": 6471 }
196
.PHONY: quality style test docs

check_dirs := src tests examples docs scripts docker

# Check that source code meets quality standards

# this target runs checks on all files
quality:
	ruff check $(check_dirs)
	ruff format --check $(check_dirs)
	doc-builder style src/peft tests docs/source --max_len 119 --check_only

# Format source code automatically and check if there are any problems left that need manual fixing
style:
	ruff check --fix $(check_dirs)
	ruff format $(check_dirs)
	doc-builder style src/peft tests docs/source --max_len 119

test:
	python -m pytest -n 3 tests/ $(if $(IS_GITHUB_CI),--report-log "ci_tests.log",)

tests_examples_multi_gpu:
	python -m pytest -m multi_gpu_tests tests/test_gpu_examples.py $(if $(IS_GITHUB_CI),--report-log "multi_gpu_examples.log",)

tests_examples_single_gpu:
	python -m pytest -m single_gpu_tests tests/test_gpu_examples.py $(if $(IS_GITHUB_CI),--report-log "single_gpu_examples.log",)

tests_core_multi_gpu:
	python -m pytest -m multi_gpu_tests tests/test_common_gpu.py $(if $(IS_GITHUB_CI),--report-log "core_multi_gpu.log",)

tests_core_single_gpu:
	python -m pytest -m single_gpu_tests tests/test_common_gpu.py $(if $(IS_GITHUB_CI),--report-log "core_single_gpu.log",)

tests_common_gpu:
	python -m pytest tests/test_decoder_models.py $(if $(IS_GITHUB_CI),--report-log "common_decoder.log",)
	python -m pytest tests/test_encoder_decoder_models.py $(if $(IS_GITHUB_CI),--report-log "common_encoder_decoder.log",)

tests_examples_multi_gpu_bnb:
	python -m pytest -m "multi_gpu_tests and bitsandbytes" tests/test_gpu_examples.py $(if $(IS_GITHUB_CI),--report-log "multi_gpu_examples.log",)

tests_examples_single_gpu_bnb:
	python -m pytest -m "single_gpu_tests and bitsandbytes" tests/test_gpu_examples.py $(if $(IS_GITHUB_CI),--report-log "single_gpu_examples.log",)

tests_core_multi_gpu_bnb:
	python -m pytest -m "multi_gpu_tests and bitsandbytes" tests/test_common_gpu.py $(if $(IS_GITHUB_CI),--report-log "core_multi_gpu.log",)

tests_core_single_gpu_bnb:
	python -m pytest -m "single_gpu_tests and bitsandbytes" tests/test_common_gpu.py $(if $(IS_GITHUB_CI),--report-log "core_single_gpu.log",)

tests_gpu_bnb_regression:
	python -m pytest tests/bnb/test_bnb_regression.py $(if $(IS_GITHUB_CI),--report-log "bnb_regression_gpu.log",)

# For testing transformers tests for bnb runners
transformers_tests:
	RUN_SLOW=1 python -m pytest transformers-clone/tests/quantization/bnb $(if $(IS_GITHUB_CI),--report-log "transformers_tests.log",)

tests_regression:
	python -m pytest -s --regression tests/regression/ $(if $(IS_GITHUB_CI),--report-log "regression_tests.log",)

tests_torch_compile:
	python -m pytest tests/test_torch_compile.py $(if $(IS_GITHUB_CI),--report-log "compile_tests.log",)
peft/Makefile/0
{ "file_path": "peft/Makefile", "repo_id": "peft", "token_count": 1019 }
197
<!--Copyright 2023 The HuggingFace Team. All rights reserved.

Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at

http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License.

⚠️ Note that this file is in Markdown but contains specific syntax for our doc-builder (similar to MDX) that may not be rendered properly in your Markdown viewer.

-->

# Orthogonal Finetuning (OFT and BOFT)

This conceptual guide gives a brief overview of [OFT](https://arxiv.org/abs/2306.07280) and [BOFT](https://arxiv.org/abs/2311.06243), parameter-efficient fine-tuning techniques that use orthogonal matrices to multiplicatively transform the pretrained weight matrices.

To achieve efficient fine-tuning, OFT represents the weight updates with an orthogonal transformation. The orthogonal transformation is parameterized by an orthogonal matrix multiplied with the pretrained weight matrix. These new matrices can be trained to adapt to the new data while keeping the overall number of changes low. The original weight matrix remains frozen and doesn't receive any further adjustments. To produce the final results, both the original and the adapted weights are multiplied together.

Orthogonal Butterfly (BOFT) generalizes OFT with Butterfly factorization and further improves its parameter efficiency and finetuning flexibility. In short, OFT can be viewed as a special case of BOFT. Unlike LoRA, which uses additive low-rank weight updates, BOFT uses multiplicative orthogonal weight updates. The comparison is shown below.

<div class="flex justify-center">
    <img src="https://raw.githubusercontent.com/wy1iu/butterfly-oft/main/assets/BOFT_comparison.png"/>
</div>

BOFT has some advantages compared to LoRA:

* BOFT proposes a simple yet generic way to finetune pretrained models on downstream tasks, yielding better preservation of pretraining knowledge and better parameter efficiency.
* Through its orthogonality, BOFT introduces a structural constraint, i.e., keeping the [hyperspherical energy](https://arxiv.org/abs/1805.09298) unchanged during finetuning. This can effectively reduce the forgetting of pretraining knowledge.
* BOFT uses the butterfly factorization to efficiently parameterize the orthogonal matrix, which yields a compact yet expressive learning space (i.e., hypothesis class).
* The sparse matrix decomposition in BOFT brings in additional inductive biases that are beneficial to generalization.

In principle, BOFT can be applied to any subset of weight matrices in a neural network to reduce the number of trainable parameters. Given the target layers for injecting BOFT parameters, the number of trainable parameters can be determined based on the size of the weight matrices.

## Merge OFT/BOFT weights into the base model

Similar to LoRA, the weights learned by OFT/BOFT can be integrated into the pretrained weight matrices using the `merge_and_unload()` function. This function merges the adapter weights with the base model, which allows you to effectively use the newly merged model as a standalone model.
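For illustration, a minimal merging sketch; the checkpoint id and adapter path below are hypothetical placeholders, not part of the original guide:

```py
from transformers import AutoModelForCausalLM
from peft import PeftModel

base_model = AutoModelForCausalLM.from_pretrained("base-model-id")          # hypothetical base checkpoint
peft_model = PeftModel.from_pretrained(base_model, "path/to/boft-adapter")  # hypothetical adapter path

# Folds the orthogonal updates into the base weights and returns a plain
# transformers model that no longer depends on PEFT at inference time.
merged_model = peft_model.merge_and_unload()
merged_model.save_pretrained("path/to/merged-model")
```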
<div class="flex justify-center"> <img src="https://raw.githubusercontent.com/wy1iu/butterfly-oft/main/assets/boft_merge.png"/> </div> This works because during training, the orthogonal weight matrix (R in the diagram above) and the pretrained weight matrices are separate. But once training is complete, these weights can actually be merged (multiplied) into a new weight matrix that is equivalent. ## Utils for OFT / BOFT ### Common OFT / BOFT parameters in PEFT As with other methods supported by PEFT, to fine-tune a model using OFT or BOFT, you need to: 1. Instantiate a base model. 2. Create a configuration (`OFTConfig` or `BOFTConfig`) where you define OFT/BOFT-specific parameters. 3. Wrap the base model with `get_peft_model()` to get a trainable `PeftModel`. 4. Train the `PeftModel` as you normally would train the base model. ### BOFT-specific paramters `BOFTConfig` allows you to control how OFT/BOFT is applied to the base model through the following parameters: - `boft_block_size`: the BOFT matrix block size across different layers, expressed in `int`. Smaller block size results in sparser update matrices with fewer trainable paramters. **Note**, please choose `boft_block_size` to be divisible by most layer's input dimension (`in_features`), e.g., 4, 8, 16. Also, please only specify either `boft_block_size` or `boft_block_num`, but not both simultaneously or leaving both to 0, because `boft_block_size` x `boft_block_num` must equal the layer's input dimension. - `boft_block_num`: the number of BOFT matrix blocks across different layers, expressed in `int`. Fewer blocks result in sparser update matrices with fewer trainable paramters. **Note**, please choose `boft_block_num` to be divisible by most layer's input dimension (`in_features`), e.g., 4, 8, 16. Also, please only specify either `boft_block_size` or `boft_block_num`, but not both simultaneously or leaving both to 0, because `boft_block_size` x `boft_block_num` must equal the layer's input dimension. - `boft_n_butterfly_factor`: the number of butterfly factors. **Note**, for `boft_n_butterfly_factor=1`, BOFT is the same as vanilla OFT, for `boft_n_butterfly_factor=2`, the effective block size of OFT becomes twice as big and the number of blocks become half. - `bias`: specify if the `bias` parameters should be trained. Can be `"none"`, `"all"` or `"boft_only"`. - `boft_dropout`: specify the probability of multiplicative dropout. - `target_modules`: The modules (for example, attention blocks) to inject the OFT/BOFT matrices. - `modules_to_save`: List of modules apart from OFT/BOFT matrices to be set as trainable and saved in the final checkpoint. These typically include model's custom head that is randomly initialized for the fine-tuning task. 
## BOFT Example Usage

For examples of applying the BOFT method to various downstream tasks, take a look at the following step-by-step guides on how to finetune a model with BOFT:

- [Dreambooth finetuning with BOFT](../task_guides/boft_dreambooth)
- [Controllable generation finetuning with BOFT (ControlNet)](../task_guides/boft_controlnet)

For the task of image classification, one can initialize the BOFT config for a DinoV2 model as follows:

```py
import transformers
from peft import BOFTConfig, get_peft_model

config = BOFTConfig(
    boft_block_size=4,
    boft_n_butterfly_factor=2,
    target_modules=["query", "value", "key", "output.dense", "mlp.fc1", "mlp.fc2"],
    boft_dropout=0.1,
    bias="boft_only",
    modules_to_save=["classifier"],
)

model = transformers.Dinov2ForImageClassification.from_pretrained(
    "facebook/dinov2-large",
    num_labels=100,
)

boft_model = get_peft_model(model, config)
```
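As a quick sanity check on the config above, the wrapped model exposes a helper to report how many parameters BOFT actually trains; the exact numbers printed depend on the model:

```py
boft_model.print_trainable_parameters()
# prints something like: trainable params: ... || all params: ... || trainable%: ...
```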
peft/docs/source/conceptual_guides/oft.md/0
{ "file_path": "peft/docs/source/conceptual_guides/oft.md", "repo_id": "peft", "token_count": 1991 }
198
<!--Copyright 2023 The HuggingFace Team. All rights reserved.

Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at

http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License.

⚠️ Note that this file is in Markdown but contains specific syntax for our doc-builder (similar to MDX) that may not be rendered properly in your Markdown viewer.

-->

# AutoPeftModels

The `AutoPeftModel` classes load the appropriate PEFT model for the task type by automatically inferring it from the configuration file. They are designed to quickly and easily load a PEFT model in a single line of code without having to worry about which exact model class you need or manually loading a [`PeftConfig`].

## AutoPeftModel

[[autodoc]] auto.AutoPeftModel
    - from_pretrained

## AutoPeftModelForCausalLM

[[autodoc]] auto.AutoPeftModelForCausalLM

## AutoPeftModelForSeq2SeqLM

[[autodoc]] auto.AutoPeftModelForSeq2SeqLM

## AutoPeftModelForSequenceClassification

[[autodoc]] auto.AutoPeftModelForSequenceClassification

## AutoPeftModelForTokenClassification

[[autodoc]] auto.AutoPeftModelForTokenClassification

## AutoPeftModelForQuestionAnswering

[[autodoc]] auto.AutoPeftModelForQuestionAnswering

## AutoPeftModelForFeatureExtraction

[[autodoc]] auto.AutoPeftModelForFeatureExtraction
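As a quick illustration of the single-line loading described above (the adapter repo id is a hypothetical placeholder):

```py
from peft import AutoPeftModelForCausalLM

# Infers the base model and task head from the adapter's config file.
model = AutoPeftModelForCausalLM.from_pretrained("your-org/your-adapter")  # hypothetical adapter repo
```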
peft/docs/source/package_reference/auto_class.md/0
{ "file_path": "peft/docs/source/package_reference/auto_class.md", "repo_id": "peft", "token_count": 470 }
199
PEFT_TYPE="boft" BLOCK_NUM=8 BLOCK_SIZE=0 N_BUTTERFLY_FACTOR=1 ITER_NUM=50000 export RUN_NAME="${PEFT_TYPE}_${BLOCK_NUM}${BLOCK_SIZE}${N_BUTTERFLY_FACTOR}" export MODEL_NAME="stabilityai/stable-diffusion-2-1" # export MODEL_NAME="runwayml/stable-diffusion-v1-5" export DATASET_NAME="oftverse/control-celeba-hq" export CKPT_NAME="checkpoint-${ITER_NUM}" export OUTPUT_DIR="./output/${DATASET_NAME}/${RUN_NAME}/${CKPT_NAME}" export CONTROLNET_PATH="${OUTPUT_DIR}/controlnet/model.safetensors" export UNET_PATH="${OUTPUT_DIR}/unet/${RUN_NAME}" accelerate launch eval.py \ --pretrained_model_name_or_path=$MODEL_NAME \ --dataset_name=$DATASET_NAME \ --controlnet_path=$CONTROLNET_PATH \ --unet_path=$UNET_PATH \ --adapter_name=$RUN_NAME \ --output_dir=$OUTPUT_DIR \ --dataset_name=$DATASET_NAME \ --vis_overlays \
peft/examples/boft_controlnet/eval.sh/0
{ "file_path": "peft/examples/boft_controlnet/eval.sh", "repo_id": "peft", "token_count": 370 }
200
<jupyter_start><jupyter_code>import os

import torch
from accelerate.logging import get_logger
from diffusers import StableDiffusionPipeline
from diffusers.utils import check_min_version

from peft import PeftModel

# Will error if the minimal version of diffusers is not installed. Remove at your own risks.
check_min_version("0.10.0.dev0")

logger = get_logger(__name__)

MODEL_NAME = "stabilityai/stable-diffusion-2-1"
# MODEL_NAME = "runwayml/stable-diffusion-v1-5"

PEFT_TYPE = "boft"
BLOCK_NUM = 8
BLOCK_SIZE = 0
N_BUTTERFLY_FACTOR = 1
SELECTED_SUBJECT = "backpack"
EPOCH_IDX = 200

PROJECT_NAME = f"dreambooth_{PEFT_TYPE}"
RUN_NAME = f"{SELECTED_SUBJECT}_{PEFT_TYPE}_{BLOCK_NUM}{BLOCK_SIZE}{N_BUTTERFLY_FACTOR}"
OUTPUT_DIR = f"./data/output/{PEFT_TYPE}"


def get_boft_sd_pipeline(
    ckpt_dir, base_model_name_or_path=None, epoch: int = None, dtype=torch.float32, device="cuda", adapter_name="default"
):
    if base_model_name_or_path is None:
        raise ValueError("Please specify the base model name or path")

    pipe = StableDiffusionPipeline.from_pretrained(
        base_model_name_or_path, torch_dtype=dtype, requires_safety_checker=False
    ).to(device)

    load_adapter(pipe, ckpt_dir, epoch, adapter_name)

    if dtype in (torch.float16, torch.bfloat16):
        pipe.unet.half()
        pipe.text_encoder.half()

    pipe.to(device)
    return pipe


def load_adapter(pipe, ckpt_dir, epoch, adapter_name="default"):
    unet_sub_dir = os.path.join(ckpt_dir, f"unet/{epoch}", adapter_name)
    text_encoder_sub_dir = os.path.join(ckpt_dir, f"text_encoder/{epoch}", adapter_name)

    if isinstance(pipe.unet, PeftModel):
        pipe.unet.load_adapter(unet_sub_dir, adapter_name=adapter_name)
    else:
        pipe.unet = PeftModel.from_pretrained(pipe.unet, unet_sub_dir, adapter_name=adapter_name)

    if os.path.exists(text_encoder_sub_dir):
        if isinstance(pipe.text_encoder, PeftModel):
            pipe.text_encoder.load_adapter(text_encoder_sub_dir, adapter_name=adapter_name)
        else:
            pipe.text_encoder = PeftModel.from_pretrained(
                pipe.text_encoder, text_encoder_sub_dir, adapter_name=adapter_name
            )


def set_adapter(pipe, adapter_name):
    pipe.unet.set_adapter(adapter_name)
    if isinstance(pipe.text_encoder, PeftModel):
        pipe.text_encoder.set_adapter(adapter_name)


prompt = "a photo of sks backpack on a wooden floor"
negative_prompt = "low quality, blurry, unfinished"

%%time
pipe = get_boft_sd_pipeline(OUTPUT_DIR, MODEL_NAME, EPOCH_IDX, adapter_name=RUN_NAME)

%%time
image = pipe(prompt, num_inference_steps=50, guidance_scale=7, negative_prompt=negative_prompt).images[0]
image

# load and reset another adapter
# WARNING: requires training DreamBooth with `boft_bias=None`
SELECTED_SUBJECT = "dog"
EPOCH_IDX = 200
RUN_NAME = f"{SELECTED_SUBJECT}_{PEFT_TYPE}_{BLOCK_NUM}{BLOCK_SIZE}{N_BUTTERFLY_FACTOR}"

load_adapter(pipe, OUTPUT_DIR, epoch=EPOCH_IDX, adapter_name=RUN_NAME)
set_adapter(pipe, adapter_name=RUN_NAME)

%%time
prompt = "a photo of sks dog running on the beach"
negative_prompt = "low quality, blurry, unfinished"
image = pipe(prompt, num_inference_steps=50, guidance_scale=7, negative_prompt=negative_prompt).images[0]
image<jupyter_output><empty_output>
peft/examples/boft_dreambooth/dreambooth_inference.ipynb/0
{ "file_path": "peft/examples/boft_dreambooth/dreambooth_inference.ipynb", "repo_id": "peft", "token_count": 1346 }
201
compute_environment: LOCAL_MACHINE
deepspeed_config:
  gradient_accumulation_steps: 1
  gradient_clipping: 1.0
  offload_optimizer_device: none
  offload_param_device: none
  zero3_init_flag: true
  zero3_save_16bit_model: true
  zero_stage: 3
distributed_type: DEEPSPEED
downcast_bf16: 'no'
dynamo_backend: 'NO'
fsdp_config: {}
machine_rank: 0
main_training_function: main
megatron_lm_config: {}
mixed_precision: 'no'
num_machines: 1
num_processes: 1
rdzv_backend: static
same_network: true
use_cpu: false
peft/examples/conditional_generation/accelerate_ds_zero3_cpu_offload_config.yaml/0
{ "file_path": "peft/examples/conditional_generation/accelerate_ds_zero3_cpu_offload_config.yaml", "repo_id": "peft", "token_count": 198 }
202
# Copyright 2024-present the HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Example script demonstrating the time difference loading a model with a DoRA using ephemeral GPU offloading vs doing
it purely on the CPU.

Example outputs:

$ python load_with_dora.py
--- Loading model ---
Loading checkpoint shards: 100%|████████████████████████████████████████| 4/4 [00:04<00:00,  1.03s/it]
--- Loading PeftModel ---
--- Done ---
Model loading time: 4.83s
PeftModel loading time: 28.14s
Use ephemeral GPU offloading: False

(Note: if this was the first time you ran the script, or if your cache was cleared, the times shown above are invalid,
due to the time taken to download the model and DoRA files. Just re-run the script in this case.)

$ python load_with_dora.py --ephemeral_gpu_offload
--- Loading model ---
Loading checkpoint shards: 100%|████████████████████████████████████████| 4/4 [00:03<00:00,  1.11it/s]
--- Loading PeftModel ---
--- Done ---
Model loading time: 4.28s
PeftModel loading time: 16.59s
Use ephemeral GPU offloading: True

(Note: if this was the first time you ran the script, or if your cache was cleared, the times shown above are invalid,
due to the time taken to download the model and DoRA files. Just re-run the script in this case.)
"""

import argparse
import time

from huggingface_hub import snapshot_download
from transformers import AutoModelForCausalLM

from peft import PeftModel


def main():
    parser = argparse.ArgumentParser(description="Load a model with DoRA using ephemeral GPU offloading")
    parser.add_argument("--model", type=str, default="NousResearch/Hermes-2-Pro-Mistral-7B", help="Model to load")
    parser.add_argument(
        "--dora",
        type=str,
        default="peft-internal-testing/DoRA-Hermes-2-Pro-Mistral-7B",
        help="DoRA to use",
    )
    parser.add_argument("--ephemeral_gpu_offload", action="store_true", help="Use ephemeral GPU offloading")
    parser.add_argument(
        "--merge_model_path", type=str, help="Merge the model with the DoRA model and save to the given path"
    )
    args = parser.parse_args()

    peft_model_kwargs = {
        "ephemeral_gpu_offload": args.ephemeral_gpu_offload,
        "max_memory": {"cpu": "256GiB"},
        "device_map": {"": "cpu"},
    }

    # Predownload
    try:
        snapshot_download(repo_id=args.model)
    except Exception as e:
        print(f"Failed to download model: {e}")
        # We continue anyway as this might be e.g. a local directory or something
    try:
        snapshot_download(repo_id=args.dora)
    except Exception as e:
        print(f"Failed to download DoRA: {e}")
        # We continue anyway as this might be e.g. a local directory or something

    start = time.perf_counter()
    print("--- Loading model ---")
    model = AutoModelForCausalLM.from_pretrained(args.model)
    model_time = time.perf_counter() - start
    print("--- Loading PeftModel ---")
    peft_model = PeftModel.from_pretrained(model, args.dora, **peft_model_kwargs)
    print("--- Done ---")
    peft_model_time = time.perf_counter() - start

    print(f"Model loading time: {model_time:.2f}s")
    print(f"PeftModel loading time: {peft_model_time:.2f}s")
    print(f"Use ephemeral GPU offloading: {args.ephemeral_gpu_offload}")

    if args.merge_model_path is not None:
        merged_model = peft_model.merge_and_unload(progressbar=True)
        merged_model.save_pretrained(args.merge_model_path)


if __name__ == "__main__":
    main()
peft/examples/ephemeral_gpu_offloading/load_with_dora.py/0
{ "file_path": "peft/examples/ephemeral_gpu_offloading/load_with_dora.py", "repo_id": "peft", "token_count": 1389 }
203
<jupyter_start><jupyter_text>IntroductionIn this notebook, we will learn how to use [LoRA](https://arxiv.org/abs/2106.09685) from 🤗 PEFT to fine-tune an image classification model by ONLY using **0.77%** of the original trainable parameters of the model. LoRA adds low-rank "update matrices" to certain blocks in the underlying model (in this case the attention blocks) and ONLY trains those matrices during fine-tuning. During inference, these update matrices are _merged_ with the original model parameters. For more details, check out the [original LoRA paper](https://arxiv.org/abs/2106.09685). Let's get started by installing the dependencies. __*Note that this notebook builds on top of the [official image classification example notebook](https://github.com/huggingface/notebooks/blob/main/examples/image_classification.ipynb).*__ Install dependenciesHere we're installing `peft` from source to ensure we have access to all the bleeding edge features of `peft`.<jupyter_code>!pip install transformers accelerate evaluate datasets git+https://github.com/huggingface/peft -q<jupyter_output>Installing build dependencies ... done
Getting requirements to build wheel ... done
Preparing metadata (pyproject.toml) ... done
 ━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━ 6.3/6.3 MB 53.1 MB/s eta 0:00:00
 ━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━ 199.7/199.7 KB 24.5 MB/s eta 0:00:00
 ━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━ 81.4/81.4 KB 11.3 MB/s eta 0:00:00
 ━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━ 462.8/462.8 KB 46.9 MB/s eta 0:00:00
 ━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━ 190.3/190.3 KB 23.1 MB/s eta 0:00:00
 ━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━ 7.6/7.6 MB 102.9 MB/s eta 0:00:00
 ━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━ 213.0/213.0 KB 25.4 MB/s eta [[...]<jupyter_text>AuthenticationWe will share our fine-tuned model at the end of training. So, to do that we just authenticate using our 🤗 token. This token is available from [here](https://huggingface.co/settings/tokens). If you don't have a 🤗 account already, we highly encourage you to do so; it's free!<jupyter_code>from huggingface_hub import notebook_login

notebook_login()<jupyter_output>Token is valid.
Your token has been saved in your configured git credential helpers (store).
Your token has been saved to /root/.cache/huggingface/token Login successful<jupyter_text>Check the library versions<jupyter_code>import transformers import accelerate import peft print(f"Transformers version: {transformers.__version__}") print(f"Accelerate version: {accelerate.__version__}") print(f"PEFT version: {peft.__version__}")<jupyter_output>Transformers version: 4.26.0 Accelerate version: 0.16.0 PEFT version: 0.1.0.dev0<jupyter_text>Select a model checkpoint to fine-tune<jupyter_code>model_checkpoint = "google/vit-base-patch16-224-in21k" # pre-trained model from which to fine-tune<jupyter_output><empty_output><jupyter_text>Load a datasetWe're only loading the first 5000 instances from the training set of the [Food-101 dataset](https://huggingface.co/datasets/food101) to keep this example runtime short.<jupyter_code>from datasets import load_dataset dataset = load_dataset("food101", split="train[:5000]")<jupyter_output><empty_output><jupyter_text>Prepare datasets for training and evaluation 1. Prepare `label2id` and `id2label` dictionaries. This will come in handy when performing inference and for metadata information.<jupyter_code>labels = dataset.features["label"].names label2id, id2label = dict(), dict() for i, label in enumerate(labels): label2id[label] = i id2label[i] = label id2label[2]<jupyter_output><empty_output><jupyter_text>2. We load the image processor of the model we're fine-tuning.<jupyter_code>from transformers import AutoImageProcessor image_processor = AutoImageProcessor.from_pretrained(model_checkpoint) image_processor<jupyter_output><empty_output><jupyter_text>As one might notice, the `image_processor` has useful information on which size the training and evaluation images should be resized, stats that should be used to normalize the pixel values, etc. 3. Using the image processor we prepare transformation functions for the datasets. These functions will include augmentation and pixel scaling.<jupyter_code>from torchvision.transforms import ( CenterCrop, Compose, Normalize, RandomHorizontalFlip, RandomResizedCrop, Resize, ToTensor, ) normalize = Normalize(mean=image_processor.image_mean, std=image_processor.image_std) train_transforms = Compose( [ RandomResizedCrop(image_processor.size["height"]), RandomHorizontalFlip(), ToTensor(), normalize, ] ) val_transforms = Compose( [ Resize(image_processor.size["height"]), CenterCrop(image_processor.size["height"]), ToTensor(), normalize, ] ) def preprocess_train(example_batch): """Apply train_transforms across a batch.""" example_batch["pixel_values"] = [train_transforms(image.convert("RGB")) for image in example_batch["image"]] return example_batch def preprocess_val(example_batch): """Apply val_transforms across a batch.""" example_batch["pixel_values"] = [val_transforms(image.convert("RGB")) for image in example_batch["image"]] return example_batch<jupyter_output><empty_output><jupyter_text>4. We split our mini dataset into training and validation.<jupyter_code># split up training into training + validation splits = dataset.train_test_split(test_size=0.1) train_ds = splits["train"] val_ds = splits["test"]<jupyter_output><empty_output><jupyter_text>5. 
We set the transformation functions to the datasets accordingly.<jupyter_code>train_ds.set_transform(preprocess_train)
val_ds.set_transform(preprocess_val)<jupyter_output><empty_output><jupyter_text>Load and prepare a model In this section, we first load the model we want to fine-tune.<jupyter_code>def print_trainable_parameters(model):
    """
    Prints the number of trainable parameters in the model.
    """
    trainable_params = 0
    all_param = 0
    for _, param in model.named_parameters():
        all_param += param.numel()
        if param.requires_grad:
            trainable_params += param.numel()
    print(
        f"trainable params: {trainable_params} || all params: {all_param} || trainable%: {100 * trainable_params / all_param:.2f}"
    )<jupyter_output><empty_output><jupyter_text>The `get_peft_model()` method that we will use in a moment wraps the original model to be fine-tuned as a `PeftModel`. So, it's important for us to initialize the original model correctly. As such, we initialize it by specifying the `label2id` and `id2label` so that `AutoModelForImageClassification` can append a randomly initialized classification head to the underlying model, adapted for our dataset. We can confirm this from the warning below:

```
Some weights of ViTForImageClassification were not initialized from the model checkpoint at google/vit-base-patch16-224-in21k and are newly initialized: ['classifier.weight', 'classifier.bias']
```<jupyter_code>from transformers import AutoModelForImageClassification, TrainingArguments, Trainer

model = AutoModelForImageClassification.from_pretrained(
    model_checkpoint,
    label2id=label2id,
    id2label=id2label,
    ignore_mismatched_sizes=True,  # provide this in case you're planning to fine-tune an already fine-tuned checkpoint
)
print_trainable_parameters(model)<jupyter_output><empty_output><jupyter_text>Also, take note of the number of total trainable parameters of `model`: it's 100%! We'll compare this number to that of the LoRA model.We now use the `PeftModel` to wrap `model` so that the "update" matrices are added to the respective places.<jupyter_code>from peft import LoraConfig, get_peft_model

config = LoraConfig(
    r=16,
    lora_alpha=16,
    target_modules=["query", "value"],
    lora_dropout=0.1,
    bias="none",
    modules_to_save=["classifier"],
)
lora_model = get_peft_model(model, config)
print_trainable_parameters(lora_model)<jupyter_output>trainable params: 667493 || all params: 86466149 || trainable%: 0.77<jupyter_text>Let's unpack what's going on here. In order for LoRA to take effect, we need to specify the target modules to `LoraConfig` so that `get_peft_model()` knows which modules inside our model need to be amended with LoRA matrices. In this case, we're only interested in targeting the query and value matrices of the attention blocks of the base model. Since the parameters corresponding to these matrices are "named" with `query` and `value` respectively, we specify them accordingly in the `target_modules` argument of `LoraConfig`. We also specify `modules_to_save`. After we wrap our base model `model` with `get_peft_model()` along with the `config`, we get a new model where only the LoRA parameters are trainable (the so-called "update matrices") while the pre-trained parameters are kept frozen. These include the parameters of the randomly initialized classifier too. This is NOT what we want when fine-tuning the base model on our custom dataset. To ensure that the classifier parameters are also trained, we specify `modules_to_save`.
This also ensures that these modules are serialized alongside the LoRA trainable parameters when using utilities like `save_pretrained()` and `push_to_hub()`. Regarding the other parameters:

* `r`: The dimension used by the LoRA update matrices.
* `alpha`: Scaling factor.
* `bias`: Specifying if the `bias` parameters should be trained. `None` denotes none of the `bias` parameters will be trained.

`r` and `alpha` together control the total number of final trainable parameters when using LoRA, giving us the flexibility to balance a trade-off between end performance and compute efficiency. We can also see how many parameters we're actually training. Since we're interested in performing **parameter-efficient fine-tuning**, we should expect fewer trainable parameters in the `lora_model` in comparison to the original `model`, which is indeed the case here. Training argumentsWe will leverage [🤗 Trainer](https://huggingface.co/docs/transformers/main_classes/trainer) for fine-tuning. It accepts several arguments which we wrap using [`TrainingArguments`](https://huggingface.co/docs/transformers/main_classes/trainer#transformers.TrainingArguments).<jupyter_code>from transformers import TrainingArguments, Trainer

model_name = model_checkpoint.split("/")[-1]
batch_size = 128

args = TrainingArguments(
    f"{model_name}-finetuned-lora-food101",
    remove_unused_columns=False,
    evaluation_strategy="epoch",
    save_strategy="epoch",
    learning_rate=5e-3,
    per_device_train_batch_size=batch_size,
    gradient_accumulation_steps=4,
    per_device_eval_batch_size=batch_size,
    fp16=True,
    num_train_epochs=5,
    logging_steps=10,
    load_best_model_at_end=True,
    metric_for_best_model="accuracy",
    push_to_hub=True,
    label_names=["labels"],
)<jupyter_output><empty_output><jupyter_text>Some things to note here:

* We're using a larger batch size since there is only a handful of parameters to train.
* We're using a larger learning rate than normal (1e-5, for example).

All of these things are a byproduct of the fact that we're training only a small number of parameters. This can potentially also reduce the need to conduct expensive hyperparameter tuning experiments. Prepare evaluation metric<jupyter_code>import numpy as np
import evaluate

metric = evaluate.load("accuracy")


# the compute_metrics function takes a Named Tuple as input:
# predictions, which are the logits of the model as Numpy arrays,
# and label_ids, which are the ground-truth labels as Numpy arrays.
def compute_metrics(eval_pred):
    """Computes accuracy on a batch of predictions"""
    predictions = np.argmax(eval_pred.predictions, axis=1)
    return metric.compute(predictions=predictions, references=eval_pred.label_ids)<jupyter_output><empty_output><jupyter_text>Collation functionThis is used by `Trainer` to gather a batch of training and evaluation examples and prepare them in a format that is acceptable by the underlying model.<jupyter_code>import torch


def collate_fn(examples):
    pixel_values = torch.stack([example["pixel_values"] for example in examples])
    labels = torch.tensor([example["label"] for example in examples])
    return {"pixel_values": pixel_values, "labels": labels}<jupyter_output><empty_output><jupyter_text>Train and evaluate<jupyter_code>trainer = Trainer(
    lora_model,
    args,
    train_dataset=train_ds,
    eval_dataset=val_ds,
    tokenizer=image_processor,
    compute_metrics=compute_metrics,
    data_collator=collate_fn,
)
train_results = trainer.train()<jupyter_output>Cloning https://huggingface.co/sayakpaul/vit-base-patch16-224-in21k-finetuned-lora-food101 into local empty directory.
WARNING:huggingface_hub.repository:Cloning https://huggingface.co/sayakpaul/vit-base-patch16-224-in21k-finetuned-lora-food101 into local empty directory.<jupyter_text>In just a few minutes, we have a fine-tuned model with 96% validation accuracy. Also, note that we used a very small subset of the training dataset, which definitely impacts the results.<jupyter_code>trainer.evaluate(val_ds)<jupyter_output>***** Running Evaluation *****
  Num examples = 500
  Batch size = 128<jupyter_text>Sharing your model and inference Once the fine-tuning is done, we can share the LoRA parameters with the community like so:<jupyter_code>repo_name = f"sayakpaul/{model_name}-finetuned-lora-food101"
lora_model.push_to_hub(repo_name)<jupyter_output>Uploading the following files to sayakpaul/vit-base-patch16-224-in21k-finetuned-lora-food101: adapter_config.json,adapter_model.bin<jupyter_text>When we call `push_to_hub()` on the `lora_model`, only the LoRA parameters along with any modules specified in `modules_to_save` are saved. If we take a look at the [trained LoRA parameters](https://huggingface.co/sayakpaul/vit-base-patch16-224-in21k-finetuned-lora-food101/blob/main/adapter_model.bin), we see that it's only **2.6 MB**! This greatly helps with portability, especially when we're using a very large model to fine-tune (such as [BLOOM](https://huggingface.co/bigscience/bloom)). Next, we see how to load the LoRA updated parameters along with our base model for inference. When we wrap a base model with `PeftModel`, the modifications are DONE in place. So to mitigate any concerns that might stem from in-place modifications, we newly initialize our base model just like we did earlier and construct our inference model.<jupyter_code>from peft import PeftConfig, PeftModel

config = PeftConfig.from_pretrained(repo_name)
model = AutoModelForImageClassification.from_pretrained(
    config.base_model_name_or_path,
    label2id=label2id,
    id2label=id2label,
    ignore_mismatched_sizes=True,  # provide this in case you're planning to fine-tune an already fine-tuned checkpoint
)
# Load the LoRA model
inference_model = PeftModel.from_pretrained(model, repo_name)<jupyter_output>loading configuration file config.json from cache at /root/.cache/huggingface/hub/models--google--vit-base-patch16-224-in21k/snapshots/1ba429d32753f33a0660b80ac6f43a3c80c18938/config.json
Model config ViTConfig {
  "_name_or_path": "google/vit-base-patch16-224-in21k",
  "architectures": [
    "ViTModel"
  ],
  "attention_probs_dropout_prob": 0.0,
  "encoder_stride": 16,
  "hidden_act": "gelu",
  "hidden_dropout_prob": 0.0,
  "hidden_size": 768,
  "id2label": {
    "0": "apple_pie",
    "1": "baby_back_ribs",
    "2": "baklava",
    "3": "beef_carpaccio",
    "4": "beef_tartare",
    "5": "beet_salad",
    "6": "beignets",
    "7": "bibimbap",
    "8": "bread_pudding",
    "9": "breakfast_burrito",
    "10": "bruschetta",
    "11": "caesar_salad",
    "12": "cannoli",
    "13": "caprese_salad",
    "14": "carrot_cake",
    "15": "ceviche",
    "16": "cheesecake",
    "17": "cheese_plate",
    "18": "chicken_curry",
    "19": "chicken_quesadilla",
    "20": "chicken_wings",
    "21": "ch[...]<jupyter_text>Don't worry about the warnings, they're harmless.
Let's now fetch a sample for inference.<jupyter_code>from PIL import Image import requests url = "https://huggingface.co/datasets/sayakpaul/sample-datasets/resolve/main/beignets.jpeg" image = Image.open(requests.get(url, stream=True).raw) image<jupyter_output><empty_output><jupyter_text>We first instantiate an `image_processor` from the underlying model repo.<jupyter_code>image_processor = AutoImageProcessor.from_pretrained(repo_name)<jupyter_output>loading configuration file preprocessor_config.json from cache at /root/.cache/huggingface/hub/models--sayakpaul--vit-base-patch16-224-in21k-finetuned-lora-food101/snapshots/fa2503cc7d91e0dd69728c1dc66ed80d7bd3289b/preprocessor_config.json Image processor ViTImageProcessor { "do_normalize": true, "do_rescale": true, "do_resize": true, "image_mean": [ 0.5, 0.5, 0.5 ], "image_processor_type": "ViTImageProcessor", "image_std": [ 0.5, 0.5, 0.5 ], "resample": 2, "rescale_factor": 0.00392156862745098, "size": { "height": 224, "width": 224 } }<jupyter_text>We then prepare the sample for inference.<jupyter_code># prepare image for the model encoding = image_processor(image.convert("RGB"), return_tensors="pt") print(encoding.pixel_values.shape)<jupyter_output>torch.Size([1, 3, 224, 224])<jupyter_text>And run inference!<jupyter_code>import torch # forward pass with torch.no_grad(): outputs = inference_model(**encoding) logits = outputs.logits predicted_class_idx = logits.argmax(-1).item() print("Predicted class:", inference_model.config.id2label[predicted_class_idx])<jupyter_output>Predicted class: beignets
peft/examples/image_classification/image_classification_peft_lora.ipynb/0
{ "file_path": "peft/examples/image_classification/image_classification_peft_lora.ipynb", "repo_id": "peft", "token_count": 6372 }
204
<jupyter_start><jupyter_text>IntroductionIn this notebook, we will learn how to use [LoRA](https://arxiv.org/abs/2106.09685) from 🤗 PEFT to fine-tune a SegFormer model variant for semantic segmentation by ONLY using **14%** of the original trainable parameters of the model. LoRA adds low-rank "update matrices" to certain blocks in the underlying model (in this case the attention blocks) and ONLY trains those matrices during fine-tuning. During inference, these update matrices are _merged_ with the original model parameters. For more details, check out the [original LoRA paper](https://arxiv.org/abs/2106.09685). Let's get started by installing the dependencies. Install dependenciesHere we're installing `peft` from source to ensure we have access to all the bleeding edge features of `peft`.<jupyter_code>!pip install transformers accelerate evaluate datasets git+https://github.com/huggingface/peft -q<jupyter_output><empty_output><jupyter_text>AuthenticationWe will share our fine-tuned model at the end of training. So, to do that we just authenticate using our 🤗 token. This token is available from [here](https://huggingface.co/settings/tokens). If you don't have a 🤗 account already, we highly encourage you to do so; it's free!<jupyter_code>from huggingface_hub import notebook_login

notebook_login()<jupyter_output><empty_output><jupyter_text>Load a datasetWe're only loading the first 150 instances from the training set of the [SceneParse150 dataset](https://huggingface.co/datasets/scene_parse_150) to keep this example runtime short.<jupyter_code>from datasets import load_dataset

ds = load_dataset("scene_parse_150", split="train[:150]")<jupyter_output><empty_output><jupyter_text>Prepare train and test splits<jupyter_code>ds = ds.train_test_split(test_size=0.1)
train_ds = ds["train"]
test_ds = ds["test"]<jupyter_output><empty_output><jupyter_text>Prepare label mappersWe create two dictionaries:

* `label2id`: maps the semantic classes of the dataset to integer ids.
* `id2label`: `label2id` reversed.<jupyter_code>import json

from huggingface_hub import cached_download, hf_hub_url

repo_id = "huggingface/label-files"
filename = "ade20k-id2label.json"
id2label = json.load(open(cached_download(hf_hub_url(repo_id, filename, repo_type="dataset")), "r"))
id2label = {int(k): v for k, v in id2label.items()}
label2id = {v: k for k, v in id2label.items()}
num_labels = len(id2label)<jupyter_output><empty_output><jupyter_text>Prepare datasets for training and evaluation<jupyter_code>from transformers import AutoImageProcessor

checkpoint = "nvidia/mit-b0"
image_processor = AutoImageProcessor.from_pretrained(checkpoint, do_reduce_labels=True)

from torchvision.transforms import ColorJitter

jitter = ColorJitter(brightness=0.25, contrast=0.25, saturation=0.25, hue=0.1)

from PIL import Image
import numpy as np


def handle_grayscale_image(image):
    np_image = np.array(image)
    if np_image.ndim == 2:
        tiled_image = np.tile(np.expand_dims(np_image, -1), 3)
        return Image.fromarray(tiled_image)
    else:
        return Image.fromarray(np_image)


def train_transforms(example_batch):
    images = [jitter(handle_grayscale_image(x)) for x in example_batch["image"]]
    labels = [x for x in example_batch["annotation"]]
    inputs = image_processor(images, labels)
    return inputs


def val_transforms(example_batch):
    images = [handle_grayscale_image(x) for x in example_batch["image"]]
    labels = [x for x in example_batch["annotation"]]
    inputs = image_processor(images, labels)
    return inputs


train_ds.set_transform(train_transforms)
test_ds.set_transform(val_transforms)<jupyter_output><empty_output><jupyter_text>Evaluation functionIncluding a metric during training is often helpful for evaluating your model's performance. You can quickly load an evaluation method with the [🤗 Evaluate](https://huggingface.co/docs/evaluate/index) library. For this task, load the [mean Intersection over Union (IoU)](https://huggingface.co/spaces/evaluate-metric/accuracy) metric (see the 🤗 Evaluate [quick tour](https://huggingface.co/docs/evaluate/a_quick_tour) to learn more about how to load and compute a metric):<jupyter_code>import torch
from torch import nn
import evaluate

metric = evaluate.load("mean_iou")


def compute_metrics(eval_pred):
    with torch.no_grad():
        logits, labels = eval_pred
        logits_tensor = torch.from_numpy(logits)
        # scale the logits to the size of the label
        logits_tensor = nn.functional.interpolate(
            logits_tensor,
            size=labels.shape[-2:],
            mode="bilinear",
            align_corners=False,
        ).argmax(dim=1)

        pred_labels = logits_tensor.detach().cpu().numpy()
        # currently using _compute instead of compute
        # see this issue for more info: https://github.com/huggingface/evaluate/pull/328#issuecomment-1286866576
        metrics = metric._compute(
            predictions=pred_labels,
            references=labels,
            num_labels=len(id2label),
            ignore_index=0,
            reduce_labels=image_processor.do_reduce_labels,
        )

    # add per category metrics as individual key-value pairs
    per_category_accuracy = metrics.pop("per_category_accuracy").tolist()
    per_category_iou = metrics.pop("per_category_iou").tolist()

    metrics.update({f"accuracy_{id2label[i]}": v for i, v in enumerate(per_category_accuracy)})
    metrics.update({f"iou_{id2label[i]}": v for i, v in enumerate(per_category_iou)})

    return metrics<jupyter_output><empty_output><jupyter_text>Load a base modelFor this example, we use the [SegFormer B0 variant](https://huggingface.co/nvidia/mit-b0).<jupyter_code>def print_trainable_parameters(model):
    """
    Prints the number of trainable parameters in the model.
    """
    trainable_params = 0
    all_param = 0
    for _, param in model.named_parameters():
        all_param += param.numel()
        if param.requires_grad:
            trainable_params += param.numel()
    print(
        f"trainable params: {trainable_params} || all params: {all_param} || trainable%: {100 * trainable_params / all_param:.2f}"
    )<jupyter_output><empty_output><jupyter_text>We pass the `label2id` and `id2label` dictionaries to let the `AutoModelForSemanticSegmentation` class know that we're interested in a custom base model where the decoder head should be randomly initialized w.r.t. our custom dataset.
Note, however, that the rest of the model parameters are pre-trained and will be fine-tuned in a regular transfer learning setup. We also notice that 100% of the parameters in the `model` are trainable.<jupyter_code>from transformers import AutoModelForSemanticSegmentation, TrainingArguments, Trainer

model = AutoModelForSemanticSegmentation.from_pretrained(
    checkpoint, id2label=id2label, label2id=label2id, ignore_mismatched_sizes=True
)
print_trainable_parameters(model)<jupyter_output><empty_output><jupyter_text>Wrap `model` as a `PeftModel` for LoRA trainingThis involves two steps:

* Defining a config with `LoraConfig`
* Wrapping the original `model` with `get_peft_model()` with the config defined in the step above.<jupyter_code>from peft import LoraConfig, get_peft_model

config = LoraConfig(
    r=32,
    lora_alpha=32,
    target_modules=["query", "value"],
    lora_dropout=0.1,
    bias="lora_only",
    modules_to_save=["decode_head"],
)
lora_model = get_peft_model(model, config)
print_trainable_parameters(lora_model)<jupyter_output>===================================BUG REPORT===================================
Welcome to bitsandbytes. For bug reports, please submit your error trace to: https://github.com/TimDettmers/bitsandbytes/issues
================================================================================
trainable params: 564374 || all params: 3883766 || trainable%: 14.53<jupyter_text>Let's unpack what's going on here. In order for LoRA to take effect, we need to specify the target modules to `LoraConfig` so that `PeftModel` knows which modules inside our model need to be amended with LoRA matrices. In this case, we're only interested in targeting the query and value matrices of the attention blocks of the base model. Since the parameters corresponding to these matrices are "named" with `query` and `value` respectively, we specify them accordingly in the `target_modules` argument of `LoraConfig`. We also specify `modules_to_save`. After we wrap our base model `model` with `PeftModel` along with the `config`, we get a new model where only the LoRA parameters are trainable (the so-called "update matrices") while the pre-trained parameters are kept frozen. These include the parameters of the randomly initialized classifier too. This is NOT what we want when fine-tuning the base model on our custom dataset. To ensure that the classifier parameters are also trained, we specify `modules_to_save`. This also ensures that these modules are serialized alongside the LoRA trainable parameters when using utilities like `save_pretrained()` and `push_to_hub()`. Regarding the other parameters:

* `r`: The dimension used by the LoRA update matrices.
* `alpha`: Scaling factor.
* `bias`: Specifying if the `bias` parameters should be trained. `lora_only` denotes only the LoRA `bias` parameters will be trained.

`r` and `alpha` together control the total number of final trainable parameters when using LoRA, giving us the flexibility to balance a trade-off between end performance and compute efficiency. We can also see how many parameters we're actually training. Since we're interested in performing **parameter-efficient fine-tuning**, we should expect fewer trainable parameters in the `lora_model` in comparison to the original `model`, which is indeed the case here.
For sanity, let's also manually verify the modules that are actually trainable in `lora_model`.<jupyter_code>for name, param in lora_model.named_parameters():
    if param.requires_grad:
        print(name, param.shape)<jupyter_output>base_model.model.segformer.encoder.block.0.0.attention.self.query.lora_A.weight torch.Size([32, 32])
base_model.model.segformer.encoder.block.0.0.attention.self.query.lora_B.weight torch.Size([32, 32])
base_model.model.segformer.encoder.block.0.0.attention.self.value.lora_A.weight torch.Size([32, 32])
base_model.model.segformer.encoder.block.0.0.attention.self.value.lora_B.weight torch.Size([32, 32])
base_model.model.segformer.encoder.block.0.1.attention.self.query.lora_A.weight torch.Size([32, 32])
base_model.model.segformer.encoder.block.0.1.attention.self.query.lora_B.weight torch.Size([32, 32])
base_model.model.segformer.encoder.block.0.1.attention.self.value.lora_A.weight torch.Size([32, 32])
base_model.model.segformer.encoder.block.0.1.attention.self.value.lora_B.weight torch.Size([32, 32])
base_model.model.segformer.encoder.block.1.0.attention.self.query.lora_A.weight torch.Size([32, 64])
base_model.model.segformer.encoder.block.1.0.attention.self.query.lora_B.weight torch.Size([...]<jupyter_text>We can confirm that only the LoRA parameters appended to the attention blocks and the `decode_head` parameters are trainable. Train!This is a two-step process:

1. Define your training hyperparameters in [TrainingArguments](https://huggingface.co/docs/transformers/v4.26.0/en/main_classes/trainer#transformers.TrainingArguments). It is important you don't remove unused columns because this'll drop the image column. Without the image column, you can't create `pixel_values`. Set `remove_unused_columns=False` to prevent this behavior! The only other required parameter is `output_dir`, which specifies where to save your model. At the end of each epoch, the `Trainer` will evaluate the IoU metric and save the training checkpoint.
2. Pass the training arguments to [Trainer](https://huggingface.co/docs/transformers/v4.26.0/en/main_classes/trainer#transformers.Trainer) along with the model, dataset, tokenizer, data collator, and `compute_metrics` function.
3. Call `train()` to finetune your model.

**Note** that this example is meant to walk you through the workflow when using PEFT for semantic segmentation. We didn't perform extensive hyperparameter tuning to achieve optimal results.<jupyter_code>model_name = checkpoint.split("/")[-1]

training_args = TrainingArguments(
    output_dir=f"{model_name}-scene-parse-150-lora",
    learning_rate=5e-4,
    num_train_epochs=50,
    per_device_train_batch_size=4,
    per_device_eval_batch_size=2,
    save_total_limit=3,
    evaluation_strategy="epoch",
    save_strategy="epoch",
    logging_steps=5,
    remove_unused_columns=False,
    push_to_hub=True,
    label_names=["labels"],
)

trainer = Trainer(
    model=lora_model,
    args=training_args,
    train_dataset=train_ds,
    eval_dataset=test_ds,
    compute_metrics=compute_metrics,
)

trainer.train()<jupyter_output><empty_output><jupyter_text>Saving the model and inference Here we use the `save_pretrained()` method of the `lora_model` to save the *LoRA-only parameters* locally.
However, you can also use the `push_to_hub()` method to upload these parameters directly to the Hugging Face Hub (as shown [here](https://colab.research.google.com/github/huggingface/peft/blob/main/examples/image_classification/image_classification_peft_lora.ipynb)).<jupyter_code>model_id = "segformer-scene-parse-150-lora"
lora_model.save_pretrained(model_id)<jupyter_output><empty_output><jupyter_text>We can see that the LoRA-only parameters are just **2.2 MB in size**! This greatly improves the portability when using very large models.<jupyter_code>!ls -lh {model_id}<jupyter_output>total 2.2M
-rw-r--r-- 1 root root  369 Feb  8 03:09 adapter_config.json
-rw-r--r-- 1 root root 2.2M Feb  8 03:09 adapter_model.bin<jupyter_text>Let's now prepare our `inference_model` and run an inference.<jupyter_code>from peft import PeftConfig, PeftModel

config = PeftConfig.from_pretrained(model_id)
model = AutoModelForSemanticSegmentation.from_pretrained(
    checkpoint, id2label=id2label, label2id=label2id, ignore_mismatched_sizes=True
)
# Load the LoRA model
inference_model = PeftModel.from_pretrained(model, model_id)<jupyter_output><empty_output><jupyter_text>Fetch an image.<jupyter_code>import requests

url = "https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/semantic-seg-image.png"
image = Image.open(requests.get(url, stream=True).raw)
image<jupyter_output><empty_output><jupyter_text>Preprocess the image.<jupyter_code># prepare image for the model
encoding = image_processor(image.convert("RGB"), return_tensors="pt")
print(encoding.pixel_values.shape)<jupyter_output>torch.Size([1, 3, 512, 512])<jupyter_text>Run an inference.<jupyter_code>with torch.no_grad():
    outputs = inference_model(pixel_values=encoding.pixel_values)
    logits = outputs.logits

upsampled_logits = nn.functional.interpolate(
    logits,
    size=image.size[::-1],
    mode="bilinear",
    align_corners=False,
)
pred_seg = upsampled_logits.argmax(dim=1)[0]<jupyter_output><empty_output><jupyter_text>Visualize the results.We need a color palette to visualize the results. Here, we use [one provided by the TensorFlow Model Garden repository](https://github.com/tensorflow/models/blob/3f1ca33afe3c1631b733ea7e40c294273b9e406d/research/deeplab/utils/get_dataset_colormap.py#L51).<jupyter_code>def ade_palette():
    """Creates a label colormap used in ADE20K segmentation benchmark.

    Returns:
        A colormap for visualizing segmentation results.
""" return np.asarray( [ [0, 0, 0], [120, 120, 120], [180, 120, 120], [6, 230, 230], [80, 50, 50], [4, 200, 3], [120, 120, 80], [140, 140, 140], [204, 5, 255], [230, 230, 230], [4, 250, 7], [224, 5, 255], [235, 255, 7], [150, 5, 61], [120, 120, 70], [8, 255, 51], [255, 6, 82], [143, 255, 140], [204, 255, 4], [255, 51, 7], [204, 70, 3], [0, 102, 200], [61, 230, 250], [255, 6, 51], [11, 102, 255], [255, 7, 71], [255, 9, 224], [9, 7, 230], [220, 220, 220], [255, 9, 92], [112, 9, 255], [8, 255, 214], [7, 255, 224], [255, 184, 6], [10, 255, 71], [255, 41, 10], [7, 255, 255], [224, 255, 8], [102, 8, 255], [255, 61, 6], [255, 194, 7], [255, 122, 8], [0, 255, 20], [255, 8, 41], [255, 5, 153], [6, 51, 255], [235, 12, 255], [160, 150, 20], [0, 163, 255], [140, 140, 140], [250, 10, 15], [20, 255, 0], [31, 255, 0], [255, 31, 0], [255, 224, 0], [153, 255, 0], [0, 0, 255], [255, 71, 0], [0, 235, 255], [0, 173, 255], [31, 0, 255], [11, 200, 200], [255, 82, 0], [0, 255, 245], [0, 61, 255], [0, 255, 112], [0, 255, 133], [255, 0, 0], [255, 163, 0], [255, 102, 0], [194, 255, 0], [0, 143, 255], [51, 255, 0], [0, 82, 255], [0, 255, 41], [0, 255, 173], [10, 0, 255], [173, 255, 0], [0, 255, 153], [255, 92, 0], [255, 0, 255], [255, 0, 245], [255, 0, 102], [255, 173, 0], [255, 0, 20], [255, 184, 184], [0, 31, 255], [0, 255, 61], [0, 71, 255], [255, 0, 204], [0, 255, 194], [0, 255, 82], [0, 10, 255], [0, 112, 255], [51, 0, 255], [0, 194, 255], [0, 122, 255], [0, 255, 163], [255, 153, 0], [0, 255, 10], [255, 112, 0], [143, 255, 0], [82, 0, 255], [163, 255, 0], [255, 235, 0], [8, 184, 170], [133, 0, 255], [0, 255, 92], [184, 0, 255], [255, 0, 31], [0, 184, 255], [0, 214, 255], [255, 0, 112], [92, 255, 0], [0, 224, 255], [112, 224, 255], [70, 184, 160], [163, 0, 255], [153, 0, 255], [71, 255, 0], [255, 0, 163], [255, 204, 0], [255, 0, 143], [0, 255, 235], [133, 255, 0], [255, 0, 235], [245, 0, 255], [255, 0, 122], [255, 245, 0], [10, 190, 212], [214, 255, 0], [0, 204, 255], [20, 0, 255], [255, 255, 0], [0, 153, 255], [0, 41, 255], [0, 255, 204], [41, 0, 255], [41, 255, 0], [173, 0, 255], [0, 245, 255], [71, 0, 255], [122, 0, 255], [0, 255, 184], [0, 92, 255], [184, 255, 0], [0, 133, 255], [255, 214, 0], [25, 194, 194], [102, 255, 0], [92, 0, 255], ] ) import matplotlib.pyplot as plt color_seg = np.zeros((pred_seg.shape[0], pred_seg.shape[1], 3), dtype=np.uint8) palette = np.array(ade_palette()) for label, color in enumerate(palette): color_seg[pred_seg == label, :] = color color_seg = color_seg[..., ::-1] # convert to BGR img = np.array(image) * 0.5 + color_seg * 0.5 # plot the image with the segmentation map img = img.astype(np.uint8) plt.figure(figsize=(15, 10)) plt.imshow(img) plt.show()<jupyter_output><empty_output>
peft/examples/semantic_segmentation/semantic_segmentation_peft_lora.ipynb/0
{ "file_path": "peft/examples/semantic_segmentation/semantic_segmentation_peft_lora.ipynb", "repo_id": "peft", "token_count": 8322 }
205
[tool.black]
# Only used by `hf-doc-builder`.
line-length = 119
target-version = ['py38']

[tool.ruff]
target-version = "py38"
line-length = 119
extend-exclude = ["*.ipynb"]

[tool.ruff.lint]
extend-select = [
    "C",  # Complexity
    "E",  # PEP8 errors
    "F",  # PEP8 formatting
    "I",  # Import sorting
    "UP",  # Pyupgrade upgrades
    "W",  # PEP8 warnings
    "PT009",  # Pytest assertions
]
ignore = [
    "C901",  # Function too complex
    "E501",  # Line length (handled by ruff-format)
    "UP007",  # X | Y style Unions
]

[tool.ruff.lint.isort]
lines-after-imports = 2
known-first-party = ["peft"]

[tool.pytest]
doctest_optionflags = [
    "NORMALIZE_WHITESPACE",
    "ELLIPSIS",
    "NUMBER",
]

[tool.pytest.ini_options]
addopts = "--cov=src/peft --cov-report=term-missing --durations=10"
markers = [
    "single_gpu_tests: tests that run on a single GPU",
    "multi_gpu_tests: tests that run on multiple GPUs",
    "regression: whether to run regression suite test",
    "bitsandbytes: select bitsandbytes integration tests",
]
peft/pyproject.toml/0
{ "file_path": "peft/pyproject.toml", "repo_id": "peft", "token_count": 420 }
206
# Copyright 2023-present the HuggingFace Inc. team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import warnings from typing import Any, List, Optional import torch import torch.nn as nn from transformers.pytorch_utils import Conv1D from peft.tuners.tuners_utils import BaseTunerLayer, check_adapters_to_merge from peft.utils import transpose class IA3Layer(BaseTunerLayer): # All names of layers that may contain adapter weights adapter_layer_names = ("ia3_l",) def __init__(self, base_layer: nn.Module, is_feedforward: bool, **kwargs) -> None: self.base_layer = base_layer self.ia3_l = nn.ParameterDict({}) # Mark the weight as unmerged self._disable_adapters = False self.merged_adapters = [] self.is_feedforward = is_feedforward base_layer = self.get_base_layer() if isinstance(base_layer, nn.Linear): in_features, out_features = base_layer.in_features, base_layer.out_features elif isinstance(base_layer, nn.Conv2d): in_features, out_features = base_layer.in_channels, base_layer.out_channels elif isinstance(base_layer, nn.Embedding): in_features, out_features = base_layer.num_embeddings, base_layer.embedding_dim elif isinstance(base_layer, Conv1D): in_features, out_features = ( base_layer.weight.ds_shape if hasattr(base_layer.weight, "ds_shape") else base_layer.weight.shape ) else: raise ValueError(f"Unsupported layer type {type(base_layer)}") self.in_features = in_features self.out_features = out_features def update_layer(self, adapter_name, init_ia3_weights): # This code works for linear layers, override for other layer types # Actual trainable parameters if self.is_feedforward: weight = torch.randn((1, self.in_features)) else: weight = torch.randn((self.out_features, 1)) self.ia3_l[adapter_name] = nn.Parameter(weight) if init_ia3_weights: self.reset_ia3_parameters(adapter_name) self._move_adapter_to_device_of_base_layer(adapter_name) self.set_adapter(self.active_adapters) def reset_ia3_parameters(self, adapter_name): if adapter_name in self.ia3_l.keys(): # initialize learned vector with torch.ones nn.init.constant_(self.ia3_l[adapter_name], 1.0) class Linear(nn.Module, IA3Layer): # (IA)^3 implemented in a dense layer def __init__( self, base_layer: nn.Module, adapter_name: str, fan_in_fan_out: bool = False, # Set this to True if the layer to replace stores weight like (fan_in, fan_out) is_feedforward: bool = False, # Set to True if the layer is treated as a feedforward layer is_target_conv_1d_layer: bool = False, # whether target module is a conv1d layer. 
useful while unloading later init_ia3_weights: bool = True, # whether to initialize IA3 weights **kwargs, ) -> None: super().__init__() IA3Layer.__init__(self, base_layer, is_feedforward=is_feedforward) self.fan_in_fan_out = fan_in_fan_out self.is_target_conv_1d_layer = is_target_conv_1d_layer self._active_adapter = adapter_name self.update_layer(adapter_name, init_ia3_weights) def merge(self, safe_merge: bool = False, adapter_names: Optional[List[str]] = None) -> None: """ Merge the active adapter weights into the base weights Args: safe_merge (`bool`, *optional*): If True, the merge operation will be performed in a copy of the original weights and check for NaNs before merging the weights. This is useful if you want to check if the merge operation will produce NaNs. Defaults to `False`. adapter_names (`List[str]`, *optional*): The list of adapter names that should be merged. If None, all active adapters will be merged. Defaults to `None`. """ adapter_names = check_adapters_to_merge(self, adapter_names) if not adapter_names: # no adapter to merge return for active_adapter in adapter_names: if active_adapter in self.ia3_l.keys(): base_layer = self.get_base_layer() ia3_l = transpose(self.ia3_l[active_adapter].data, self.fan_in_fan_out) orig_dtype = base_layer.weight.data.dtype if safe_merge: orig_weights = base_layer.weight.data orig_weights = torch.mul(orig_weights, ia3_l) if not torch.isfinite(orig_weights).all(): raise ValueError( f"NaNs detected in the merged weights. The adapter {active_adapter} seems to be broken" ) base_layer.weight.data = orig_weights.to(orig_dtype) else: base_layer.weight.data = torch.mul(base_layer.weight.data, ia3_l).to(orig_dtype) if not self.is_feedforward and (base_layer.bias is not None): scaling = self.ia3_l[active_adapter].reshape(base_layer.bias.shape) orig_dtype = base_layer.bias.data.dtype base_layer.bias.data = torch.mul(base_layer.bias.data, scaling.data).to(orig_dtype) self.merged_adapters.append(active_adapter) def unmerge(self) -> None: """ This method unmerges all merged adapter layers from the base weights. """ if not self.merged: warnings.warn("Already unmerged. Nothing to do.") return warnings.warn("Unmerge result can be inaccurate for (IA)^3.") while len(self.merged_adapters) > 0: active_adapter = self.merged_adapters.pop() if active_adapter in self.ia3_l.keys(): base_layer = self.get_base_layer() # Add tolerance to avoid division by zero ia3_l = transpose(self.ia3_l[active_adapter].data, self.fan_in_fan_out) + 1e-8 orig_dtype = base_layer.weight.data.dtype base_layer.weight.data = torch.div(base_layer.weight.data, ia3_l).to(orig_dtype) if not self.is_feedforward and (base_layer.bias is not None): scaling = self.ia3_l[active_adapter].reshape(base_layer.bias.shape) orig_dtype = base_layer.bias.data.dtype base_layer.bias.data = torch.div(base_layer.bias.data, scaling.data + 1e-8).to(orig_dtype) def forward(self, x: torch.Tensor, *args: Any, **kwargs: Any) -> torch.Tensor: dtype = previous_dtype = x.dtype if self.disable_adapters: if self.merged: self.unmerge() result = self.base_layer(x, *args, **kwargs) elif self.merged: result = self.base_layer(x, *args, **kwargs) else: ia3_scaling = 1 for active_adapter in self.active_adapters: if active_adapter not in self.ia3_l.keys(): continue dtype = self.ia3_l[active_adapter].dtype ia3_scaling *= self.ia3_l[active_adapter].flatten() if self.is_feedforward: x = x.to(dtype) # TODO: weight.dtype can be != self.ia3_l[self.active_adapters].dtype # e.g. bf16 vs fp32. Is that okay?
interm = (x * ia3_scaling).to(previous_dtype) result = self.base_layer(interm, *args, **kwargs) else: result = self.base_layer(x, *args, **kwargs) result_dtype = result.dtype result = (result * ia3_scaling).to(result_dtype) return result class Conv2d(nn.Module, IA3Layer): def __init__( self, base_layer: nn.Module, adapter_name: str, fan_in_fan_out: bool = False, # Set this to True if the layer to replace stores weight like (fan_in, fan_out) is_feedforward: bool = False, # Set to True if the layer is treated as a feedforward layer init_ia3_weights: bool = True, **kwargs, ) -> None: super().__init__() IA3Layer.__init__(self, base_layer, is_feedforward=is_feedforward) self.fan_in_fan_out = fan_in_fan_out self._active_adapter = adapter_name self.update_layer(adapter_name, init_ia3_weights) def update_layer(self, adapter_name, init_ia3_weights): # Actual trainable parameters if self.is_feedforward: weight = torch.randn((1, self.in_features, 1, 1)) else: weight = torch.randn((1, self.out_features, 1, 1)) self.ia3_l[adapter_name] = nn.Parameter(weight) if init_ia3_weights: self.reset_ia3_parameters(adapter_name) self._move_adapter_to_device_of_base_layer(adapter_name) self.set_adapter(self.active_adapters) def merge(self, safe_merge: bool = False, adapter_names: Optional[List[str]] = None) -> None: """ Merge the active adapter weights into the base weights Args: safe_merge (`bool`, *optional*): If True, the merge operation will be performed in a copy of the original weights and check for NaNs before merging the weights. This is useful if you want to check if the merge operation will produce NaNs. Defaults to `False`. adapter_names (`List[str]`, *optional*): The list of adapter names that should be merged. If None, all active adapters will be merged. Defaults to `None`. """ adapter_names = check_adapters_to_merge(self, adapter_names) if not adapter_names: # no adapter to merge return for active_adapter in adapter_names: if active_adapter in self.ia3_l.keys(): base_layer = self.get_base_layer() ia3_scaling = self.ia3_l[active_adapter].data if not self.is_feedforward: ia3_scaling = ia3_scaling.permute(1, 0, 2, 3) if safe_merge: output_weight = torch.mul(base_layer.weight.data, ia3_scaling).clone() if not torch.isfinite(output_weight).all(): raise ValueError( f"NaNs detected in the merged weights. The adapter {active_adapter} seems to be broken" ) base_layer.weight.data = output_weight else: base_layer.weight.data = torch.mul(base_layer.weight.data, ia3_scaling) if not self.is_feedforward and (base_layer.bias is not None): scaling = self.ia3_l[active_adapter].reshape(base_layer.bias.shape) base_layer.bias.data = torch.mul(base_layer.bias.data, scaling.data) self.merged_adapters.append(active_adapter) def unmerge(self) -> None: """ This method unmerges all merged adapter layers from the base weights. """ if not self.merged: warnings.warn("Already unmerged. Nothing to do.") return warnings.warn("Unmerge result can be inaccurate for (IA)^3.") while len(self.merged_adapters) > 0: active_adapter = self.merged_adapters.pop() if active_adapter in self.ia3_l.keys(): base_layer = self.get_base_layer() # divide by (IA)^3 vector. 
Add tolerance to avoid division by zero ia3_scaling = self.ia3_l[active_adapter].data if not self.is_feedforward: ia3_scaling = ia3_scaling.permute(1, 0, 2, 3) base_layer.weight.data = torch.div(base_layer.weight.data, ia3_scaling + 1e-8) if not self.is_feedforward and (base_layer.bias is not None): scaling = self.ia3_l[active_adapter].reshape(base_layer.bias.shape) base_layer.bias.data = torch.div(base_layer.bias.data, scaling.data + 1e-8) def forward(self, x: torch.Tensor, *args: Any, **kwargs: Any) -> torch.Tensor: dtype = previous_dtype = x.dtype if self.disable_adapters: if self.merged: self.unmerge() result = self.base_layer(x, *args, **kwargs) elif self.merged: result = self.base_layer(x, *args, **kwargs) else: ia3_scaling = 1 for active_adapter in self.active_adapters: if active_adapter not in self.ia3_l.keys(): continue dtype = self.ia3_l[active_adapter].dtype ia3_scaling *= self.ia3_l[active_adapter] if self.is_feedforward: x = x.to(dtype) # TODO: weight.dtype can be != self.ia3_l[self.active_adapters].dtype # e.g. bf16 vs fp32. Is that okay? interm = (x * ia3_scaling).to(self.get_base_layer().weight.dtype) result = self.base_layer(interm, *args, **kwargs) else: result = self.base_layer(x, *args, **kwargs) result = result.to(dtype) * ia3_scaling result = result.to(previous_dtype) return result
peft/src/peft/tuners/ia3/layer.py/0
{ "file_path": "peft/src/peft/tuners/ia3/layer.py", "repo_id": "peft", "token_count": 6593 }
207
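A short usage sketch for the (IA)³ layers above, going through the public PEFT API rather than instantiating `Linear`/`Conv2d` directly; the base model id and target module names are illustrative:

```py
# Sketch: wrap a small causal LM with (IA)^3 adapters and exercise merge/unmerge.
from transformers import AutoModelForCausalLM

from peft import IA3Config, get_peft_model

base = AutoModelForCausalLM.from_pretrained("facebook/opt-125m")  # example model
config = IA3Config(
    task_type="CAUSAL_LM",
    target_modules=["q_proj", "v_proj", "fc2"],
    feedforward_modules=["fc2"],  # takes the is_feedforward=True path in the layer code
)
model = get_peft_model(base, config)
model.print_trainable_parameters()  # only the ia3_l vectors are trainable

# merge() multiplies the learned vectors into the base weights; unmerge() divides
# them back out with a 1e-8 tolerance, so the round trip is only approximate.
model.merge_adapter()
model.unmerge_adapter()
```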
# Copyright 2024-present the HuggingFace Inc. team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import importlib.metadata as importlib_metadata from typing import Any, Optional import packaging.version import torch from peft.import_utils import is_auto_awq_available from peft.tuners.lora.layer import LoraLayer from peft.tuners.tuners_utils import BaseTunerLayer if is_auto_awq_available(): from awq.modules.linear import WQLinear_GEMM class AwqLoraLinear(torch.nn.Module, LoraLayer): def __init__( self, base_layer, adapter_name, r: int = 0, lora_alpha: int = 1, lora_dropout: float = 0.0, init_lora_weights: bool = True, use_rslora: bool = False, **kwargs, ): super().__init__() LoraLayer.__init__(self, base_layer) # self.base_layer and self.quant_linear_module are the same; we need the former for consistency and the latter # for backwards compatibility self.quant_linear_module = base_layer self._active_adapter = adapter_name self.update_layer(adapter_name, r, lora_alpha, lora_dropout, init_lora_weights, use_rslora) def forward(self, x: torch.Tensor): result = self.quant_linear_module(x) if self.disable_adapters: return result for active_adapter in self.active_adapters: if active_adapter not in self.lora_A.keys(): continue lora_A = self.lora_A[active_adapter] lora_B = self.lora_B[active_adapter] dropout = self.lora_dropout[active_adapter] scaling = self.scaling[active_adapter] requires_conversion = not torch.is_autocast_enabled() if requires_conversion: expected_dtype = result.dtype x = x.to(lora_A.weight.dtype) output = lora_B(lora_A(dropout(x))) if requires_conversion: output = output.to(expected_dtype) output = output * scaling result = result + output return result def __repr__(self) -> str: rep = super().__repr__() return "lora." + rep def dispatch_awq( target: torch.nn.Module, adapter_name: str, **kwargs: Any, ) -> Optional[torch.nn.Module]: new_module = None if isinstance(target, BaseTunerLayer): target_base_layer = target.get_base_layer() else: target_base_layer = target if is_auto_awq_available() and isinstance(target_base_layer, WQLinear_GEMM): # Raise the error only at the dispatch level AUTOAWQ_MINIMUM_VERSION = packaging.version.parse("0.2.0") version_autoawq = packaging.version.parse(importlib_metadata.version("autoawq")) if AUTOAWQ_MINIMUM_VERSION > version_autoawq: raise ImportError( f"Found an incompatible version of auto-awq. Found version {version_autoawq}, " f"but only versions above {AUTOAWQ_MINIMUM_VERSION} are supported for PEFT." ) new_module = AwqLoraLinear(target, adapter_name, **kwargs) target.qweight = target_base_layer.qweight return new_module
peft/src/peft/tuners/lora/awq.py/0
{ "file_path": "peft/src/peft/tuners/lora/awq.py", "repo_id": "peft", "token_count": 1532 }
208
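A hedged sketch of how the dispatcher above is reached in practice: load an AWQ checkpoint and target its quantized linears with LoRA. The model id is only an example, and `autoawq>=0.2.0` is assumed per the version check in `dispatch_awq`:

```py
# Sketch: attach LoRA adapters to an AWQ-quantized model; dispatch_awq routes each
# targeted WQLinear_GEMM module to AwqLoraLinear.
from transformers import AutoModelForCausalLM

from peft import LoraConfig, get_peft_model

model = AutoModelForCausalLM.from_pretrained(
    "TheBloke/Mistral-7B-v0.1-AWQ",  # example AWQ checkpoint
    device_map="auto",
)
config = LoraConfig(r=16, lora_alpha=32, target_modules=["q_proj", "v_proj"], task_type="CAUSAL_LM")
model = get_peft_model(model, config)
```

Note that `AwqLoraLinear` defines no `merge`, so at inference the LoRA branch remains a separate addition on top of the quantized forward pass.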
# Copyright 2023-present the HuggingFace Inc. team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import enum from dataclasses import dataclass, field from typing import Optional, Union from peft.config import PromptLearningConfig from peft.utils import PeftType class PromptTuningInit(str, enum.Enum): TEXT = "TEXT" RANDOM = "RANDOM" @dataclass class PromptTuningConfig(PromptLearningConfig): """ This is the configuration class to store the configuration of a [`PromptEmbedding`]. Args: prompt_tuning_init (Union[[`PromptTuningInit`], `str`]): The initialization of the prompt embedding. prompt_tuning_init_text (`str`, *optional*): The text to initialize the prompt embedding. Only used if `prompt_tuning_init` is `TEXT`. tokenizer_name_or_path (`str`, *optional*): The name or path of the tokenizer. Only used if `prompt_tuning_init` is `TEXT`. tokenizer_kwargs (`dict`, *optional*): The keyword arguments to pass to `AutoTokenizer.from_pretrained`. Only used if `prompt_tuning_init` is `TEXT`. """ prompt_tuning_init: Union[PromptTuningInit, str] = field( default=PromptTuningInit.RANDOM, metadata={"help": "How to initialize the prompt tuning parameters"}, ) prompt_tuning_init_text: Optional[str] = field( default=None, metadata={ "help": "The text to use for prompt tuning initialization. Only used if prompt_tuning_init is `TEXT`" }, ) tokenizer_name_or_path: Optional[str] = field( default=None, metadata={ "help": "The tokenizer to use for prompt tuning initialization. Only used if prompt_tuning_init is `TEXT`" }, ) tokenizer_kwargs: Optional[dict] = field( default=None, metadata={ "help": ( "The keyword arguments to pass to `AutoTokenizer.from_pretrained`. Only used if prompt_tuning_init is " "`TEXT`" ), }, ) def __post_init__(self): self.peft_type = PeftType.PROMPT_TUNING if (self.prompt_tuning_init == PromptTuningInit.TEXT) and not self.tokenizer_name_or_path: raise ValueError( f"When prompt_tuning_init='{PromptTuningInit.TEXT.value}', " f"tokenizer_name_or_path can't be {self.tokenizer_name_or_path}." ) if (self.prompt_tuning_init == PromptTuningInit.TEXT) and self.prompt_tuning_init_text is None: raise ValueError( f"When prompt_tuning_init='{PromptTuningInit.TEXT.value}', " f"prompt_tuning_init_text can't be {self.prompt_tuning_init_text}." ) if self.tokenizer_kwargs and (self.prompt_tuning_init != PromptTuningInit.TEXT): raise ValueError( f"tokenizer_kwargs only valid when using prompt_tuning_init='{PromptTuningInit.TEXT.value}'." )
peft/src/peft/tuners/prompt_tuning/config.py/0
{ "file_path": "peft/src/peft/tuners/prompt_tuning/config.py", "repo_id": "peft", "token_count": 1394 }
209
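A minimal sketch of constructing the config above with TEXT initialization, which exercises the `__post_init__` validation; the model id and prompt text are illustrative:

```py
from peft import PromptTuningConfig, PromptTuningInit, TaskType

config = PromptTuningConfig(
    task_type=TaskType.CAUSAL_LM,
    num_virtual_tokens=8,  # inherited from PromptLearningConfig
    prompt_tuning_init=PromptTuningInit.TEXT,
    prompt_tuning_init_text="Classify if the tweet is a complaint or not:",
    tokenizer_name_or_path="facebook/opt-125m",  # required whenever init is TEXT
)
# Omitting tokenizer_name_or_path or prompt_tuning_init_text here would raise a
# ValueError in __post_init__; with the default RANDOM init, neither is needed.
```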
# Copyright 2024-present the HuggingFace Inc. team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import warnings from typing import List, Literal import torch def reshape_weight_task_tensors(task_tensors, weights): """ Reshapes `weights` to match the shape of `task_tensors` by unsqueezing in the remaining dimensions. Args: task_tensors (`torch.Tensor`): The tensors that will be used to reshape `weights`. weights (`torch.Tensor`): The tensor to be reshaped. Returns: `torch.Tensor`: The reshaped tensor. """ new_shape = weights.shape + (1,) * (task_tensors.dim() - weights.dim()) weights = weights.view(new_shape) return weights def magnitude_based_pruning(tensor: torch.Tensor, density: float) -> torch.Tensor: """ Prune the smallest values of the task tensors and retain the top-k values based on the specified fraction `density`. Args: tensor (`torch.Tensor`):The tensor to prune. density (`float`):The fraction of values to preserve. Should be in [0,1]. Returns: `torch.Tensor`: The tensor with the pruned weights. """ mask = torch.zeros_like(tensor).reshape(-1) k = int(density * tensor.numel()) top_k = torch.topk(tensor.abs().reshape(-1), k=k, largest=True) mask[top_k[1]] = 1 return tensor * mask.reshape(tensor.shape) def random_pruning(tensor: torch.Tensor, density: float, rescale: bool) -> torch.Tensor: """ Prune random values based on the specified fraction `density`. Args: tensor (`torch.Tensor`):The tensor to prune. density (`float`):The fraction of values to preserve. Should be in [0,1]. rescale (`bool`):Whether to rescale the result to preserve the expected value of the original tensor. Returns: `torch.Tensor`: The pruned tensor. """ mask = torch.bernoulli(torch.full_like(input=tensor, fill_value=density)) pruned_tensor = tensor * mask if rescale: pruned_tensor = torch.div(input=pruned_tensor, other=density) return pruned_tensor def prune( tensor: torch.Tensor, density: float, method: Literal["magnitude", "random"], rescale: bool = False ) -> torch.Tensor: """ Prune the values of task tensors based on the `method`. Args: tensor (`torch.Tensor`):The tensor to prune. density (`float`):The fraction of values to preserve. Should be in [0,1]. method (`str`):The method to use to prune. Should be one of ["magnitude", "random"]. rescale (`bool`):Whether to rescale the result to preserve the expected value of the original tensor. Returns: `torch.Tensor`: The pruned tensor. """ if density >= 1: warnings.warn(f"The density {density} is greater than or equal to 1, no pruning will be performed.") return tensor elif density < 0: raise ValueError(f"Density should be >= 0, got {density}") if method == "magnitude": return magnitude_based_pruning(tensor, density) elif method == "random": return random_pruning(tensor, density, rescale=rescale) else: raise ValueError(f"Unknown method {method}") def calculate_majority_sign_mask( tensor: torch.Tensor, method: Literal["total", "frequency"] = "total" ) -> torch.Tensor: """ Get the mask of the majority sign across the task tensors. Task tensors are stacked on dimension 0.
Args: tensor (`torch.Tensor`):The tensor to get the mask from. method (`str`):The method to use to get the mask. Should be one of ["total", "frequency"]. Returns: `torch.Tensor`: The majority sign mask. """ sign = tensor.sign() if method == "total": sign_magnitude = tensor.sum(dim=0) elif method == "frequency": sign_magnitude = sign.sum(dim=0) else: raise RuntimeError(f'Unimplemented mask method "{method}"') majority_sign = torch.where(sign_magnitude >= 0, 1, -1) return sign == majority_sign def disjoint_merge(task_tensors: torch.Tensor, majority_sign_mask: torch.Tensor) -> torch.Tensor: """ Merge the task tensors using disjoint merge. Args: task_tensors (`torch.Tensor`):The task tensors to merge. majority_sign_mask (`torch.Tensor`):The mask of the majority sign across the task tensors. Returns: `torch.Tensor`: The merged tensor. """ mixed_task_tensors = (task_tensors * majority_sign_mask).sum(dim=0) num_params_preserved = majority_sign_mask.sum(dim=0) return mixed_task_tensors / torch.clamp(num_params_preserved, min=1.0) def task_arithmetic(task_tensors: List[torch.Tensor], weights: torch.Tensor) -> torch.Tensor: """ Merge the task tensors using `task arithmetic`. Args: task_tensors(`List[torch.Tensor]`):The task tensors to merge. weights (`torch.Tensor`):The weights of the task tensors. Returns: `torch.Tensor`: The merged tensor. """ task_tensors = torch.stack(task_tensors, dim=0) # weighted task tensors weights = reshape_weight_task_tensors(task_tensors, weights) weighted_task_tensors = task_tensors * weights mixed_task_tensors = weighted_task_tensors.sum(dim=0) return mixed_task_tensors def magnitude_prune(task_tensors: List[torch.Tensor], weights: torch.Tensor, density: float) -> torch.Tensor: """ Merge the task tensors using `task arithmetic`. Args: task_tensors(`List[torch.Tensor]`):The task tensors to merge. weights (`torch.Tensor`):The weights of the task tensors. density (`float`): The fraction of values to preserve. Should be in [0,1]. Returns: `torch.Tensor`: The merged tensor. """ # sparsify task_tensors = [prune(tensor, density, method="magnitude") for tensor in task_tensors] task_tensors = torch.stack(task_tensors, dim=0) # weighted task tensors weights = reshape_weight_task_tensors(task_tensors, weights) weighted_task_tensors = task_tensors * weights mixed_task_tensors = weighted_task_tensors.sum(dim=0) return mixed_task_tensors def ties( task_tensors: List[torch.Tensor], weights: torch.Tensor, density: float, majority_sign_method: Literal["total", "frequency"] = "total", ) -> torch.Tensor: """ Merge the task tensors using `ties`. Args: task_tensors(`List[torch.Tensor]`):The task tensors to merge. weights (`torch.Tensor`):The weights of the task tensors. density (`float`):The fraction of values to preserve. Should be in [0,1]. majority_sign_method (`str`): The method to use to get the majority sign mask. Should be one of ["total", "frequency"]. Returns: `torch.Tensor`: The merged tensor. 
""" # sparsify task_tensors = [prune(tensor, density, method="magnitude") for tensor in task_tensors] task_tensors = torch.stack(task_tensors, dim=0) # Elect Sign majority_sign_mask = calculate_majority_sign_mask(task_tensors, method=majority_sign_method) # weighted task tensors weights = reshape_weight_task_tensors(task_tensors, weights) weighted_task_tensors = task_tensors * weights # Disjoint Merge mixed_task_tensors = disjoint_merge(weighted_task_tensors, majority_sign_mask) return mixed_task_tensors def dare_linear(task_tensors: List[torch.Tensor], weights: torch.Tensor, density: float) -> torch.Tensor: """ Merge the task tensors using `dare linear`. Args: task_tensors(`List[torch.Tensor]`):The task tensors to merge. weights (`torch.Tensor`):The weights of the task tensors. density (`float`):The fraction of values to preserve. Should be in [0,1]. Returns: `torch.Tensor`: The merged tensor. """ # sparsify task_tensors = [prune(tensor, density, method="random", rescale=True) for tensor in task_tensors] task_tensors = torch.stack(task_tensors, dim=0) # weighted task tensors weights = reshape_weight_task_tensors(task_tensors, weights) weighted_task_tensors = task_tensors * weights mixed_task_tensors = weighted_task_tensors.sum(dim=0) return mixed_task_tensors def dare_ties( task_tensors: List[torch.Tensor], weights: torch.Tensor, density: float, majority_sign_method: Literal["total", "frequency"] = "total", ) -> torch.Tensor: """ Merge the task tensors using `dare ties`. Args: task_tensors(`List[torch.Tensor]`):The task tensors to merge. weights (`torch.Tensor`):The weights of the task tensors. density (`float`):The fraction of values to preserve. Should be in [0,1]. majority_sign_method (`str`): The method to use to get the majority sign mask. Should be one of ["total", "frequency"]. Returns: `torch.Tensor`: The merged tensor. """ # sparsify task_tensors = [prune(tensor, density, method="random", rescale=True) for tensor in task_tensors] task_tensors = torch.stack(task_tensors, dim=0) # Elect Sign majority_sign_mask = calculate_majority_sign_mask(task_tensors, method=majority_sign_method) # weighted task tensors weights = reshape_weight_task_tensors(task_tensors, weights) weighted_task_tensors = task_tensors * weights # Disjoint Merge mixed_task_tensors = disjoint_merge(weighted_task_tensors, majority_sign_mask) return mixed_task_tensors
peft/src/peft/utils/merge_utils.py/0
{ "file_path": "peft/src/peft/utils/merge_utils.py", "repo_id": "peft", "token_count": 3819 }
210
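A toy sketch of the merge functions above on small tensors, importing from the module path shown in the record; the tensor values are illustrative, and `dare_ties` is stochastic because of the Bernoulli pruning:

```py
import torch

from peft.utils.merge_utils import dare_ties, task_arithmetic, ties

# Pretend these are flattened weight deltas from two fine-tuned tasks.
task_a = torch.tensor([0.3, -0.2, 0.0, 1.5])
task_b = torch.tensor([0.4, 0.1, -0.7, -0.1])
weights = torch.tensor([1.0, 1.0])  # one weight per task

merged_sum = task_arithmetic([task_a, task_b], weights)  # plain weighted sum
merged_ties = ties([task_a, task_b], weights, density=0.5)  # magnitude prune + sign election
merged_dare = dare_ties([task_a, task_b], weights, density=0.5)  # random prune + rescale + ties
print(merged_sum, merged_ties, merged_dare)
```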
# Copyright 2023-present the HuggingFace Inc. team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import gc import importlib import os import tempfile import unittest from collections import Counter from copy import deepcopy from dataclasses import dataclass from typing import Any, Dict, List, Union import pytest import torch from accelerate import infer_auto_device_map from accelerate.test_utils.testing import run_command from accelerate.utils import patch_environment from datasets import Audio, DatasetDict, load_dataset from packaging import version from parameterized import parameterized from torch.distributed import init_process_group from torch.distributed.fsdp import FullyShardedDataParallel as FSDP from transformers import ( AutoModelForCausalLM, AutoModelForSeq2SeqLM, AutoTokenizer, BitsAndBytesConfig, DataCollatorForLanguageModeling, Seq2SeqTrainer, Seq2SeqTrainingArguments, Trainer, TrainingArguments, WhisperFeatureExtractor, WhisperForConditionalGeneration, WhisperProcessor, WhisperTokenizer, ) from peft import ( AdaLoraConfig, LoftQConfig, LoraConfig, PeftModel, TaskType, get_peft_model, prepare_model_for_kbit_training, replace_lora_weights_loftq, ) from peft.tuners import boft from peft.utils import SAFETENSORS_WEIGHTS_NAME, infer_device from peft.utils.loftq_utils import NFQuantizer from peft.utils.other import fsdp_auto_wrap_policy from .testing_utils import ( require_aqlm, require_auto_awq, require_auto_gptq, require_bitsandbytes, require_eetq, require_hqq, require_non_cpu, require_optimum, require_torch_gpu, require_torch_multi_gpu, ) # A full testing suite that tests all the necessary features on GPU. The tests should # rely on the example scripts to test the features. 
@dataclass class DataCollatorSpeechSeq2SeqWithPadding: r""" Directly copied from: https://github.com/huggingface/peft/blob/main/examples/int8_training/peft_bnb_whisper_large_v2_training.ipynb """ processor: Any def __call__(self, features: List[Dict[str, Union[List[int], torch.Tensor]]]) -> Dict[str, torch.Tensor]: # split inputs and labels since they have to be of different lengths and need different padding methods # first treat the audio inputs by simply returning torch tensors input_features = [{"input_features": feature["input_features"]} for feature in features] batch = self.processor.feature_extractor.pad(input_features, return_tensors="pt") # get the tokenized label sequences label_features = [{"input_ids": feature["labels"]} for feature in features] # pad the labels to max length labels_batch = self.processor.tokenizer.pad(label_features, return_tensors="pt") # replace padding with -100 to ignore loss correctly labels = labels_batch["input_ids"].masked_fill(labels_batch.attention_mask.ne(1), -100) # if bos token is appended in previous tokenization step, # cut bos token here as it's appended later anyway if (labels[:, 0] == self.processor.tokenizer.bos_token_id).all().cpu().item(): labels = labels[:, 1:] batch["labels"] = labels return batch @require_torch_gpu @require_bitsandbytes class PeftBnbGPUExampleTests(unittest.TestCase): r""" A single GPU int8 + fp4 test suite; this will test whether training fits correctly on a single GPU device (1x NVIDIA T4 16GB) using bitsandbytes. The tests are the following: - Seq2Seq model training based on: https://github.com/huggingface/peft/blob/main/examples/int8_training/Finetune_flan_t5_large_bnb_peft.ipynb - Causal LM model training based on: https://github.com/huggingface/peft/blob/main/examples/int8_training/Finetune_opt_bnb_peft.ipynb - Audio model training based on: https://github.com/huggingface/peft/blob/main/examples/int8_training/peft_bnb_whisper_large_v2_training.ipynb """ def setUp(self): self.seq2seq_model_id = "google/flan-t5-base" self.causal_lm_model_id = "facebook/opt-6.7b" self.tokenizer = AutoTokenizer.from_pretrained(self.causal_lm_model_id) self.audio_model_id = "openai/whisper-large" def tearDown(self): r""" Efficient mechanism to free GPU memory after each test. Based on https://github.com/huggingface/transformers/issues/21094 """ gc.collect() if torch.cuda.is_available(): torch.cuda.empty_cache() gc.collect() def _check_inference_finite(self, model, batch): # try inference without Trainer class training = model.training model.eval() output = model(**batch.to(model.device)) assert torch.isfinite(output.logits).all() model.train(training) @pytest.mark.single_gpu_tests def test_causal_lm_training(self): r""" Test the CausalLM training on a single GPU device. This test is a converted version of https://github.com/huggingface/peft/blob/main/examples/int8_training/Finetune_opt_bnb_peft.ipynb where we train `opt-6.7b` on `english_quotes` dataset in few steps. The test would simply fail if the adapters are not set correctly.
""" with tempfile.TemporaryDirectory() as tmp_dir: model = AutoModelForCausalLM.from_pretrained( self.causal_lm_model_id, quantization_config=BitsAndBytesConfig(load_in_8bit=True), device_map="auto", ) tokenizer = AutoTokenizer.from_pretrained(self.causal_lm_model_id) model = prepare_model_for_kbit_training(model) config = LoraConfig( r=16, lora_alpha=32, target_modules=["q_proj", "v_proj"], lora_dropout=0.05, bias="none", task_type="CAUSAL_LM", ) model = get_peft_model(model, config) data = load_dataset("ybelkada/english_quotes_copy") data = data.map(lambda samples: tokenizer(samples["quote"]), batched=True) trainer = Trainer( model=model, train_dataset=data["train"], args=TrainingArguments( per_device_train_batch_size=4, gradient_accumulation_steps=4, warmup_steps=2, max_steps=3, learning_rate=2e-4, fp16=True, logging_steps=1, output_dir=tmp_dir, ), data_collator=DataCollatorForLanguageModeling(tokenizer, mlm=False), ) model.config.use_cache = False trainer.train() model.cpu().save_pretrained(tmp_dir) assert "adapter_config.json" in os.listdir(tmp_dir) assert SAFETENSORS_WEIGHTS_NAME in os.listdir(tmp_dir) # assert loss is not None assert trainer.state.log_history[-1]["train_loss"] is not None @pytest.mark.single_gpu_tests def test_causal_lm_training_4bit(self): r""" Test the CausalLM training on a single GPU device. This test is a converted version of https://github.com/huggingface/peft/blob/main/examples/int8_training/Finetune_opt_bnb_peft.ipynb where we train `opt-6.7b` on `english_quotes` dataset in few steps using 4bit base model. The test would simply fail if the adapters are not set correctly. """ with tempfile.TemporaryDirectory() as tmp_dir: model = AutoModelForCausalLM.from_pretrained( self.causal_lm_model_id, quantization_config=BitsAndBytesConfig(load_in_4bit=True), device_map="auto", ) tokenizer = AutoTokenizer.from_pretrained(self.causal_lm_model_id) model = prepare_model_for_kbit_training(model) config = LoraConfig( r=16, lora_alpha=32, target_modules=["q_proj", "v_proj"], lora_dropout=0.05, bias="none", task_type="CAUSAL_LM", ) model = get_peft_model(model, config) data = load_dataset("ybelkada/english_quotes_copy") data = data.map(lambda samples: tokenizer(samples["quote"]), batched=True) trainer = Trainer( model=model, train_dataset=data["train"], args=TrainingArguments( per_device_train_batch_size=4, gradient_accumulation_steps=4, warmup_steps=2, max_steps=3, learning_rate=2e-4, fp16=True, logging_steps=1, output_dir=tmp_dir, ), data_collator=DataCollatorForLanguageModeling(tokenizer, mlm=False), ) model.config.use_cache = False trainer.train() model.cpu().save_pretrained(tmp_dir) assert "adapter_config.json" in os.listdir(tmp_dir) assert SAFETENSORS_WEIGHTS_NAME in os.listdir(tmp_dir) # assert loss is not None assert trainer.state.log_history[-1]["train_loss"] is not None @pytest.mark.multi_gpu_tests def test_causal_lm_training_multi_gpu_4bit(self): r""" Test the CausalLM training on a multi-GPU device with 4bit base model. The test would simply fail if the adapters are not set correctly. 
""" with tempfile.TemporaryDirectory() as tmp_dir: model = AutoModelForCausalLM.from_pretrained( self.causal_lm_model_id, device_map="auto", quantization_config=BitsAndBytesConfig(load_in_4bit=True), ) assert set(model.hf_device_map.values()) == set(range(torch.cuda.device_count())) model = prepare_model_for_kbit_training(model) setattr(model, "model_parallel", True) setattr(model, "is_parallelizable", True) config = LoraConfig( r=16, lora_alpha=32, target_modules=["q_proj", "v_proj"], lora_dropout=0.05, bias="none", task_type="CAUSAL_LM", ) model = get_peft_model(model, config) data = load_dataset("Abirate/english_quotes") data = data.map(lambda samples: self.tokenizer(samples["quote"]), batched=True) trainer = Trainer( model=model, train_dataset=data["train"], args=TrainingArguments( per_device_train_batch_size=4, gradient_accumulation_steps=4, warmup_steps=2, max_steps=3, learning_rate=2e-4, fp16=True, logging_steps=1, output_dir=tmp_dir, ), data_collator=DataCollatorForLanguageModeling(self.tokenizer, mlm=False), ) model.config.use_cache = False trainer.train() model.cpu().save_pretrained(tmp_dir) assert "adapter_config.json" in os.listdir(tmp_dir) assert SAFETENSORS_WEIGHTS_NAME in os.listdir(tmp_dir) # assert loss is not None assert trainer.state.log_history[-1]["train_loss"] is not None @pytest.mark.single_gpu_tests @require_torch_gpu def test_4bit_adalora_causalLM(self): r""" Tests the 4bit training with adalora """ model_id = "facebook/opt-350m" # for >3 GPUs, might need: device_map={"": "cuda:0"} model = AutoModelForCausalLM.from_pretrained( model_id, quantization_config=BitsAndBytesConfig(load_in_4bit=True) ) tokenizer = AutoTokenizer.from_pretrained(model_id) model.gradient_checkpointing_enable() model = prepare_model_for_kbit_training(model) peft_config = AdaLoraConfig( init_r=6, target_r=4, tinit=50, tfinal=100, deltaT=5, beta1=0.3, beta2=0.3, orth_reg_weight=0.2, lora_alpha=32, lora_dropout=0.05, bias="none", task_type="CAUSAL_LM", ) model = get_peft_model(model, peft_config) data = load_dataset("ybelkada/english_quotes_copy") data = data.map(lambda samples: tokenizer(samples["quote"]), batched=True) batch = tokenizer(data["train"][:3]["quote"], return_tensors="pt", padding=True) self._check_inference_finite(model, batch) with tempfile.TemporaryDirectory() as tmp_dir: trainer = Trainer( model=model, train_dataset=data["train"], args=TrainingArguments( per_device_train_batch_size=4, gradient_accumulation_steps=4, warmup_steps=2, max_steps=3, learning_rate=2e-4, fp16=True, logging_steps=1, output_dir=tmp_dir, ), data_collator=DataCollatorForLanguageModeling(tokenizer, mlm=False), ) model.config.use_cache = False trainer.train() model.cpu().save_pretrained(tmp_dir) assert "adapter_config.json" in os.listdir(tmp_dir) assert SAFETENSORS_WEIGHTS_NAME in os.listdir(tmp_dir) # assert loss is not None assert trainer.state.log_history[-1]["train_loss"] is not None @pytest.mark.single_gpu_tests @require_torch_gpu def test_8bit_adalora_causalLM(self): r""" Tests the 8bit training with adalora """ model_id = "facebook/opt-350m" model = AutoModelForCausalLM.from_pretrained( model_id, quantization_config=BitsAndBytesConfig(load_in_8bit=True) ) tokenizer = AutoTokenizer.from_pretrained(model_id) model.gradient_checkpointing_enable() model = prepare_model_for_kbit_training(model) peft_config = AdaLoraConfig( init_r=6, target_r=4, tinit=50, tfinal=100, deltaT=5, beta1=0.3, beta2=0.3, orth_reg_weight=0.2, lora_alpha=32, lora_dropout=0.05, bias="none", task_type="CAUSAL_LM", ) model = 
get_peft_model(model, peft_config) data = load_dataset("ybelkada/english_quotes_copy") data = data.map(lambda samples: tokenizer(samples["quote"]), batched=True) batch = tokenizer(data["train"][:3]["quote"], return_tensors="pt", padding=True) self._check_inference_finite(model, batch) with tempfile.TemporaryDirectory() as tmp_dir: trainer = Trainer( model=model, train_dataset=data["train"], args=TrainingArguments( per_device_train_batch_size=4, gradient_accumulation_steps=4, warmup_steps=2, max_steps=3, learning_rate=2e-4, fp16=True, logging_steps=1, output_dir=tmp_dir, ), data_collator=DataCollatorForLanguageModeling(tokenizer, mlm=False), ) model.config.use_cache = False trainer.train() model.cpu().save_pretrained(tmp_dir) assert "adapter_config.json" in os.listdir(tmp_dir) assert SAFETENSORS_WEIGHTS_NAME in os.listdir(tmp_dir) # assert loss is not None assert trainer.state.log_history[-1]["train_loss"] is not None @pytest.mark.multi_gpu_tests @require_torch_multi_gpu def test_causal_lm_training_multi_gpu(self): r""" Test the CausalLM training on a multi-GPU device. This test is a converted version of https://github.com/huggingface/peft/blob/main/examples/int8_training/Finetune_opt_bnb_peft.ipynb where we train `opt-6.7b` on `english_quotes` dataset in few steps. The test would simply fail if the adapters are not set correctly. """ with tempfile.TemporaryDirectory() as tmp_dir: model = AutoModelForCausalLM.from_pretrained( self.causal_lm_model_id, quantization_config=BitsAndBytesConfig(load_in_8bit=True), device_map="auto", ) assert set(model.hf_device_map.values()) == set(range(torch.cuda.device_count())) tokenizer = AutoTokenizer.from_pretrained(self.causal_lm_model_id) model = prepare_model_for_kbit_training(model) setattr(model, "model_parallel", True) setattr(model, "is_parallelizable", True) config = LoraConfig( r=16, lora_alpha=32, target_modules=["q_proj", "v_proj"], lora_dropout=0.05, bias="none", task_type="CAUSAL_LM", ) model = get_peft_model(model, config) data = load_dataset("Abirate/english_quotes") data = data.map(lambda samples: tokenizer(samples["quote"]), batched=True) trainer = Trainer( model=model, train_dataset=data["train"], args=TrainingArguments( per_device_train_batch_size=4, gradient_accumulation_steps=4, warmup_steps=2, max_steps=3, learning_rate=2e-4, fp16=True, logging_steps=1, output_dir=tmp_dir, ), data_collator=DataCollatorForLanguageModeling(tokenizer, mlm=False), ) model.config.use_cache = False trainer.train() model.cpu().save_pretrained(tmp_dir) assert "adapter_config.json" in os.listdir(tmp_dir) assert SAFETENSORS_WEIGHTS_NAME in os.listdir(tmp_dir) # assert loss is not None assert trainer.state.log_history[-1]["train_loss"] is not None @pytest.mark.single_gpu_tests def test_seq2seq_lm_training_single_gpu(self): r""" Test the Seq2SeqLM training on a single GPU device. This test is a converted version of https://github.com/huggingface/peft/blob/main/examples/int8_training/Finetune_opt_bnb_peft.ipynb where we train `flan-large` on `english_quotes` dataset in few steps. The test would simply fail if the adapters are not set correctly. 
""" with tempfile.TemporaryDirectory() as tmp_dir: model = AutoModelForSeq2SeqLM.from_pretrained( self.seq2seq_model_id, quantization_config=BitsAndBytesConfig(load_in_8bit=True), device_map={"": 0}, ) assert set(model.hf_device_map.values()) == {0} tokenizer = AutoTokenizer.from_pretrained(self.seq2seq_model_id) model = prepare_model_for_kbit_training(model) config = LoraConfig( r=16, lora_alpha=32, target_modules=["q", "v"], lora_dropout=0.05, bias="none", task_type="CAUSAL_LM", ) model = get_peft_model(model, config) data = load_dataset("ybelkada/english_quotes_copy") data = data.map(lambda samples: tokenizer(samples["quote"]), batched=True) trainer = Trainer( model=model, train_dataset=data["train"], args=TrainingArguments( per_device_train_batch_size=4, gradient_accumulation_steps=4, warmup_steps=2, max_steps=3, learning_rate=2e-4, fp16=True, logging_steps=1, output_dir=tmp_dir, ), data_collator=DataCollatorForLanguageModeling(tokenizer, mlm=False), ) model.config.use_cache = False trainer.train() model.cpu().save_pretrained(tmp_dir) assert "adapter_config.json" in os.listdir(tmp_dir) assert SAFETENSORS_WEIGHTS_NAME in os.listdir(tmp_dir) # assert loss is not None assert trainer.state.log_history[-1]["train_loss"] is not None @pytest.mark.multi_gpu_tests @require_torch_multi_gpu def test_seq2seq_lm_training_multi_gpu(self): r""" Test the Seq2SeqLM training on a multi-GPU device. This test is a converted version of https://github.com/huggingface/peft/blob/main/examples/int8_training/Finetune_opt_bnb_peft.ipynb where we train `flan-large` on `english_quotes` dataset in few steps. The test would simply fail if the adapters are not set correctly. """ with tempfile.TemporaryDirectory() as tmp_dir: model = AutoModelForSeq2SeqLM.from_pretrained( self.seq2seq_model_id, quantization_config=BitsAndBytesConfig(load_in_8bit=True), device_map="balanced", ) assert set(model.hf_device_map.values()) == set(range(torch.cuda.device_count())) tokenizer = AutoTokenizer.from_pretrained(self.seq2seq_model_id) model = prepare_model_for_kbit_training(model) config = LoraConfig( r=16, lora_alpha=32, target_modules=["q", "v"], lora_dropout=0.05, bias="none", task_type="CAUSAL_LM", ) model = get_peft_model(model, config) data = load_dataset("ybelkada/english_quotes_copy") data = data.map(lambda samples: tokenizer(samples["quote"]), batched=True) trainer = Trainer( model=model, train_dataset=data["train"], args=TrainingArguments( per_device_train_batch_size=4, gradient_accumulation_steps=4, warmup_steps=2, max_steps=3, learning_rate=2e-4, fp16=True, logging_steps=1, output_dir="outputs", ), data_collator=DataCollatorForLanguageModeling(tokenizer, mlm=False), ) model.config.use_cache = False trainer.train() model.cpu().save_pretrained(tmp_dir) assert "adapter_config.json" in os.listdir(tmp_dir) assert SAFETENSORS_WEIGHTS_NAME in os.listdir(tmp_dir) # assert loss is not None assert trainer.state.log_history[-1]["train_loss"] is not None @pytest.mark.single_gpu_tests def test_audio_model_training(self): r""" Test the audio model training on a single GPU device. 
This test is a converted version of https://github.com/huggingface/peft/blob/main/examples/int8_training/peft_bnb_whisper_large_v2_training.ipynb """ with tempfile.TemporaryDirectory() as tmp_dir: dataset_name = "ybelkada/common_voice_mr_11_0_copy" task = "transcribe" language = "Marathi" common_voice = DatasetDict() common_voice["train"] = load_dataset(dataset_name, split="train+validation") common_voice = common_voice.remove_columns( ["accent", "age", "client_id", "down_votes", "gender", "locale", "path", "segment", "up_votes"] ) feature_extractor = WhisperFeatureExtractor.from_pretrained(self.audio_model_id) tokenizer = WhisperTokenizer.from_pretrained(self.audio_model_id, language=language, task=task) processor = WhisperProcessor.from_pretrained(self.audio_model_id, language=language, task=task) common_voice = common_voice.cast_column("audio", Audio(sampling_rate=16000)) def prepare_dataset(batch): # load and resample audio data from 48 to 16kHz audio = batch["audio"] # compute log-Mel input features from input audio array batch["input_features"] = feature_extractor( audio["array"], sampling_rate=audio["sampling_rate"] ).input_features[0] # encode target text to label ids batch["labels"] = tokenizer(batch["sentence"]).input_ids return batch common_voice = common_voice.map( prepare_dataset, remove_columns=common_voice.column_names["train"], num_proc=2 ) data_collator = DataCollatorSpeechSeq2SeqWithPadding(processor=processor) model = WhisperForConditionalGeneration.from_pretrained( self.audio_model_id, quantization_config=BitsAndBytesConfig(load_in_8bit=True), device_map="auto" ) model.config.forced_decoder_ids = None model.config.suppress_tokens = [] model = prepare_model_for_kbit_training(model) # as Whisper model uses Conv layer in encoder, checkpointing disables grad computation # to avoid this, make the inputs trainable def make_inputs_require_grad(module, input, output): output.requires_grad_(True) model.model.encoder.conv1.register_forward_hook(make_inputs_require_grad) config = LoraConfig( r=32, lora_alpha=64, target_modules=["q_proj", "v_proj"], lora_dropout=0.05, bias="none" ) model = get_peft_model(model, config) model.print_trainable_parameters() training_args = Seq2SeqTrainingArguments( output_dir=tmp_dir, # change to a repo name of your choice per_device_train_batch_size=8, gradient_accumulation_steps=1, # increase by 2x for every 2x decrease in batch size learning_rate=1e-3, warmup_steps=2, max_steps=3, fp16=True, per_device_eval_batch_size=8, generation_max_length=128, logging_steps=25, remove_unused_columns=False, # required as the PeftModel forward doesn't have the signature of the wrapped model's forward label_names=["labels"], # same reason as above ) trainer = Seq2SeqTrainer( args=training_args, model=model, train_dataset=common_voice["train"], data_collator=data_collator, tokenizer=processor.feature_extractor, ) trainer.train() model.cpu().save_pretrained(tmp_dir) assert "adapter_config.json" in os.listdir(tmp_dir) assert SAFETENSORS_WEIGHTS_NAME in os.listdir(tmp_dir) # assert loss is not None assert trainer.state.log_history[-1]["train_loss"] is not None @pytest.mark.single_gpu_tests def test_4bit_non_default_adapter_name(self): # See PR 1294 config = LoraConfig( r=16, target_modules=["q_proj", "v_proj"], bias="none", task_type="CAUSAL_LM", ) # default adapter name model = AutoModelForCausalLM.from_pretrained( "facebook/opt-125m", device_map="auto", quantization_config=BitsAndBytesConfig(load_in_4bit=True), ) model = prepare_model_for_kbit_training(model) model = 
get_peft_model(model, config) n_trainable_default, n_total_default = model.get_nb_trainable_parameters() # other adapter name model = AutoModelForCausalLM.from_pretrained( "facebook/opt-125m", device_map="auto", quantization_config=BitsAndBytesConfig(load_in_4bit=True), ) model = prepare_model_for_kbit_training(model) model = get_peft_model(model, config, adapter_name="other") n_trainable_other, n_total_other = model.get_nb_trainable_parameters() assert n_trainable_other > 0 # sanity check assert n_trainable_default == n_trainable_other assert n_total_default == n_total_other @pytest.mark.single_gpu_tests def test_8bit_non_default_adapter_name(self): # See PR 1294 config = LoraConfig( r=16, target_modules=["q_proj", "v_proj"], bias="none", task_type="CAUSAL_LM", ) # default adapter name model = AutoModelForCausalLM.from_pretrained( "facebook/opt-125m", device_map="auto", quantization_config=BitsAndBytesConfig(load_in_8bit=True), ) model = prepare_model_for_kbit_training(model) model = get_peft_model(model, config) n_trainable_default, n_total_default = model.get_nb_trainable_parameters() # other adapter name model = AutoModelForCausalLM.from_pretrained( "facebook/opt-125m", device_map="auto", quantization_config=BitsAndBytesConfig(load_in_8bit=True), ) model = prepare_model_for_kbit_training(model) model = get_peft_model(model, config, adapter_name="other") n_trainable_other, n_total_other = model.get_nb_trainable_parameters() assert n_trainable_other > 0 # sanity check assert n_trainable_default == n_trainable_other assert n_total_default == n_total_other @pytest.mark.single_gpu_tests def test_causal_lm_training_4bit_dora(self): r""" Same as test_causal_lm_training_4bit but with DoRA """ with tempfile.TemporaryDirectory() as tmp_dir: model = AutoModelForCausalLM.from_pretrained( self.causal_lm_model_id, quantization_config=BitsAndBytesConfig(load_in_4bit=True), device_map="auto", ) tokenizer = AutoTokenizer.from_pretrained(self.causal_lm_model_id) model = prepare_model_for_kbit_training(model) config = LoraConfig( r=16, lora_alpha=32, target_modules=["q_proj", "v_proj"], lora_dropout=0.05, bias="none", task_type="CAUSAL_LM", use_dora=True, ) model = get_peft_model(model, config) data = load_dataset("ybelkada/english_quotes_copy") data = data.map(lambda samples: tokenizer(samples["quote"]), batched=True) trainer = Trainer( model=model, train_dataset=data["train"], args=TrainingArguments( per_device_train_batch_size=4, gradient_accumulation_steps=4, warmup_steps=2, max_steps=3, learning_rate=2e-4, fp16=True, logging_steps=1, output_dir=tmp_dir, ), data_collator=DataCollatorForLanguageModeling(tokenizer, mlm=False), ) model.config.use_cache = False trainer.train() model.cpu().save_pretrained(tmp_dir) assert "adapter_config.json" in os.listdir(tmp_dir) assert SAFETENSORS_WEIGHTS_NAME in os.listdir(tmp_dir) # assert loss is not None assert trainer.state.log_history[-1]["train_loss"] is not None @pytest.mark.multi_gpu_tests def test_causal_lm_training_multi_gpu_4bit_dora(self): r""" Same as test_causal_lm_training_multi_gpu_4bit but with DoRA """ with tempfile.TemporaryDirectory() as tmp_dir: model = AutoModelForCausalLM.from_pretrained( self.causal_lm_model_id, device_map="auto", quantization_config=BitsAndBytesConfig(load_in_4bit=True), ) assert set(model.hf_device_map.values()) == set(range(torch.cuda.device_count())) model = prepare_model_for_kbit_training(model) setattr(model, "model_parallel", True) setattr(model, "is_parallelizable", True) config = LoraConfig( r=16, lora_alpha=32, 
target_modules=["q_proj", "v_proj"], lora_dropout=0.05, bias="none", task_type="CAUSAL_LM", use_dora=True, ) model = get_peft_model(model, config) data = load_dataset("Abirate/english_quotes") data = data.map(lambda samples: self.tokenizer(samples["quote"]), batched=True) trainer = Trainer( model=model, train_dataset=data["train"], args=TrainingArguments( per_device_train_batch_size=4, gradient_accumulation_steps=4, warmup_steps=2, max_steps=3, learning_rate=2e-4, fp16=True, logging_steps=1, output_dir=tmp_dir, ), data_collator=DataCollatorForLanguageModeling(self.tokenizer, mlm=False), ) model.config.use_cache = False trainer.train() model.cpu().save_pretrained(tmp_dir) assert "adapter_config.json" in os.listdir(tmp_dir) assert SAFETENSORS_WEIGHTS_NAME in os.listdir(tmp_dir) # assert loss is not None assert trainer.state.log_history[-1]["train_loss"] is not None @pytest.mark.single_gpu_tests def test_causal_lm_training_8bit_dora(self): r""" Same as test_causal_lm_training_4bit_dora but with 8bit """ with tempfile.TemporaryDirectory() as tmp_dir: model = AutoModelForCausalLM.from_pretrained( self.causal_lm_model_id, quantization_config=BitsAndBytesConfig(load_in_8bit=True), device_map="auto", ) tokenizer = AutoTokenizer.from_pretrained(self.causal_lm_model_id) model = prepare_model_for_kbit_training(model) config = LoraConfig( r=16, lora_alpha=32, target_modules=["q_proj", "v_proj"], lora_dropout=0.05, bias="none", task_type="CAUSAL_LM", use_dora=True, ) model = get_peft_model(model, config) data = load_dataset("ybelkada/english_quotes_copy") data = data.map(lambda samples: tokenizer(samples["quote"]), batched=True) trainer = Trainer( model=model, train_dataset=data["train"], args=TrainingArguments( per_device_train_batch_size=4, gradient_accumulation_steps=4, warmup_steps=2, max_steps=3, learning_rate=2e-4, fp16=True, logging_steps=1, output_dir=tmp_dir, ), data_collator=DataCollatorForLanguageModeling(tokenizer, mlm=False), ) model.config.use_cache = False trainer.train() model.cpu().save_pretrained(tmp_dir) assert "adapter_config.json" in os.listdir(tmp_dir) assert SAFETENSORS_WEIGHTS_NAME in os.listdir(tmp_dir) # assert loss is not None assert trainer.state.log_history[-1]["train_loss"] is not None @pytest.mark.multi_gpu_tests def test_causal_lm_training_multi_gpu_8bit_dora(self): r""" Same as test_causal_lm_training_multi_gpu_4bit_dora but with 8bit """ with tempfile.TemporaryDirectory() as tmp_dir: model = AutoModelForCausalLM.from_pretrained( self.causal_lm_model_id, device_map="auto", quantization_config=BitsAndBytesConfig(load_in_8bit=True), ) assert set(model.hf_device_map.values()) == set(range(torch.cuda.device_count())) model = prepare_model_for_kbit_training(model) setattr(model, "model_parallel", True) setattr(model, "is_parallelizable", True) config = LoraConfig( r=16, lora_alpha=32, target_modules=["q_proj", "v_proj"], lora_dropout=0.05, bias="none", task_type="CAUSAL_LM", use_dora=True, ) model = get_peft_model(model, config) data = load_dataset("Abirate/english_quotes") data = data.map(lambda samples: self.tokenizer(samples["quote"]), batched=True) trainer = Trainer( model=model, train_dataset=data["train"], args=TrainingArguments( per_device_train_batch_size=4, gradient_accumulation_steps=4, warmup_steps=2, max_steps=3, learning_rate=2e-4, fp16=True, logging_steps=1, output_dir=tmp_dir, ), data_collator=DataCollatorForLanguageModeling(self.tokenizer, mlm=False), ) model.config.use_cache = False trainer.train() model.cpu().save_pretrained(tmp_dir) assert 
"adapter_config.json" in os.listdir(tmp_dir) assert SAFETENSORS_WEIGHTS_NAME in os.listdir(tmp_dir) # assert loss is not None assert trainer.state.log_history[-1]["train_loss"] is not None @pytest.mark.single_gpu_tests def test_causal_lm_training_gpt2_dora(self): r""" Same as test_causal_lm_training_4bit but with DoRA """ with tempfile.TemporaryDirectory() as tmp_dir: model = AutoModelForCausalLM.from_pretrained("gpt2", device_map="auto") tokenizer = AutoTokenizer.from_pretrained(self.causal_lm_model_id) model = prepare_model_for_kbit_training(model) config = LoraConfig( r=16, lora_alpha=32, lora_dropout=0.05, bias="none", task_type="CAUSAL_LM", use_dora=True, ) model = get_peft_model(model, config) data = load_dataset("ybelkada/english_quotes_copy") data = data.map(lambda samples: tokenizer(samples["quote"]), batched=True) trainer = Trainer( model=model, train_dataset=data["train"], args=TrainingArguments( per_device_train_batch_size=4, gradient_accumulation_steps=4, warmup_steps=2, max_steps=3, learning_rate=2e-4, fp16=True, logging_steps=1, output_dir=tmp_dir, ), data_collator=DataCollatorForLanguageModeling(tokenizer, mlm=False), ) model.config.use_cache = False trainer.train() model.cpu().save_pretrained(tmp_dir) assert "adapter_config.json" in os.listdir(tmp_dir) assert SAFETENSORS_WEIGHTS_NAME in os.listdir(tmp_dir) # assert loss is not None assert trainer.state.log_history[-1]["train_loss"] is not None @parameterized.expand(["4bit", "8bit"]) def test_initialize_dora_with_bnb_on_cpu(self, kbit): # 1674 # The issue is that to initialize DoRA, we need to dequantize the weights. That only works on GPU for bnb. # Therefore, intializing DoRA with bnb on CPU used to fail. model_id = "facebook/opt-125m" if kbit == "4bit": bnb_config = BitsAndBytesConfig(load_in_4bit=True, bnb_4bit_quant_type="nf4") elif kbit == "8bit": bnb_config = BitsAndBytesConfig(load_in_8bit=True) else: raise ValueError("Only 4bit and 8bit bnb allowed") model = AutoModelForCausalLM.from_pretrained(model_id, quantization_config=bnb_config) model = model.cpu() # ensure that we're on CPU # sanity check that all weights are on CPU weights_not_cpu = [name for name, p in model.named_parameters() if p.device != torch.device("cpu")] assert not weights_not_cpu lora_config = LoraConfig(use_dora=True) # should not raise peft_model = get_peft_model(model, lora_config) # check that the weights are still on CPU weights_not_cpu = [name for name, p in peft_model.named_parameters() if p.device != torch.device("cpu")] assert not weights_not_cpu @require_torch_gpu @require_auto_gptq @require_optimum class PeftGPTQGPUTests(unittest.TestCase): r""" GPTQ + peft tests """ def setUp(self): from transformers import GPTQConfig self.causal_lm_model_id = "marcsun13/opt-350m-gptq-4bit" # TODO : check if it works for Exllamav2 kernels self.quantization_config = GPTQConfig(bits=4, use_exllama=False) self.tokenizer = AutoTokenizer.from_pretrained(self.causal_lm_model_id) def tearDown(self): r""" Efficient mechanism to free GPU memory after each test. Based on https://github.com/huggingface/transformers/issues/21094 """ gc.collect() torch.cuda.empty_cache() def _check_inference_finite(self, model, batch): # try inference without Trainer class training = model.training model.eval() output = model(**batch.to(model.device)) assert torch.isfinite(output.logits).all() model.train(training) @pytest.mark.single_gpu_tests def test_causal_lm_training(self): r""" Test the CausalLM training on a single GPU device. 
The test would simply fail if the adapters are not set correctly. """ with tempfile.TemporaryDirectory() as tmp_dir: model = AutoModelForCausalLM.from_pretrained( self.causal_lm_model_id, torch_dtype=torch.float16, device_map="auto", quantization_config=self.quantization_config, ) model = prepare_model_for_kbit_training(model) config = LoraConfig( r=16, lora_alpha=32, target_modules=["q_proj", "v_proj"], lora_dropout=0.05, bias="none", task_type="CAUSAL_LM", ) model = get_peft_model(model, config) data = load_dataset("ybelkada/english_quotes_copy") data = data.map(lambda samples: self.tokenizer(samples["quote"]), batched=True) trainer = Trainer( model=model, train_dataset=data["train"], args=TrainingArguments( per_device_train_batch_size=4, gradient_accumulation_steps=4, warmup_steps=2, max_steps=3, learning_rate=2e-4, fp16=True, logging_steps=1, output_dir=tmp_dir, ), data_collator=DataCollatorForLanguageModeling(self.tokenizer, mlm=False), ) model.config.use_cache = False trainer.train() model.cpu().save_pretrained(tmp_dir) assert "adapter_config.json" in os.listdir(tmp_dir) assert SAFETENSORS_WEIGHTS_NAME in os.listdir(tmp_dir) # assert loss is not None assert trainer.state.log_history[-1]["train_loss"] is not None @pytest.mark.single_gpu_tests def test_adalora_causalLM(self): r""" Tests the gptq training with adalora """ model = AutoModelForCausalLM.from_pretrained( self.causal_lm_model_id, torch_dtype=torch.float16, device_map="auto", quantization_config=self.quantization_config, ) tokenizer = AutoTokenizer.from_pretrained(self.causal_lm_model_id) model = prepare_model_for_kbit_training(model) peft_config = AdaLoraConfig( init_r=6, target_r=4, tinit=50, tfinal=100, deltaT=5, beta1=0.3, beta2=0.3, orth_reg_weight=0.2, lora_alpha=32, lora_dropout=0.05, bias="none", task_type="CAUSAL_LM", ) model = get_peft_model(model, peft_config) data = load_dataset("ybelkada/english_quotes_copy") data = data.map(lambda samples: self.tokenizer(samples["quote"]), batched=True) batch = tokenizer(data["train"][:3]["quote"], return_tensors="pt", padding=True) self._check_inference_finite(model, batch) with tempfile.TemporaryDirectory() as tmp_dir: trainer = Trainer( model=model, train_dataset=data["train"], args=TrainingArguments( per_device_train_batch_size=4, gradient_accumulation_steps=4, warmup_steps=2, max_steps=3, learning_rate=2e-4, fp16=True, logging_steps=1, output_dir=tmp_dir, ), data_collator=DataCollatorForLanguageModeling(self.tokenizer, mlm=False), ) model.config.use_cache = False trainer.train() model.cpu().save_pretrained(tmp_dir) assert "adapter_config.json" in os.listdir(tmp_dir) assert SAFETENSORS_WEIGHTS_NAME in os.listdir(tmp_dir) # assert loss is not None assert trainer.state.log_history[-1]["train_loss"] is not None @pytest.mark.multi_gpu_tests @require_torch_multi_gpu def test_causal_lm_training_multi_gpu(self): r""" Test the CausalLM training on a multi-GPU device. The test would simply fail if the adapters are not set correctly. 
""" with tempfile.TemporaryDirectory() as tmp_dir: model = AutoModelForCausalLM.from_pretrained( self.causal_lm_model_id, torch_dtype=torch.float16, device_map="auto", quantization_config=self.quantization_config, ) assert set(model.hf_device_map.values()) == set(range(torch.cuda.device_count())) model = prepare_model_for_kbit_training(model) setattr(model, "model_parallel", True) setattr(model, "is_parallelizable", True) config = LoraConfig( r=16, lora_alpha=32, target_modules=["q_proj", "v_proj"], lora_dropout=0.05, bias="none", task_type="CAUSAL_LM", ) model = get_peft_model(model, config) data = load_dataset("Abirate/english_quotes") data = data.map(lambda samples: self.tokenizer(samples["quote"]), batched=True) trainer = Trainer( model=model, train_dataset=data["train"], args=TrainingArguments( per_device_train_batch_size=4, gradient_accumulation_steps=4, warmup_steps=2, max_steps=3, learning_rate=2e-4, fp16=True, logging_steps=1, output_dir=tmp_dir, ), data_collator=DataCollatorForLanguageModeling(self.tokenizer, mlm=False), ) model.config.use_cache = False trainer.train() model.cpu().save_pretrained(tmp_dir) assert "adapter_config.json" in os.listdir(tmp_dir) assert SAFETENSORS_WEIGHTS_NAME in os.listdir(tmp_dir) # assert loss is not None assert trainer.state.log_history[-1]["train_loss"] is not None @pytest.mark.single_gpu_tests def test_non_default_adapter_name(self): # See issue 1346 config = LoraConfig( r=16, target_modules=["q_proj", "v_proj"], task_type="CAUSAL_LM", ) # default adapter name model = AutoModelForCausalLM.from_pretrained( self.causal_lm_model_id, torch_dtype=torch.float16, device_map="auto", quantization_config=self.quantization_config, ) model = prepare_model_for_kbit_training(model) model = get_peft_model(model, config) n_trainable_default, n_total_default = model.get_nb_trainable_parameters() # other adapter name model = AutoModelForCausalLM.from_pretrained( self.causal_lm_model_id, torch_dtype=torch.float16, device_map="auto", quantization_config=self.quantization_config, ) model = prepare_model_for_kbit_training(model) model = get_peft_model(model, config, adapter_name="other") n_trainable_other, n_total_other = model.get_nb_trainable_parameters() assert n_trainable_other > 0 # sanity check assert n_trainable_default == n_trainable_other assert n_total_default == n_total_other @require_non_cpu class OffloadSaveTests(unittest.TestCase): def setUp(self): self.causal_lm_model_id = "gpt2" def tearDown(self): r""" Efficient mechanism to free GPU memory after each test. 
Based on https://github.com/huggingface/transformers/issues/21094 """ gc.collect() torch.cuda.empty_cache() def test_offload_load(self): r""" Test the loading of a LoRA model with CPU- and disk-offloaded modules """ torch.manual_seed(0) model = AutoModelForCausalLM.from_pretrained(self.causal_lm_model_id) tokenizer = AutoTokenizer.from_pretrained(self.causal_lm_model_id) memory_limits = {"cpu": "0.4GIB"} # no "disk" for PeftModel.from_pretrained() compatibility # offload around half of all transformer modules to the disk device_map = infer_auto_device_map(model, max_memory=memory_limits) assert "cpu" in device_map.values() assert "disk" in device_map.values() config = LoraConfig(task_type="CAUSAL_LM", init_lora_weights=False, target_modules=["c_attn"]) model = get_peft_model(model, config) with tempfile.TemporaryDirectory() as tmp_dir: model.save_pretrained(tmp_dir) model = AutoModelForCausalLM.from_pretrained(self.causal_lm_model_id, device_map="cpu") lora_model = PeftModel.from_pretrained(model, tmp_dir).eval() input_tokens = tokenizer.encode("Four score and seven years ago", return_tensors="pt") output = lora_model(input_tokens)[0] # load the model with device_map offloaded_model = AutoModelForCausalLM.from_pretrained(self.causal_lm_model_id, device_map=device_map) assert len({p.device for p in offloaded_model.parameters()}) == 2 # 'cpu' and 'meta' offloaded_lora_model = PeftModel.from_pretrained(offloaded_model, tmp_dir, max_memory=memory_limits).eval() offloaded_output = offloaded_lora_model(input_tokens)[0] assert torch.allclose(output, offloaded_output, atol=1e-5) @pytest.mark.single_gpu_tests def test_offload_merge(self): r""" Test merging, unmerging, and unloading of a model with CPU- and disk- offloaded modules. """ torch.manual_seed(0) model = AutoModelForCausalLM.from_pretrained(self.causal_lm_model_id) tokenizer = AutoTokenizer.from_pretrained(self.causal_lm_model_id) memory_limits = {0: "0.2GIB", "cpu": "0.2GIB"} # no "disk" for PeftModel.from_pretrained() compatibility # offloads around half of all transformer modules device_map = infer_auto_device_map(model, max_memory=memory_limits) assert 0 in device_map.values() assert "cpu" in device_map.values() assert "disk" in device_map.values() config = LoraConfig(task_type="CAUSAL_LM", init_lora_weights=False, target_modules=["c_attn"]) model = get_peft_model(model, config) with tempfile.TemporaryDirectory() as tmp_dir: model.save_pretrained(tmp_dir) # load the model with device_map model = AutoModelForCausalLM.from_pretrained(self.causal_lm_model_id, device_map=device_map).eval() assert len({p.device for p in model.parameters()}) == 2 model = PeftModel.from_pretrained(model, tmp_dir, max_memory=memory_limits) input_tokens = tokenizer.encode("Four score and seven years ago", return_tensors="pt") model.eval() # test peft model adapter merge pre_merge_olayer = model(input_tokens)[0] model.merge_adapter() post_merge_olayer = model(input_tokens)[0] assert torch.allclose(post_merge_olayer, pre_merge_olayer) # test peft model adapter unmerge model.unmerge_adapter() post_unmerge_olayer = model(input_tokens)[0] assert torch.allclose(post_unmerge_olayer, pre_merge_olayer) # test LoRA merge and unload model = model.merge_and_unload() post_unload_merge_olayer = model(input_tokens)[0] assert torch.allclose(post_unload_merge_olayer, pre_merge_olayer) @pytest.mark.skipif(not torch.cuda.is_available(), reason="test requires a GPU") @pytest.mark.single_gpu_tests class TestPiSSA: r""" Tests for PiSSA to ensure that it reduces the quantization 
error compared to normal LoRA quantization. """ # The error factor indicates by how much the quantization error should be decreased when using PiSSA compared to # quantization without PiSSA. Thus 1.03 means that the error should be decreased by 3% at least. This is a very # conservative value to prevent flakiness, in practice most gains are > 1.5 error_factor = 1.03 def quantize_model(self, model, num_bits=4, device="cuda"): # Quantize the `weight.data` of the linear layer in the model to `num_bits` and store it with full precision. quantizer = NFQuantizer(num_bits=num_bits, device=device, method="normal", block_size=64) for name, module in model.named_modules(): if isinstance(module, torch.nn.Linear) and "lm_head" not in name: quantized_weight, max_abs, shape = quantizer.quantize_block(module.weight.data.to(device)) module.weight.data = quantizer.dequantize_block(quantized_weight, max_abs, shape) return model def nuclear_norm(self, base_model, quantized_model): # Calculate the nuclear norm (sum of singular values) of the error matrices between the `quantized_model` and the `base_model`. error_list = [] for name, module in base_model.named_modules(): if isinstance(module, torch.nn.Linear) and "lm_head" not in name: quant_module = quantized_model.get_submodule(name) error_list.append(torch.linalg.svdvals(module.weight.data - quant_module.weight.data).sum()) return torch.Tensor(error_list).sum() def get_errors( self, tmp_path, bits=4, device="cuda", model_id="hf-internal-testing/tiny-random-BloomForCausalLM", ): # Comparing the quantized LoRA model to the base model, vs the PiSSA quantized model to the base model. # We expect the PiSSA quantized model to have less error than the normal LoRA quantized model. cls = AutoModelForSeq2SeqLM if "t5" in str(model_id) else AutoModelForCausalLM base_model = cls.from_pretrained(model_id).eval().to(device) task_type = TaskType.SEQ_2_SEQ_LM if base_model.config.is_encoder_decoder else TaskType.CAUSAL_LM # logits from the normal quantized LoRA model target_modules = "all-linear" if task_type != TaskType.SEQ_2_SEQ_LM else ["o", "k", "wi", "q", "v"] lora_config = LoraConfig(task_type=task_type, target_modules=target_modules) qlora_model = self.quantize_model(cls.from_pretrained(model_id).eval().to(device), bits, device) qlora_model = get_peft_model( qlora_model, lora_config, ) qlora_model = qlora_model.merge_and_unload() qlora_error = self.nuclear_norm(base_model, qlora_model) del qlora_model gc.collect() torch.cuda.empty_cache() # logits from quantized LoRA model using PiSSA lora_config = LoraConfig( task_type=task_type, init_lora_weights="pissa", target_modules=target_modules, ) pissa_model = cls.from_pretrained(model_id).eval().to(device) pissa_model = get_peft_model(pissa_model, lora_config) # save LoRA weights, they should be initialized such that they minimize the quantization error pissa_model.base_model.peft_config["default"].init_lora_weights = True pissa_model.save_pretrained(tmp_path / "pissa_model") pissa_model = pissa_model.unload() pissa_model.save_pretrained(tmp_path / "residual_model") del pissa_model gc.collect() torch.cuda.empty_cache() # now load quantized model and apply PiSSA-initialized weights on top qpissa_model = self.quantize_model( cls.from_pretrained(tmp_path / "residual_model").eval().to(device), bits, device ) qpissa_model = PeftModel.from_pretrained(qpissa_model, tmp_path / "pissa_model") qpissa_model = qpissa_model.merge_and_unload() qpissa_error = self.nuclear_norm(base_model, qpissa_model) del qpissa_model gc.collect() 
torch.cuda.empty_cache() assert qlora_error > 0.0 assert qpissa_error > 0.0 # next, check that PiSSA quantization errors are smaller than LoRA errors by a certain margin assert qpissa_error < (qlora_error / self.error_factor) @pytest.mark.parametrize("device", ["cuda", "cpu"]) def test_bloomz_pissa_4bit(self, device, tmp_path): # In this test, we compare the logits of the base model, the quantized LoRA model, and the quantized model # using PiSSA. When quantizing, we expect a certain level of error. However, we expect the PiSSA quantized # model to have less error than the normal LoRA quantized model. Note that when using normal LoRA, the # quantization error is simply the error from quantization without LoRA, as LoRA is a no-op before training. # We still apply LoRA for the test for consistency. self.get_errors(bits=4, device=device, tmp_path=tmp_path) @pytest.mark.parametrize("device", ["cuda", "cpu"]) def test_bloomz_pissa_8bit(self, device, tmp_path): # Same test as test_bloomz_pissa_4bit but with 8 bits. self.get_errors(bits=8, device=device, tmp_path=tmp_path) @pytest.mark.parametrize("device", ["cuda", "cpu"]) def test_t5_pissa_4bit(self, device, tmp_path): self.get_errors(bits=4, device=device, model_id="t5-small", tmp_path=tmp_path) @pytest.mark.parametrize("device", ["cuda", "cpu"]) def test_t5_pissa_8bit(self, device, tmp_path): self.get_errors(bits=8, device=device, model_id="t5-small", tmp_path=tmp_path) @require_bitsandbytes def test_lora_pissa_conversion_same_output_after_loading_with_quantization(self, tmp_path): # A copy of the test `test_lora_pissa_conversion_same_output_after_loading` in peft/tests/test_initialization.py, # that would fail if bitsandbytes quantization is used because Quant(W_res) + AB !=Quant(W) + \Delta(AB). import bitsandbytes as bnb torch.manual_seed(0) data = torch.rand(10, 1000).to("cuda") class MyModule(torch.nn.Module): def __init__(self): super().__init__() # choose a large weight so that averages are close to expected values self.linear = torch.nn.Linear(1000, 1000) self.embed = torch.nn.Embedding(1000, 1000) self.conv2d = torch.nn.Conv2d(100, 100, 3) def forward(self, x): x_int = (100 * x).int() x_4d = x.flatten().reshape(1, 100, 10, 10) return self.linear(x), self.embed(x_int), self.conv2d(x_4d) model = MyModule().to("cuda") output_base = model(data)[0] config = LoraConfig(init_lora_weights="pissa", target_modules=["linear"], r=8) peft_model = get_peft_model(deepcopy(model), config) # save the initial model peft_model.peft_config["default"].init_lora_weights = True peft_model.save_pretrained(tmp_path / "init-model") peft_model = peft_model.unload() torch.save(peft_model.state_dict(), tmp_path / "residual-model") del peft_model # create 4bit base model base_model = deepcopy(model) base_model.load_state_dict(torch.load(tmp_path / "residual-model")) # sanity check: the base model weights were indeed changed tol = 1e-06 assert not torch.allclose(model.linear.weight, base_model.linear.weight, atol=tol, rtol=tol) # quantize the linear layer linear4bit = bnb.nn.Linear4bit(base_model.linear.in_features, base_model.linear.out_features) linear4bit.load_state_dict(base_model.linear.state_dict()) linear4bit.to(0) base_model.linear = linear4bit peft_model = PeftModel.from_pretrained(deepcopy(base_model), tmp_path / "init-model") output_quantized_pissa = peft_model(data)[0] # sanity check tol = 1e-06 assert not torch.allclose(output_base, output_quantized_pissa, atol=tol, rtol=tol) # modify the weights, or else the adapter performs an identity 
# transformation
        peft_model.base_model.linear.lora_B["default"].weight.data *= 2.0

        output_finetuned_pissa = peft_model(data)[0]
        # sanity check
        tol = 1e-06
        assert not torch.allclose(output_quantized_pissa, output_finetuned_pissa, atol=tol, rtol=tol)

        # save the model normally
        peft_model.save_pretrained(tmp_path / "pissa-model")
        model_loaded = PeftModel.from_pretrained(deepcopy(base_model), tmp_path / "pissa-model")
        output_loaded = model_loaded(data)[0]

        assert torch.allclose(output_finetuned_pissa, output_loaded, atol=tol, rtol=tol)
        # sanity check: ranks should still be 8 as initially
        assert model_loaded.peft_config["default"].r == 8
        assert model_loaded.base_model.model.linear.lora_A["default"].weight.shape[0] == 8

        # save the model with conversion
        peft_model.save_pretrained(
            tmp_path / "pissa-model-converted", path_initial_model_for_weight_conversion=tmp_path / "init-model"
        )
        model_converted = PeftModel.from_pretrained(deepcopy(model), tmp_path / "pissa-model-converted")
        output_converted = model_converted(data)[0]

        # rank should be double of what it was initially
        assert model_converted.peft_config["default"].r == 16
        assert model_converted.base_model.model.linear.lora_A["default"].weight.shape[0] == 16
        # base model weights should be the same as the initial model
        assert torch.allclose(
            model.linear.weight, model_converted.base_model.model.linear.base_layer.weight, atol=tol, rtol=tol
        )
        # This check is expected to fail when using bnb
        assert not torch.allclose(output_finetuned_pissa, output_converted, atol=tol, rtol=tol)


@pytest.mark.skipif(not torch.cuda.is_available(), reason="test requires a GPU")
@pytest.mark.single_gpu_tests
class TestOLoRA:
    r"""
    Tests for OLoRA to ensure that it reduces the quantization error compared to normal LoRA quantization.
    """

    # The error factor indicates by how much the quantization error should be decreased when using OLoRA compared to
    # quantization without OLoRA. Thus 1.2 means that the error should be decreased by 20% at least. This is a
    # conservative value to prevent flakiness, in practice most gains are > 1.5
    error_factor = 1.2

    def quantize_model(self, model, num_bits=4, device="cuda"):
        # Quantize the `weight.data` of the linear layers in the model to `num_bits` and store it with full precision.
        quantizer = NFQuantizer(num_bits=num_bits, device=device, method="normal", block_size=64)
        for name, module in model.named_modules():
            if isinstance(module, torch.nn.Linear) and "lm_head" not in name:
                quantized_weight, max_abs, shape = quantizer.quantize_block(module.weight.data.to(device))
                module.weight.data = quantizer.dequantize_block(quantized_weight, max_abs, shape)
        return model

    def nuclear_norm(self, base_model, quantized_model):
        # Calculate the nuclear norm (sum of singular values) of the error matrices between the `quantized_model`
        # and the `base_model`.
        error_list = []
        for name, module in base_model.named_modules():
            if isinstance(module, torch.nn.Linear) and "lm_head" not in name:
                quant_module = quantized_model.get_submodule(name)
                error_list.append(torch.linalg.svdvals(module.weight.data - quant_module.weight.data).sum())
        return torch.Tensor(error_list).sum()

    def get_errors(
        self,
        tmp_path,
        bits=4,
        device="cuda",
        model_id="hf-internal-testing/tiny-random-BloomForCausalLM",
    ):
        # Comparing the quantized LoRA model to the base model, vs the OLoRA quantized model to the base model.
        # We expect the OLoRA quantized model to have less error than the normal LoRA quantized model.
cls = AutoModelForSeq2SeqLM if "t5" in str(model_id) else AutoModelForCausalLM base_model = cls.from_pretrained(model_id).eval().to(device) task_type = TaskType.SEQ_2_SEQ_LM if base_model.config.is_encoder_decoder else TaskType.CAUSAL_LM # logits from the normal quantized LoRA model target_modules = "all-linear" if task_type != TaskType.SEQ_2_SEQ_LM else ["o", "k", "wi", "q", "v"] lora_config = LoraConfig(task_type=task_type, target_modules=target_modules) qlora_model = self.quantize_model(cls.from_pretrained(model_id).eval().to(device), bits, device) qlora_model = get_peft_model( qlora_model, lora_config, ) qlora_model = qlora_model.merge_and_unload() qlora_error = self.nuclear_norm(base_model, qlora_model) del qlora_model gc.collect() torch.cuda.empty_cache() # logits from quantized LoRA model using OLoRA lora_config = LoraConfig( task_type=task_type, init_lora_weights="olora", target_modules=target_modules, ) olora_model = cls.from_pretrained(model_id).eval().to(device) olora_model = get_peft_model(olora_model, lora_config) # save LoRA weights, they should be initialized such that they minimize the quantization error olora_model.base_model.peft_config["default"].init_lora_weights = True olora_model.save_pretrained(tmp_path / "olora_model") olora_model = olora_model.unload() olora_model.save_pretrained(tmp_path / "residual_model") del olora_model gc.collect() torch.cuda.empty_cache() # now load quantized model and apply OLoRA-initialized weights on top qolora_model = self.quantize_model( cls.from_pretrained(tmp_path / "residual_model").eval().to(device), bits, device ) qolora_model = PeftModel.from_pretrained(qolora_model, tmp_path / "olora_model") qolora_model = qolora_model.merge_and_unload() qolora_error = self.nuclear_norm(base_model, qolora_model) del qolora_model gc.collect() torch.cuda.empty_cache() assert qlora_error > 0.0 assert qolora_error > 0.0 # next, check that OLoRA quantization errors are smaller than LoRA errors by a certain margin assert qolora_error < (qlora_error / self.error_factor) @pytest.mark.parametrize("device", ["cuda", "cpu"]) def test_bloomz_olora_4bit(self, device, tmp_path): # In this test, we compare the logits of the base model, the quantized LoRA model, and the quantized model # using OLoRA. When quantizing, we expect a certain level of error. However, we expect the OLoRA quantized # model to have less error than the normal LoRA quantized model. Note that when using normal LoRA, the # quantization error is simply the error from quantization without LoRA, as LoRA is a no-op before training. # We still apply LoRA for the test for consistency. self.get_errors(bits=4, device=device, tmp_path=tmp_path) @pytest.mark.parametrize("device", ["cuda", "cpu"]) def test_bloomz_olora_8bit(self, device, tmp_path): # Same test as test_bloomz_olora_4bit but with 8 bits. self.get_errors(bits=8, device=device, tmp_path=tmp_path) @pytest.mark.skipif(not torch.cuda.is_available(), reason="test requires a GPU") class TestLoftQ: r""" Tests for LoftQ to ensure that it reduces the quantization error compared to normal LoRA quantization. """ # The error factor indicates by how much the quantization error should be decreased when using LoftQ compared to # quantization without LoftQ. Thus 1.03 means that the error should be decreased by 3% at least. 
This is a very # conservative value to prevent flakiness, in practice most gains are > 1.5 error_factor = 1.03 def get_input(self, model_id, device): tokenizer = AutoTokenizer.from_pretrained(model_id) inputs = tokenizer("All I want is", padding=True, return_tensors="pt") if device == "cuda": inputs = inputs.to("cuda") return inputs def get_base_model(self, model_id, device, **kwargs): cls = AutoModelForSeq2SeqLM if "t5" in str(model_id) else AutoModelForCausalLM model = cls.from_pretrained(model_id, **kwargs).eval() if device == "cuda": model = model.to("cuda") return model def get_logits(self, model, inputs): if model.config.is_encoder_decoder: input_ids = inputs["input_ids"] return model(input_ids=input_ids, decoder_input_ids=input_ids).logits return model(**inputs).logits def get_errors( self, tmp_path, bits=4, loftq_iter=1, device="cuda", model_id="hf-internal-testing/tiny-random-BloomForCausalLM", use_dora=False, ): # Helper function that returns the quantization errors (MAE and MSE) when comparing the quantized LoRA model # to the base model, vs the LoftQ quantized model to the base model. We expect the LoftQ quantized model to # have less error than the normal LoRA quantized model. Since we compare logits, the observed error is # already somewhat dampened because of the softmax. torch.manual_seed(0) model = self.get_base_model(model_id, device) task_type = TaskType.SEQ_2_SEQ_LM if model.config.is_encoder_decoder else TaskType.CAUSAL_LM inputs = self.get_input(model_id, device) # the base logits are the reference, we try to match those as closely as possible logits_base = self.get_logits(model, inputs) # clean up del model gc.collect() torch.cuda.empty_cache() # logits from the normal quantized LoRA model target_modules = "all-linear" if task_type != TaskType.SEQ_2_SEQ_LM else ["o", "k", "wi", "q", "v"] lora_config = LoraConfig(task_type=task_type, use_dora=use_dora, target_modules=target_modules) kwargs = {} if bits == 4: kwargs["quantization_config"] = BitsAndBytesConfig(load_in_4bit=True, bnb_4bit_quant_type="nf4") elif bits == 8: kwargs["quantization_config"] = BitsAndBytesConfig(load_in_8bit=True) else: raise ValueError("bits must be 4 or 8") quantized_model = get_peft_model( self.get_base_model(model_id, device=None, **kwargs), lora_config, ) torch.manual_seed(0) logits_quantized = self.get_logits(quantized_model, inputs) del quantized_model gc.collect() torch.cuda.empty_cache() # logits from quantized LoRA model using LoftQ loftq_config = LoftQConfig(loftq_bits=bits, loftq_iter=loftq_iter) lora_config = LoraConfig( task_type=task_type, init_lora_weights="loftq", loftq_config=loftq_config, use_dora=use_dora, target_modules=target_modules, ) model = self.get_base_model(model_id, device) if device == "cuda": model = model.to("cuda") loftq_model = get_peft_model(model, lora_config) if device == "cuda": loftq_model = loftq_model.to("cuda") # save LoRA weights, they should be initialized such that they minimize the quantization error loftq_model.base_model.peft_config["default"].init_lora_weights = True loftq_model.save_pretrained(tmp_path / "loftq_model") loftq_model = loftq_model.unload() loftq_model.save_pretrained(tmp_path / "base_model") del loftq_model gc.collect() torch.cuda.empty_cache() # now load quantized model and apply LoftQ-initialized weights on top base_model = self.get_base_model(tmp_path / "base_model", device=None, **kwargs, torch_dtype=torch.float32) loftq_model = PeftModel.from_pretrained(base_model, tmp_path / "loftq_model", is_trainable=True) # TODO sanity 
check: model is quantized torch.manual_seed(0) logits_loftq = self.get_logits(loftq_model, inputs) del loftq_model gc.collect() torch.cuda.empty_cache() mae_quantized = torch.abs(logits_base - logits_quantized).mean() mse_quantized = torch.pow(logits_base - logits_quantized, 2).mean() mae_loftq = torch.abs(logits_base - logits_loftq).mean() mse_loftq = torch.pow(logits_base - logits_loftq, 2).mean() return mae_quantized, mse_quantized, mae_loftq, mse_loftq @pytest.mark.parametrize("device", ["cuda", "cpu"]) def test_bloomz_loftq_4bit(self, device, tmp_path): # In this test, we compare the logits of the base model, the quantized LoRA model, and the quantized model # using LoftQ. When quantizing, we expect a certain level of error. However, we expect the LoftQ quantized # model to have less error than the normal LoRA quantized model. Note that when using normal LoRA, the # quantization error is simply the error from quantization without LoRA, as LoRA is a no-op before training. # We still apply LoRA for the test for consistency. mae_quantized, mse_quantized, mae_loftq, mse_loftq = self.get_errors(bits=4, device=device, tmp_path=tmp_path) # first, sanity check that all errors are > 0.0 assert mae_quantized > 0.0 assert mse_quantized > 0.0 assert mae_loftq > 0.0 assert mse_loftq > 0.0 # next, check that LoftQ quantization errors are smaller than LoRA errors by a certain margin assert mse_loftq < (mse_quantized / self.error_factor) assert mae_loftq < (mae_quantized / self.error_factor) @pytest.mark.parametrize("device", ["cuda", "cpu"]) def test_bloomz_loftq_4bit_iter_5(self, device, tmp_path): # Same test as the previous one but with 5 iterations. We should expect the error to be even smaller with more # iterations, but in practice the difference is not that large, at least not for this small base model. mae_quantized, mse_quantized, mae_loftq, mse_loftq = self.get_errors( bits=4, loftq_iter=5, device=device, tmp_path=tmp_path ) # first, sanity check that all errors are > 0.0 assert mae_quantized > 0.0 assert mse_quantized > 0.0 assert mae_loftq > 0.0 assert mse_loftq > 0.0 # next, check that LoftQ quantization errors are smaller than LoRA errors by a certain margin assert mse_loftq < (mse_quantized / self.error_factor) assert mae_loftq < (mae_quantized / self.error_factor) @pytest.mark.parametrize("device", ["cuda", "cpu"]) def test_bloomz_loftq_8bit(self, device, tmp_path): # Same test as test_bloomz_loftq_4bit but with 8 bits. mae_quantized, mse_quantized, mae_loftq, mse_loftq = self.get_errors(bits=8, device=device, tmp_path=tmp_path) # first, sanity check that all errors are > 0.0 assert mae_quantized > 0.0 assert mse_quantized > 0.0 assert mae_loftq > 0.0 assert mse_loftq > 0.0 # next, check that LoftQ quantization errors are smaller than LoRA errors by a certain margin assert mse_loftq < (mse_quantized / self.error_factor) assert mae_loftq < (mae_quantized / self.error_factor) @pytest.mark.parametrize("device", ["cuda", "cpu"]) def test_bloomz_loftq_8bit_iter_5(self, device, tmp_path): # Same test as test_bloomz_loftq_4bit_iter_5 but with 8 bits. 
mae_quantized, mse_quantized, mae_loftq, mse_loftq = self.get_errors( bits=8, loftq_iter=5, device=device, tmp_path=tmp_path ) # first, sanity check that all errors are > 0.0 assert mae_quantized > 0.0 assert mse_quantized > 0.0 assert mae_loftq > 0.0 assert mse_loftq > 0.0 # next, check that LoftQ quantization errors are smaller than LoRA errors by a certain margin assert mse_loftq < (mse_quantized / self.error_factor) assert mae_loftq < (mae_quantized / self.error_factor) @pytest.mark.parametrize("device", ["cuda", "cpu"]) def test_t5_loftq_4bit(self, device, tmp_path): mae_quantized, mse_quantized, mae_loftq, mse_loftq = self.get_errors( bits=4, device=device, model_id="t5-small", tmp_path=tmp_path ) # first, sanity check that all errors are > 0.0 assert mae_quantized > 0.0 assert mse_quantized > 0.0 assert mae_loftq > 0.0 assert mse_loftq > 0.0 # next, check that LoftQ quantization errors are smaller than LoRA errors by a certain margin assert mse_loftq < (mse_quantized / self.error_factor) assert mae_loftq < (mae_quantized / self.error_factor) @pytest.mark.parametrize("device", ["cuda", "cpu"]) def test_t5_loftq_8bit(self, device, tmp_path): mae_quantized, mse_quantized, mae_loftq, mse_loftq = self.get_errors( bits=8, device=device, model_id="t5-small", tmp_path=tmp_path ) # first, sanity check that all errors are > 0.0 assert mae_quantized > 0.0 assert mse_quantized > 0.0 assert mae_loftq > 0.0 assert mse_loftq > 0.0 # next, check that LoftQ quantization errors are smaller than LoRA errors by a certain margin assert mse_loftq < (mse_quantized / self.error_factor) assert mae_loftq < (mae_quantized / self.error_factor) @pytest.mark.xfail # failing for now, but having DoRA pass is only a nice-to-have, not a must, so we're good @pytest.mark.parametrize("device", ["cuda", "cpu"]) def test_bloomz_loftq_4bit_dora(self, device, tmp_path): # same as test_bloomz_loftq_4bit but with DoRA mae_quantized, mse_quantized, mae_loftq, mse_loftq = self.get_errors( bits=4, device=device, use_dora=True, tmp_path=tmp_path ) # first, sanity check that all errors are > 0.0 assert mae_quantized > 0.0 assert mse_quantized > 0.0 assert mae_loftq > 0.0 assert mse_loftq > 0.0 # next, check that LoftQ quantization errors are smaller than LoRA errors by a certain margin factor = 3 assert mae_loftq < (mae_quantized / factor) assert mse_loftq < (mse_quantized / factor) @pytest.mark.parametrize("device", ["cuda", "cpu"]) def test_bloomz_loftq_8bit_dora(self, device, tmp_path): # same as test_bloomz_loftq_8bit but with DoRA mae_quantized, mse_quantized, mae_loftq, mse_loftq = self.get_errors( bits=8, device=device, use_dora=True, tmp_path=tmp_path ) # first, sanity check that all errors are > 0.0 assert mae_quantized > 0.0 assert mse_quantized > 0.0 assert mae_loftq > 0.0 assert mse_loftq > 0.0 # next, check that LoftQ quantization errors are smaller than LoRA errors by a certain margin assert mae_loftq < (mae_quantized / self.error_factor) assert mse_loftq < (mse_quantized / self.error_factor) def test_replace_lora_weights_with_loftq_using_callable(self): """ Test replacing LoRa weights with LoFTQ using a callable. Using the replace_lora_weights_loftq function, we replace the LoRa weights of a bnb-quantized model with LoRA weights initialized by LoftQ on the fly. We use a callable to decide whether to replace the weights or not. This callable checks, for each weight, if replacing it would actually result in logits that are closer to the original logits of the non-quantized model. 
""" torch.manual_seed(0) model_id = "bigscience/bloomz-560m" device = "cuda" tokenizer = AutoTokenizer.from_pretrained(model_id) inputs = tokenizer("The dog was", padding=True, return_tensors="pt").to(device) with tempfile.TemporaryDirectory() as tmp_dir: model = AutoModelForCausalLM.from_pretrained(model_id).to(device) logits_base = model(**inputs).logits model.save_pretrained(tmp_dir) # load in 4bit bnb_config = BitsAndBytesConfig( load_in_4bit=True, bnb_4bit_use_double_quant=True, ) model = AutoModelForCausalLM.from_pretrained(model_id, quantization_config=bnb_config) model = get_peft_model(model, LoraConfig(task_type="CAUSAL_LM", target_modules="all-linear")) logits_lora = model(**inputs).logits current_mse = float("inf") logs = [] def my_callback(model, module_name): """Callable to replace weights with LoFTQ if the mse is lower than the current best one.""" nonlocal current_mse logits = model(**inputs).logits mse = ((logits_base - logits) ** 2).mean() if mse < current_mse: current_mse = mse logs.append(True) return True logs.append(False) return False replace_lora_weights_loftq(model, model_path=tmp_dir, callback=my_callback) logits_loftq = model(**inputs).logits mae_lora = (logits_base - logits_lora).abs().mean() mae_loftq = (logits_base - logits_loftq).abs().mean() mse_lora = ((logits_base - logits_lora) ** 2).mean() mse_loftq = ((logits_base - logits_loftq) ** 2).mean() # check that the error was reduced by a certain margin assert mae_loftq * 1.5 < mae_lora assert mse_loftq * 2.5 < mse_lora # check that the callback has returned some True and some False values assert any(logs) assert not all(logs) del model if torch.cuda.is_available(): torch.cuda.empty_cache() gc.collect() def test_replace_lora_weights_with_local_model(self): # see issue 2020 torch.manual_seed(0) model_id = "hf-internal-testing/tiny-random-OPTForCausalLM" device = "cuda" with tempfile.TemporaryDirectory() as tmp_dir: # save base model locally model = AutoModelForCausalLM.from_pretrained(model_id).to(device) model.save_pretrained(tmp_dir) del model # load in 4bit bnb_config = BitsAndBytesConfig( load_in_4bit=True, bnb_4bit_use_double_quant=True, ) # load the base model from local directory model = AutoModelForCausalLM.from_pretrained(tmp_dir, quantization_config=bnb_config) model = get_peft_model(model, LoraConfig()) # passing the local path directly works replace_lora_weights_loftq(model, model_path=tmp_dir) del model # load the base model from local directory model = AutoModelForCausalLM.from_pretrained(tmp_dir, quantization_config=bnb_config) model = get_peft_model(model, LoraConfig()) # when not passing, ensure that users are made aware of the `model_path` argument with pytest.raises(ValueError, match="model_path"): replace_lora_weights_loftq(model) del model if torch.cuda.is_available(): torch.cuda.empty_cache() gc.collect() @require_bitsandbytes @require_torch_gpu class MultiprocessTester(unittest.TestCase): def test_notebook_launcher(self): script_path = os.path.join("scripts", "launch_notebook_mp.py") cmd = ["python", script_path] with patch_environment(omp_num_threads=1): run_command(cmd, env=os.environ.copy()) @require_non_cpu class MixedPrecisionTests(unittest.TestCase): def setUp(self): self.causal_lm_model_id = "facebook/opt-125m" self.tokenizer = AutoTokenizer.from_pretrained(self.causal_lm_model_id) self.config = LoraConfig( r=16, lora_alpha=32, task_type="CAUSAL_LM", ) data = load_dataset("ybelkada/english_quotes_copy") self.data = data.map(lambda samples: self.tokenizer(samples["quote"]), 
batched=True) def tearDown(self): r""" Efficient mechanism to free GPU memory after each test. Based on https://github.com/huggingface/transformers/issues/21094 """ gc.collect() if torch.cuda.is_available(): torch.cuda.empty_cache() gc.collect() @pytest.mark.single_gpu_tests def test_model_using_float16_with_amp_raises(self): # This test shows the issue with using a model in fp16 and then trying to use it with mixed precision training, # which should not use fp16. model = AutoModelForCausalLM.from_pretrained( self.causal_lm_model_id, torch_dtype=torch.float16, ) model = get_peft_model(model, self.config, autocast_adapter_dtype=False) with tempfile.TemporaryDirectory() as tmp_dir: trainer = Trainer( model=model, train_dataset=self.data["train"], args=TrainingArguments( fp16=True, # <= this is required for the error to be raised output_dir=tmp_dir, max_steps=3, ), data_collator=DataCollatorForLanguageModeling(self.tokenizer, mlm=False), ) with pytest.raises(ValueError, match="Attempting to unscale FP16 gradients."): trainer.train() @pytest.mark.single_gpu_tests def test_model_using_float16_autocast_dtype(self): # Here we use autocast_adapter_dtype=True (the default) to automatically promote the adapter weights to float32. # No exception should be raised. model = AutoModelForCausalLM.from_pretrained( self.causal_lm_model_id, torch_dtype=torch.float16, ) model = get_peft_model(model, self.config, autocast_adapter_dtype=True) with tempfile.TemporaryDirectory() as tmp_dir: trainer = Trainer( model=model, train_dataset=self.data["train"], args=TrainingArguments( fp16=True, # <= this is required for the error to be raised output_dir=tmp_dir, max_steps=3, ), data_collator=DataCollatorForLanguageModeling(self.tokenizer, mlm=False), ) trainer.train() # does not raise @pytest.mark.single_gpu_tests def test_model_using_float16_explicit_cast(self): # Same test as above but containing the fix to make it work model = AutoModelForCausalLM.from_pretrained( self.causal_lm_model_id, torch_dtype=torch.float16, ) model = get_peft_model(model, self.config, autocast_adapter_dtype=False) # here we manually promote the adapter weights to float32 for param in model.parameters(): if param.requires_grad: param.data = param.data.float() dtype_counts_before = Counter(p.dtype for p in model.parameters()) model = AutoModelForCausalLM.from_pretrained( self.causal_lm_model_id, torch_dtype=torch.float16, ) model = get_peft_model(model, self.config, autocast_adapter_dtype=True) dtype_counts_after = Counter(p.dtype for p in model.parameters()) assert dtype_counts_before == dtype_counts_after with tempfile.TemporaryDirectory() as tmp_dir: trainer = Trainer( model=model, train_dataset=self.data["train"], args=TrainingArguments( fp16=True, # <= this is required for the error to be raised max_steps=3, output_dir=tmp_dir, ), data_collator=DataCollatorForLanguageModeling(self.tokenizer, mlm=False), ) trainer.train() # does not raise @pytest.mark.single_gpu_tests def test_load_model_using_float16_with_amp_raises(self): # Same as previous tests, but loading the adapter with PeftModel.from_pretrained instead model = AutoModelForCausalLM.from_pretrained( self.causal_lm_model_id, torch_dtype=torch.float16, ) model = get_peft_model(model, self.config, autocast_adapter_dtype=False) with tempfile.TemporaryDirectory() as tmp_dir: model.save_pretrained(tmp_dir) model = AutoModelForCausalLM.from_pretrained(self.causal_lm_model_id, torch_dtype=torch.float16) model = PeftModel.from_pretrained(model, tmp_dir, autocast_adapter_dtype=False, 
is_trainable=True) trainer = Trainer( model=model, train_dataset=self.data["train"], args=TrainingArguments( fp16=True, # <= this is required for the error to be raised output_dir=tmp_dir, max_steps=3, ), data_collator=DataCollatorForLanguageModeling(self.tokenizer, mlm=False), ) with pytest.raises(ValueError, match="Attempting to unscale FP16 gradients."): trainer.train() @pytest.mark.single_gpu_tests def test_load_model_using_float16_autocast_dtype(self): # Same as previous tests, but loading the adapter with PeftModel.from_pretrained instead model = AutoModelForCausalLM.from_pretrained( self.causal_lm_model_id, torch_dtype=torch.float16, ) # Below, we purposefully set autocast_adapter_dtype=False so that the saved adapter uses float16. We still want # the loaded adapter to use float32 when we load it with autocast_adapter_dtype=True. model = get_peft_model(model, self.config, autocast_adapter_dtype=False) # sanity check: this should have float16 adapter weights: assert ( model.base_model.model.model.decoder.layers[0].self_attn.v_proj.lora_A["default"].weight.dtype == torch.float16 ) with tempfile.TemporaryDirectory() as tmp_dir: model.save_pretrained(tmp_dir) model = AutoModelForCausalLM.from_pretrained(self.causal_lm_model_id, torch_dtype=torch.float16) model = PeftModel.from_pretrained(model, tmp_dir, autocast_adapter_dtype=True, is_trainable=True) # sanity check: this should NOT have float16 adapter weights: assert ( model.base_model.model.model.decoder.layers[0].self_attn.v_proj.lora_A["default"].weight.dtype == torch.float32 ) trainer = Trainer( model=model, train_dataset=self.data["train"], args=TrainingArguments( fp16=True, # <= this is required for the error to be raised output_dir=tmp_dir, max_steps=3, ), data_collator=DataCollatorForLanguageModeling(self.tokenizer, mlm=False), ) trainer.train() # does not raise @pytest.mark.single_gpu_tests def test_load_adapter_using_float16_autocast_dtype(self): # Here we test the load_adapter method with autocast_adapter_dtype. We show that autocasting is prevented when # calling load_model(..., autocast_adapter_dtype=False) and that it is enabled when calling # load_model(..., autocast_adapter_dtype=True) (the default). model = AutoModelForCausalLM.from_pretrained( self.causal_lm_model_id, torch_dtype=torch.float16, ) # Below, we purposefully set autocast_adapter_dtype=False so that the saved adapter uses float16. We still want # the loaded adapter to use float32 when we load it with autocast_adapter_dtype=True. 
model = get_peft_model(model, self.config, autocast_adapter_dtype=False) # sanity check: this should have float16 adapter weights: assert ( model.base_model.model.model.decoder.layers[0].self_attn.v_proj.lora_A["default"].weight.dtype == torch.float16 ) with tempfile.TemporaryDirectory() as tmp_dir: model.save_pretrained(tmp_dir) model = AutoModelForCausalLM.from_pretrained(self.causal_lm_model_id, torch_dtype=torch.float16) # the default adapter is now in float16 model = get_peft_model(model, self.config, autocast_adapter_dtype=False) # sanity check: this should NOT have float16 adapter weights: assert ( model.base_model.model.model.decoder.layers[0].self_attn.v_proj.lora_A["default"].weight.dtype == torch.float16 ) # now load the first adapter in float16 using the adapter name "loaded16" model.load_adapter(tmp_dir, "loaded16", autocast_adapter_dtype=False) assert ( model.base_model.model.model.decoder.layers[0].self_attn.v_proj.lora_A["loaded16"].weight.dtype == torch.float16 ) # now load the first adapter in float32 using the adapter name "loaded32" model.load_adapter(tmp_dir, "loaded32", autocast_adapter_dtype=True) assert ( model.base_model.model.model.decoder.layers[0].self_attn.v_proj.lora_A["loaded32"].weight.dtype == torch.float32 ) # training with the default adapter, which is in float16, should raise model.set_adapter("default") trainer = Trainer( model=model, train_dataset=self.data["train"], args=TrainingArguments( fp16=True, # <= this is required for the error to be raised output_dir=tmp_dir, max_steps=3, ), data_collator=DataCollatorForLanguageModeling(self.tokenizer, mlm=False), ) with pytest.raises(ValueError, match="Attempting to unscale FP16 gradients."): trainer.train() # training the model with the adapter "loaded16", which is in float16, should also raise model.set_adapter("loaded16") trainer = Trainer( model=model, train_dataset=self.data["train"], args=TrainingArguments( fp16=True, # <= this is required for the error to be raised output_dir=tmp_dir, max_steps=3, ), data_collator=DataCollatorForLanguageModeling(self.tokenizer, mlm=False), ) with pytest.raises(ValueError, match="Attempting to unscale FP16 gradients."): trainer.train() # training the model with the adapter "loaded32", which is in float32, should not raise model.set_adapter("loaded32") trainer = Trainer( model=model, train_dataset=self.data["train"], args=TrainingArguments( fp16=True, # <= this is required for the error to be raised output_dir=tmp_dir, max_steps=3, ), data_collator=DataCollatorForLanguageModeling(self.tokenizer, mlm=False), ) trainer.train() # does not raise @require_torch_gpu @require_aqlm @unittest.skipUnless( version.parse(importlib.metadata.version("transformers")) >= version.parse("4.38.0"), "test requires `transformers>=4.38.0`", ) class PeftAqlmGPUTests(unittest.TestCase): r""" AQLM + peft tests """ def setUp(self): self.causal_lm_model_id = "BlackSamorez/TinyLlama-1_1B-Chat-v1_0-AQLM-2Bit-1x16-hf" self.tokenizer = AutoTokenizer.from_pretrained(self.causal_lm_model_id) def tearDown(self): r""" Efficient mechanism to free GPU memory after each test. 
Based on https://github.com/huggingface/transformers/issues/21094 """ gc.collect() torch.cuda.empty_cache() def _check_inference_finite(self, model, batch): # try inference without Trainer class training = model.training model.eval() output = model(**batch.to(model.device)) assert torch.isfinite(output.logits).all() model.train(training) @pytest.mark.single_gpu_tests def test_causal_lm_training_aqlm(self): r""" Test the CausalLM training on a single GPU device. The test would simply fail if the adapters are not set correctly. """ with tempfile.TemporaryDirectory() as tmp_dir: model = AutoModelForCausalLM.from_pretrained( self.causal_lm_model_id, device_map="cuda", torch_dtype="auto", ) model = prepare_model_for_kbit_training(model) config = LoraConfig( r=16, lora_alpha=32, target_modules=["q_proj", "v_proj"], lora_dropout=0.05, bias="none", task_type="CAUSAL_LM", ) model = get_peft_model(model, config) data = load_dataset("ybelkada/english_quotes_copy") data = data.map(lambda samples: self.tokenizer(samples["quote"]), batched=True) trainer = Trainer( model=model, train_dataset=data["train"], args=TrainingArguments( per_device_train_batch_size=4, gradient_accumulation_steps=4, warmup_steps=2, max_steps=3, learning_rate=2e-4, logging_steps=1, output_dir=tmp_dir, fp16=True, ), data_collator=DataCollatorForLanguageModeling(self.tokenizer, mlm=False), ) model.config.use_cache = False trainer.train() model.cpu().save_pretrained(tmp_dir) assert "adapter_config.json" in os.listdir(tmp_dir) assert SAFETENSORS_WEIGHTS_NAME in os.listdir(tmp_dir) # assert loss is not None assert trainer.state.log_history[-1]["train_loss"] is not None @require_torch_gpu @require_hqq @unittest.skipUnless( version.parse(importlib.metadata.version("transformers")) >= version.parse("4.36.1"), "test requires `transformers>=4.36.1`", ) class PeftHqqGPUTests(unittest.TestCase): r""" HQQ + peft tests """ def setUp(self): self.causal_lm_model_id = "TinyLlama/TinyLlama-1.1B-Chat-v1.0" self.tokenizer = AutoTokenizer.from_pretrained(self.causal_lm_model_id) def tearDown(self): r""" Efficient mechanism to free GPU memory after each test. Based on https://github.com/huggingface/transformers/issues/21094 """ gc.collect() torch.cuda.empty_cache() @pytest.mark.single_gpu_tests @parameterized.expand([False, True]) def test_causal_lm_training_hqq(self, use_dora): r""" Test the CausalLM training on a single GPU device. The test would simply fail if the adapters are not set correctly. 
""" from transformers import HqqConfig with tempfile.TemporaryDirectory() as tmp_dir: device = "cuda" compute_dtype = torch.float16 quant_config = HqqConfig(nbits=4, group_size=64) model = AutoModelForCausalLM.from_pretrained( self.causal_lm_model_id, device_map=device, torch_dtype=compute_dtype, quantization_config=quant_config, ) model = prepare_model_for_kbit_training(model) config = LoraConfig( r=16, lora_alpha=32, target_modules=["q_proj", "v_proj"], lora_dropout=0.05, bias="none", task_type="CAUSAL_LM", use_dora=use_dora, ) model = get_peft_model(model, config) data = load_dataset("ybelkada/english_quotes_copy") data = data.map(lambda samples: self.tokenizer(samples["quote"]), batched=True) trainer = Trainer( model=model, train_dataset=data["train"], args=TrainingArguments( per_device_train_batch_size=4, gradient_accumulation_steps=4, warmup_steps=2, max_steps=3, learning_rate=2e-4, logging_steps=1, output_dir=tmp_dir, fp16=True, ), data_collator=DataCollatorForLanguageModeling(self.tokenizer, mlm=False), ) model.config.use_cache = False trainer.train() model.save_pretrained(tmp_dir) assert "adapter_config.json" in os.listdir(tmp_dir) assert SAFETENSORS_WEIGHTS_NAME in os.listdir(tmp_dir) # assert loss is not None assert trainer.state.log_history[-1]["train_loss"] is not None @pytest.mark.single_gpu_tests def test_hqq_lora_model_outputs(self): # check that the outputs generated by HQQ with LoRA are similar to those without HQQ from transformers import HqqConfig device = "cuda" compute_dtype = torch.float16 # first load the model without HQQ model = AutoModelForCausalLM.from_pretrained( self.causal_lm_model_id, device_map=device, torch_dtype=compute_dtype, ) config = LoraConfig( target_modules=["q_proj", "v_proj"], task_type="CAUSAL_LM", init_lora_weights=False, ) torch.manual_seed(0) model = get_peft_model(model, config).eval() inputs = self.tokenizer("The meaning of unit tests is", return_tensors="pt").to(model.device) with torch.inference_mode(): output_normal = model(**inputs).logits assert torch.isfinite(output_normal).all() del model gc.collect() torch.cuda.empty_cache() # now load with HQQ quant_config = HqqConfig(nbits=4, group_size=64) model = AutoModelForCausalLM.from_pretrained( self.causal_lm_model_id, device_map=device, torch_dtype=compute_dtype, quantization_config=quant_config, ) torch.manual_seed(0) model = get_peft_model(model, config).eval() with torch.inference_mode(): output_hqq = model(**inputs).logits # check that outputs of HQQ are highly correlated; there are outliers, so don't check for equality cc_matrix = torch.corrcoef(torch.stack((output_normal.flatten(), output_hqq.flatten()))) assert cc_matrix.min() > 0.97 # check that outputs are the same after merging cc_matrix = torch.corrcoef(torch.stack((output_normal.flatten(), output_hqq.flatten()))) assert cc_matrix.min() > 0.97 # check outputs are the same after unmerging model.unmerge_adapter() with torch.inference_mode(): output_unmerged = model(**inputs).logits cc_matrix = torch.corrcoef(torch.stack((output_normal.flatten(), output_unmerged.flatten()))) assert cc_matrix.min() > 0.97 # check that the results are the same after saving and loading with tempfile.TemporaryDirectory() as tmp_dir: model.save_pretrained(tmp_dir) del model gc.collect() torch.cuda.empty_cache() quant_config = HqqConfig(nbits=4, group_size=64) model = AutoModelForCausalLM.from_pretrained( self.causal_lm_model_id, device_map=device, torch_dtype=compute_dtype, quantization_config=quant_config, ) model = PeftModel.from_pretrained(model, 
tmp_dir) with torch.inference_mode(): output_loaded = model(**inputs).logits # for loading, we expect high precision, so check for equality and not just correlation atol, rtol = 1e-6, 1e-6 assert torch.allclose(output_hqq, output_loaded, atol=atol, rtol=rtol) # check that outputs are the same after merge_and_unload model = model.merge_and_unload() with torch.inference_mode(): output_merged_unloaded = model(**inputs).logits cc_matrix = torch.corrcoef(torch.stack((output_normal.flatten(), output_merged_unloaded.flatten()))) assert cc_matrix.min() > 0.97 # TODO: unskip the tests once https://github.com/casper-hansen/AutoAWQ/issues/466 is fixed @require_torch_gpu @require_auto_awq @pytest.mark.skip(reason="Needs https://github.com/casper-hansen/AutoAWQ/issues/466 to be fixed first") class PeftAwqGPUTests(unittest.TestCase): r""" Awq + peft tests """ def setUp(self): self.causal_lm_model_id = "peft-internal-testing/opt-125m-awq" self.tokenizer = AutoTokenizer.from_pretrained(self.causal_lm_model_id) def tearDown(self): r""" Efficient mechanism to free GPU memory after each test. Based on https://github.com/huggingface/transformers/issues/21094 """ gc.collect() torch.cuda.empty_cache() def _check_inference_finite(self, model, batch): # try inference without Trainer class training = model.training model.eval() output = model(**batch.to(model.device)) assert torch.isfinite(output.logits).all() model.train(training) @pytest.mark.single_gpu_tests def test_causal_lm_training_awq(self): r""" Test the CausalLM training on a single GPU device. The test would simply fail if the adapters are not set correctly. """ with tempfile.TemporaryDirectory() as tmp_dir: model = AutoModelForCausalLM.from_pretrained( self.causal_lm_model_id, device_map="auto", ) model = prepare_model_for_kbit_training(model) config = LoraConfig( r=16, lora_alpha=32, target_modules=["q_proj", "v_proj"], lora_dropout=0.05, bias="none", task_type="CAUSAL_LM", ) model = get_peft_model(model, config) data = load_dataset("ybelkada/english_quotes_copy") data = data.map(lambda samples: self.tokenizer(samples["quote"]), batched=True) # TODO: deal correctly with this case in transformers model._is_quantized_training_enabled = True trainer = Trainer( model=model, train_dataset=data["train"], args=TrainingArguments( per_device_train_batch_size=4, gradient_accumulation_steps=4, warmup_steps=2, max_steps=3, learning_rate=2e-4, logging_steps=1, output_dir=tmp_dir, fp16=True, ), data_collator=DataCollatorForLanguageModeling(self.tokenizer, mlm=False), ) model.config.use_cache = False trainer.train() model.cpu().save_pretrained(tmp_dir) assert "adapter_config.json" in os.listdir(tmp_dir) assert SAFETENSORS_WEIGHTS_NAME in os.listdir(tmp_dir) # assert loss is not None assert trainer.state.log_history[-1]["train_loss"] is not None @pytest.mark.multi_gpu_tests @require_torch_multi_gpu def test_causal_lm_training_multi_gpu(self): r""" Test the CausalLM training on a multi-GPU device. The test would simply fail if the adapters are not set correctly. 
""" with tempfile.TemporaryDirectory() as tmp_dir: model = AutoModelForCausalLM.from_pretrained( self.causal_lm_model_id, device_map="auto", ) assert set(model.hf_device_map.values()) == set(range(torch.cuda.device_count())) model = prepare_model_for_kbit_training(model) setattr(model, "model_parallel", True) setattr(model, "is_parallelizable", True) config = LoraConfig( r=16, lora_alpha=32, target_modules=["q_proj", "v_proj"], lora_dropout=0.05, bias="none", task_type="CAUSAL_LM", ) model = get_peft_model(model, config) data = load_dataset("Abirate/english_quotes") data = data.map(lambda samples: self.tokenizer(samples["quote"]), batched=True) trainer = Trainer( model=model, train_dataset=data["train"], args=TrainingArguments( per_device_train_batch_size=4, gradient_accumulation_steps=4, warmup_steps=2, max_steps=3, learning_rate=2e-4, logging_steps=1, output_dir=tmp_dir, ), data_collator=DataCollatorForLanguageModeling(self.tokenizer, mlm=False), ) model.config.use_cache = False trainer.train() model.cpu().save_pretrained(tmp_dir) assert "adapter_config.json" in os.listdir(tmp_dir) assert SAFETENSORS_WEIGHTS_NAME in os.listdir(tmp_dir) # assert loss is not None assert trainer.state.log_history[-1]["train_loss"] is not None @require_torch_gpu @require_eetq class PeftEetqGPUTests(unittest.TestCase): r""" EETQ + peft tests """ def setUp(self): self.causal_lm_model_id = "facebook/opt-125m" self.tokenizer = AutoTokenizer.from_pretrained(self.causal_lm_model_id) def tearDown(self): r""" Efficient mechanism to free GPU memory after each test. Based on https://github.com/huggingface/transformers/issues/21094 """ gc.collect() torch.cuda.empty_cache() def _check_inference_finite(self, model, batch): # try inference without Trainer class training = model.training model.eval() output = model(**batch.to(model.device)) assert torch.isfinite(output.logits).all() model.train(training) @pytest.mark.single_gpu_tests def test_causal_lm_training_eetq(self): r""" Test the CausalLM training on a single GPU device. The test would simply fail if the adapters are not set correctly. """ from transformers import EetqConfig with tempfile.TemporaryDirectory() as tmp_dir: quantization_config = EetqConfig("int8") model = AutoModelForCausalLM.from_pretrained( self.causal_lm_model_id, device_map="auto", quantization_config=quantization_config ) model = prepare_model_for_kbit_training(model) config = LoraConfig( r=16, lora_alpha=32, target_modules=["q_proj", "v_proj"], lora_dropout=0.05, bias="none", task_type="CAUSAL_LM", ) model = get_peft_model(model, config) data = load_dataset("ybelkada/english_quotes_copy") data = data.map(lambda samples: self.tokenizer(samples["quote"]), batched=True) trainer = Trainer( model=model, train_dataset=data["train"], args=TrainingArguments( per_device_train_batch_size=4, gradient_accumulation_steps=4, warmup_steps=2, max_steps=3, learning_rate=2e-4, logging_steps=1, output_dir=tmp_dir, ), data_collator=DataCollatorForLanguageModeling(self.tokenizer, mlm=False), ) model.config.use_cache = False trainer.train() model.cpu().save_pretrained(tmp_dir) assert "adapter_config.json" in os.listdir(tmp_dir) assert SAFETENSORS_WEIGHTS_NAME in os.listdir(tmp_dir) # assert loss is not None assert trainer.state.log_history[-1]["train_loss"] is not None @pytest.mark.multi_gpu_tests @require_torch_multi_gpu def test_causal_lm_training_multi_gpu_eetq(self): r""" Test the CausalLM training on a multi-GPU device. The test would simply fail if the adapters are not set correctly. 
""" from transformers import EetqConfig with tempfile.TemporaryDirectory() as tmp_dir: quantization_config = EetqConfig("int8") model = AutoModelForCausalLM.from_pretrained( self.causal_lm_model_id, device_map="auto", quantization_config=quantization_config, ) assert set(model.hf_device_map.values()) == set(range(torch.cuda.device_count())) model = prepare_model_for_kbit_training(model) setattr(model, "model_parallel", True) setattr(model, "is_parallelizable", True) config = LoraConfig( r=16, lora_alpha=32, target_modules=["q_proj", "v_proj"], lora_dropout=0.05, bias="none", task_type="CAUSAL_LM", ) model = get_peft_model(model, config) data = load_dataset("Abirate/english_quotes") data = data.map(lambda samples: self.tokenizer(samples["quote"]), batched=True) trainer = Trainer( model=model, train_dataset=data["train"], args=TrainingArguments( per_device_train_batch_size=4, gradient_accumulation_steps=4, warmup_steps=2, max_steps=3, learning_rate=2e-4, logging_steps=1, output_dir=tmp_dir, ), data_collator=DataCollatorForLanguageModeling(self.tokenizer, mlm=False), ) model.config.use_cache = False trainer.train() model.cpu().save_pretrained(tmp_dir) assert "adapter_config.json" in os.listdir(tmp_dir) assert SAFETENSORS_WEIGHTS_NAME in os.listdir(tmp_dir) # assert loss is not None assert trainer.state.log_history[-1]["train_loss"] is not None PRECISIONS = [(torch.float32), (torch.float16), (torch.bfloat16)] LORA_PARAMS = { "r": 8, "lora_alpha": 16, "lora_dropout": 0.05, } class SimpleModel(torch.nn.Module): def __init__(self): super().__init__() self.embedding_layer = torch.nn.Embedding(1000, 768) self.layer_norm = torch.nn.LayerNorm(768) self.linear_transform = torch.nn.Linear(768, 256) def forward(self, input_ids): embedded_output = self.embedding_layer(input_ids) norm_output = self.layer_norm(embedded_output) linear_output = self.linear_transform(norm_output) return linear_output class SimpleConv2DModel(torch.nn.Module): def __init__(self): super().__init__() self.embedding_layer = torch.nn.Embedding(1000, 768) self.layer_norm = torch.nn.LayerNorm(768) self.conv2d_transform = torch.nn.Conv2d(1, 256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1)) def forward(self, input_ids): # Additional layers for your custom model embedded_output = self.embedding_layer(input_ids) norm_output = self.layer_norm(embedded_output) # Reshape for Conv2d input (add batch size dimension) norm_output = norm_output.unsqueeze(1) conv_output = self.conv2d_transform(norm_output) # Remove batch size dimension conv_output = conv_output.squeeze(1) return conv_output @require_non_cpu class TestAutoCast(unittest.TestCase): device = infer_device() # This test makes sure, that Lora dtypes are consistent with the types # infered by torch.autocast under tested PRECISIONS @parameterized.expand(PRECISIONS) def test_simple_model(self, *args, **kwargs): self._test_model(SimpleModel(), *args, **kwargs) @parameterized.expand(PRECISIONS) def test_simple_lora_linear_model(self, *args, **kwargs): simple_model = SimpleModel() config = LoraConfig( **LORA_PARAMS, target_modules=["linear_transform"], ) lora_model = get_peft_model(simple_model, config) self._test_model(lora_model, *args, **kwargs) @parameterized.expand(PRECISIONS) def test_simple_lora_embedding_model(self, *args, **kwargs): simple_model = SimpleModel() config = LoraConfig( **LORA_PARAMS, target_modules=["embedding_layer"], ) lora_model = get_peft_model(simple_model, config) self._test_model(lora_model, *args, **kwargs) @parameterized.expand(PRECISIONS) def 
test_simple_conv2d_model(self, *args, **kwargs): self._test_model(SimpleConv2DModel(), *args, **kwargs) @parameterized.expand(PRECISIONS) def test_simple_lora_conv2d_model(self, *args, **kwargs): simple_model = SimpleConv2DModel() config = LoraConfig( **LORA_PARAMS, target_modules=["conv2d_transform"], ) lora_model = get_peft_model(simple_model, config) self._test_model(lora_model, *args, **kwargs) def _test_model(self, model, precision): # Move model to GPU model = model.to(self.device) # Prepare dummy inputs input_ids = torch.randint(0, 1000, (2, 10)).to(self.device) if precision == torch.bfloat16: is_xpu = self.device == "xpu" is_cuda_bf16 = self.device == "cuda" and torch.cuda.is_bf16_supported() if not (is_xpu or is_cuda_bf16): self.skipTest("Bfloat16 not supported on this device") # Forward pass with test precision with torch.autocast(enabled=True, dtype=precision, device_type=self.device): outputs = model(input_ids) assert outputs.dtype == precision class TestFSDPWrap: """ Test that we can successfully initialize an FSDP instance of the module. This is a very simple test, as it does not perform actual FSDP training. Here we just ensure that the FSDP instance can be created. This can fail for several reasons, e.g. int dtype from BNB or inconsistent requires_grad settings due to the auto wrap policy. """ @pytest.mark.single_gpu_tests @require_bitsandbytes def test_bnb_4bit_wrap_fsdp(self): quant_config = BitsAndBytesConfig( load_in_4bit=True, # float32 must be used, or else FSDP will complain about mixed int and float dtypes bnb_4bit_compute_dtype=torch.float32, bnb_4bit_quant_storage=torch.float32, bnb_4bit_use_double_quant=True, ) model = AutoModelForCausalLM.from_pretrained( "facebook/opt-125m", quantization_config=quant_config, torch_dtype=torch.float32, ) # model = prepare_model_for_kbit_training(model) config = LoraConfig( target_modules=["q_proj", "v_proj"], task_type="CAUSAL_LM", use_dora=True, ) model = get_peft_model(model, config) os.environ["MASTER_ADDR"] = "localhost" os.environ["MASTER_PORT"] = "29501" init_process_group(world_size=1, rank=0) # check that this does not raise: FSDP(model, auto_wrap_policy=fsdp_auto_wrap_policy(model), use_orig_params=False, sync_module_states=True) class TestBOFT: """ Test that we can correctly use half-precision models with BOFT. """ @require_torch_gpu @pytest.mark.single_gpu_tests def test_boft_half_linear(self): # Check that we can use BoFT with model loaded in half precision layer = torch.nn.Linear(160, 160).cuda() layer = boft.layer.Linear(layer, "layer", boft_n_butterfly_factor=2).to(dtype=torch.bfloat16) x = torch.randn(160, 160, device="cuda", dtype=torch.bfloat16) layer(x) # does not raise @require_torch_gpu @pytest.mark.single_gpu_tests def test_boft_half_conv(self): conv = torch.nn.Conv2d(1, 1, 4).cuda() conv = boft.layer.Conv2d(conv, "conv", boft_n_butterfly_factor=2).to(dtype=torch.bfloat16) x = torch.randn(1, 160, 160, device="cuda", dtype=torch.bfloat16) conv(x) # does not raise
peft/tests/test_gpu_examples.py/0
{ "file_path": "peft/tests/test_gpu_examples.py", "repo_id": "peft", "token_count": 60145 }
211
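The quantized-training tests above all repeat one recipe: load a quantized (or sharded) base model, call `prepare_model_for_kbit_training`, wrap it with a LoRA config, then hand the wrapped model to `Trainer`. A minimal sketch of that recipe outside the test harness — model id and LoRA hyperparameters are simply the ones the tests happen to use, not recommendations:

```py
from transformers import AutoModelForCausalLM
from peft import LoraConfig, get_peft_model, prepare_model_for_kbit_training

model = AutoModelForCausalLM.from_pretrained("facebook/opt-125m", device_map="auto")
model = prepare_model_for_kbit_training(model)  # upcasts norms etc. and enables input grads
config = LoraConfig(
    r=16, lora_alpha=32, target_modules=["q_proj", "v_proj"],
    lora_dropout=0.05, bias="none", task_type="CAUSAL_LM",
)
model = get_peft_model(model, config)
model.print_trainable_parameters()  # only the LoRA A/B matrices should be trainable
```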
# Copyright 2023-present the HuggingFace Inc. team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import os import huggingface_hub import pytest import torch from safetensors.torch import load_file from transformers import AutoModelForCausalLM, AutoTokenizer from peft import LoraConfig, PeftType, TaskType, XLoraConfig, get_peft_model from peft.peft_model import PeftModel from peft.utils import infer_device class TestXlora: torch_device = infer_device() model_id = "facebook/opt-125m" num_loras = 4 @pytest.fixture(scope="function") def lora_dir(self, tmp_path_factory): return tmp_path_factory.mktemp("lora") @pytest.fixture(scope="function") def lora_embedding_dir(self, tmp_path_factory): return tmp_path_factory.mktemp("lora_embedding") @pytest.fixture(scope="function") def saved_lora_adapters(self, lora_dir): file_names = [] for i in range(1, self.num_loras + 1): torch.manual_seed(i) lora_config = LoraConfig(task_type="CAUSAL_LM", init_lora_weights=False) model = AutoModelForCausalLM.from_pretrained(self.model_id) peft_model = get_peft_model(model, lora_config) file_name = os.path.join(lora_dir, f"checkpoint-{i}") peft_model.save_pretrained(file_name) file_names.append(file_name) return file_names @pytest.fixture(scope="function") def saved_lora_embedding_adapters(self, lora_embedding_dir): file_names = [] for i in range(1, self.num_loras + 1): torch.manual_seed(i) lora_config = LoraConfig(task_type="CAUSAL_LM", init_lora_weights=False, target_modules=["embed_tokens"]) model = AutoModelForCausalLM.from_pretrained(self.model_id) peft_model = get_peft_model(model, lora_config) file_name = os.path.join(lora_embedding_dir, f"checkpoint-{i}") peft_model.save_pretrained(file_name) file_names.append(file_name) return file_names @pytest.fixture(scope="function") def tokenizer(self): tokenizer = AutoTokenizer.from_pretrained(self.model_id, trust_remote_code=True, device_map=self.torch_device) return tokenizer @pytest.fixture(scope="function") def embedding_model(self, saved_lora_embedding_adapters): model = AutoModelForCausalLM.from_pretrained(self.model_id) model.config.use_cache = False adapters = {str(i): file_name for i, file_name in enumerate(saved_lora_embedding_adapters)} peft_config = XLoraConfig( task_type=TaskType.CAUSAL_LM, peft_type=PeftType.XLORA, hidden_size=model.config.hidden_size, xlora_depth=8, adapters=adapters, ) model = get_peft_model(model, peft_config).to(self.torch_device) return model @pytest.fixture(scope="function") def model(self, saved_lora_adapters): model = AutoModelForCausalLM.from_pretrained(self.model_id) model.config.use_cache = False adapters = {str(i): file_name for i, file_name in enumerate(saved_lora_adapters)} peft_config = XLoraConfig( task_type=TaskType.CAUSAL_LM, peft_type=PeftType.XLORA, hidden_size=model.config.hidden_size, xlora_depth=8, adapters=adapters, ) model = get_peft_model(model, peft_config).to(self.torch_device) return model @pytest.fixture(scope="function") def model_layerwise(self, saved_lora_adapters): model = 
AutoModelForCausalLM.from_pretrained(self.model_id) model.config.use_cache = False adapters = {str(i): file_name for i, file_name in enumerate(saved_lora_adapters)} peft_config = XLoraConfig( task_type=TaskType.CAUSAL_LM, peft_type=PeftType.XLORA, hidden_size=model.config.hidden_size, xlora_depth=8, adapters=adapters, layerwise_scalings=True, ) model = get_peft_model(model, peft_config).to(self.torch_device) return model def test_functional(self, tokenizer, model): model.enable_scalings_logging() inputs = tokenizer.encode("Python is a", add_special_tokens=False, return_tensors="pt") outputs = model.generate( input_ids=inputs.to(self.torch_device), max_new_tokens=32, ) assert torch.isfinite(outputs[: inputs.shape[1] :]).all() def test_scalings_logging_methods(self, tokenizer, model): model.enable_scalings_logging() inputs = tokenizer.encode("Python is a", add_special_tokens=False, return_tensors="pt") outputs = model.generate( input_ids=inputs.to(self.torch_device), max_new_tokens=32, ) assert torch.isfinite(outputs[: inputs.shape[1] :]).all() _ = model.get_latest_scalings() # 32 is the number of max scalings. 3 is the number of prompt tokens. assert 32 + 3 >= len(model.get_scalings_log()) > 0 model.disable_scalings_logging() inputs = tokenizer.encode("Python is a", add_special_tokens=False, return_tensors="pt") outputs = model.generate( input_ids=inputs.to(self.torch_device), max_new_tokens=32, ) assert torch.isfinite(outputs[: inputs.shape[1] :]).all() assert 32 >= len(model.get_scalings_log()) > 0 bucketed = model.get_bucketed_scalings_log() keys = bucketed.keys() # One bucket for prompt (seqlen=...) and one for the completion (seqlen=1) assert len(bucketed) == 2 # One bucket for prompt (which has 1 elem) assert len(bucketed[max(keys)][0]) == 1 assert len(bucketed[max(keys)][1]) == 1 assert bucketed[max(keys)][0][0] == 0 # One bucket for completions with bucket name 1 assert len(bucketed[1][0]) > 1 assert len(bucketed[1][1]) > 1 assert bucketed[1][0][0] > 0 model.clear_scalings_log() assert len(model.get_scalings_log()) == 0 def test_misc_methods(self, tokenizer, model): model.set_global_scaling_weight(1.5) assert model.internal_xlora_classifier.config.global_scaling_weight == 1.5 assert model.get_global_scaling_weight() == 1.5 inputs = tokenizer.encode("Python is a", add_special_tokens=False, return_tensors="pt") outputs = model.generate( input_ids=inputs.to(self.torch_device), max_new_tokens=32, ) assert torch.isfinite(outputs[: inputs.shape[1] :]).all() assert str(model) is not None def test_save_load_functional(self, tokenizer, model, tmp_path): inputs = tokenizer.encode("Python is a", add_special_tokens=False, return_tensors="pt") outputs = model.generate( input_ids=inputs.to(self.torch_device), max_new_tokens=32, ) before_logits = outputs[: inputs.shape[1] :] assert torch.isfinite(before_logits).all() model.save_pretrained(save_directory=tmp_path) del model model = AutoModelForCausalLM.from_pretrained(self.model_id) model.config.use_cache = False model = PeftModel.from_pretrained(model=model, model_id=tmp_path).to(self.torch_device) inputs = tokenizer.encode("Python is a", add_special_tokens=False, return_tensors="pt") outputs = model.generate( input_ids=inputs.to(self.torch_device), max_new_tokens=32, ) after_logits = outputs[: inputs.shape[1] :] assert torch.isfinite(after_logits).all() assert torch.equal(after_logits, before_logits) def test_save_load_functional_pt(self, tokenizer, model, tmp_path): inputs = tokenizer.encode("Python is a", add_special_tokens=False, 
return_tensors="pt") outputs = model.generate( input_ids=inputs.to(self.torch_device), max_new_tokens=32, ) before_logits = outputs[: inputs.shape[1] :] assert torch.isfinite(before_logits).all() model.save_pretrained(save_directory=tmp_path, safe_serialization=False) del model model = AutoModelForCausalLM.from_pretrained(self.model_id) model.config.use_cache = False model = PeftModel.from_pretrained(model=model, model_id=tmp_path, safe_serialization=False).to( self.torch_device ) inputs = tokenizer.encode("Python is a", add_special_tokens=False, return_tensors="pt") outputs = model.generate( input_ids=inputs.to(self.torch_device), max_new_tokens=32, ) after_logits = outputs[: inputs.shape[1] :] assert torch.isfinite(after_logits).all() assert torch.equal(after_logits, before_logits), (after_logits, before_logits) def test_topk_lora(self, tokenizer, model): model.set_topk_lora(2) assert model.internal_xlora_classifier.config.top_k_lora == 2 inputs = tokenizer.encode("Python is a", add_special_tokens=False, return_tensors="pt") outputs = model.generate( input_ids=inputs.to(self.torch_device), max_new_tokens=32, ) assert torch.isfinite(outputs[: inputs.shape[1] :]).all() def test_softmax_topk(self, tokenizer, model): # Just reach in to set the config model.internal_xlora_classifier.config.top_k_lora = 2 model.internal_xlora_classifier.config.enable_softmax = False model.internal_xlora_classifier.config.enable_softmax_topk = True inputs = tokenizer.encode("Python is a", add_special_tokens=False, return_tensors="pt") outputs = model.generate( input_ids=inputs.to(self.torch_device), max_new_tokens=32, ) assert torch.isfinite(outputs[: inputs.shape[1] :]).all() def test_set_override_scaling_pass_value(self, model): # Defaults to 0 assert model.internal_xlora_classifier.override_scaling_pass_value == 0.0 # Set it to 2 and make sure it actually is model.set_scaling_pass_value(2) assert model.internal_xlora_classifier.override_scaling_pass_value == 2 assert model.internal_xlora_classifier.config.scaling_pass_value == 2 # Set it to 2 and make sure it is 1/a model.set_scaling_pass_value(None) assert model.internal_xlora_classifier.override_scaling_pass_value == 1 / self.num_loras assert model.internal_xlora_classifier.config.scaling_pass_value == 1 / self.num_loras def test_functional_layerwise(self, tokenizer, model_layerwise): model_layerwise.enable_scalings_logging() inputs = tokenizer.encode("Python is a", add_special_tokens=False, return_tensors="pt") outputs = model_layerwise.generate( input_ids=inputs.to(self.torch_device), max_new_tokens=32, ) assert torch.isfinite(outputs[: inputs.shape[1] :]).all() def test_disable_adapter(self, tokenizer, model): model.enable_scalings_logging() inputs = tokenizer.encode("Python is a", add_special_tokens=False, return_tensors="pt") with model.disable_adapter(): outputs_disabled = model.generate( input_ids=inputs.to(self.torch_device), max_new_tokens=32, ) outputs = model.generate( input_ids=inputs.to(self.torch_device), max_new_tokens=32, ) assert torch.isfinite(outputs_disabled[: inputs.shape[1] :]).all() assert torch.isfinite(outputs[: inputs.shape[1] :]).all() assert not torch.equal(outputs, outputs_disabled) def test_functional_embedding(self, tokenizer, embedding_model): inputs = tokenizer.encode("Python is a", add_special_tokens=False, return_tensors="pt") outputs = embedding_model.generate( input_ids=inputs.to(self.torch_device), max_new_tokens=32, ) assert torch.isfinite(outputs[: inputs.shape[1] :]).all() def test_xlora_loading_valid(self): # This 
test also simultaneously tests the loading-from-hub functionality! torch.manual_seed(123) model_id = "facebook/opt-125m" model = AutoModelForCausalLM.from_pretrained(model_id) model.config.use_cache = False adapters = [ "peft-internal-testing/opt-125m-dummy-lora", "peft-internal-testing/opt-125m-dummy-lora", ] adapters = {str(i): file_name for i, file_name in enumerate(adapters)} peft_config = XLoraConfig( task_type=TaskType.CAUSAL_LM, peft_type=PeftType.XLORA, hidden_size=model.config.hidden_size, adapters=adapters, xlora_depth=8, xlora_size=2048, layerwise_scalings=True, xlora_dropout_p=0.2, ) model = get_peft_model(model, peft_config) downloaded = huggingface_hub.hf_hub_download(repo_id=adapters["0"], filename="adapter_model.safetensors") sd = load_file(downloaded) w0 = model.base_model.model.model.decoder.layers[0].self_attn.q_proj.lora_A["0"].weight w1 = sd["base_model.model.model.decoder.layers.0.self_attn.q_proj.lora_A.weight"] assert torch.allclose(w0, w1)
peft/tests/test_xlora.py/0
{ "file_path": "peft/tests/test_xlora.py", "repo_id": "peft", "token_count": 6277 }
212
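A condensed sketch of the X-LoRA setup these fixtures build: several saved LoRA adapters are combined under one `XLoraConfig`, and the wrapped model can then log its per-token adapter scalings. The adapter paths below are placeholders for directories produced by `save_pretrained`:

```py
from transformers import AutoModelForCausalLM
from peft import PeftType, TaskType, XLoraConfig, get_peft_model

base = AutoModelForCausalLM.from_pretrained("facebook/opt-125m")
base.config.use_cache = False  # the tests above disable the KV cache when using X-LoRA
peft_config = XLoraConfig(
    task_type=TaskType.CAUSAL_LM,
    peft_type=PeftType.XLORA,
    hidden_size=base.config.hidden_size,
    xlora_depth=8,
    adapters={"0": "checkpoint-1", "1": "checkpoint-2"},  # adapter name -> saved LoRA dir
)
model = get_peft_model(base, peft_config)
model.enable_scalings_logging()  # record the scalings emitted during generate()
```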
#!/usr/bin/env python3
""" Checkpoint Cleaning Script
Takes training checkpoints with GPU tensors, optimizer state, extra dict keys, etc.
and outputs a CPU tensor checkpoint with only the `state_dict` along with SHA256
calculation for model zoo compatibility.

Hacked together by / Copyright 2020 Ross Wightman (https://github.com/rwightman)
"""
import torch
import argparse
import os
import hashlib
import shutil
from timm.models import load_state_dict

try:
    import safetensors.torch
    _has_safetensors = True
except ImportError:
    _has_safetensors = False

parser = argparse.ArgumentParser(description='PyTorch Checkpoint Cleaner')
parser.add_argument('--checkpoint', default='', type=str, metavar='PATH',
                    help='path to latest checkpoint (default: none)')
parser.add_argument('--output', default='', type=str, metavar='PATH',
                    help='output path')
parser.add_argument('--no-use-ema', dest='no_use_ema', action='store_true',
                    help='do not use the EMA version of weights even if present')
parser.add_argument('--no-hash', dest='no_hash', action='store_true',
                    help='no hash in output filename')
parser.add_argument('--clean-aux-bn', dest='clean_aux_bn', action='store_true',
                    help='remove auxiliary batch norm layers (from SplitBN training) from checkpoint')
parser.add_argument('--safetensors', action='store_true',
                    help='Save weights using safetensors instead of the default torch way (pickle).')


def main():
    args = parser.parse_args()

    if os.path.exists(args.output):
        print("Error: Output filename ({}) already exists.".format(args.output))
        exit(1)

    clean_checkpoint(
        args.checkpoint,
        args.output,
        not args.no_use_ema,
        args.no_hash,
        args.clean_aux_bn,
        safe_serialization=args.safetensors,
    )


def clean_checkpoint(
        checkpoint,
        output,
        use_ema=True,
        no_hash=False,
        clean_aux_bn=False,
        safe_serialization: bool = False,
):
    # Load an existing checkpoint to CPU, strip everything but the state_dict and re-save
    if checkpoint and os.path.isfile(checkpoint):
        print("=> Loading checkpoint '{}'".format(checkpoint))
        state_dict = load_state_dict(checkpoint, use_ema=use_ema)
        new_state_dict = {}
        for k, v in state_dict.items():
            if clean_aux_bn and 'aux_bn' in k:
                # If all aux_bn keys are removed, the SplitBN layers will end up as normal and
                # load with the unmodified model using BatchNorm2d.
                continue
            name = k[7:] if k.startswith('module.') else k
            new_state_dict[name] = v
        print("=> Loaded state_dict from '{}'".format(checkpoint))

        ext = ''
        if output:
            checkpoint_root, checkpoint_base = os.path.split(output)
            checkpoint_base, ext = os.path.splitext(checkpoint_base)
        else:
            checkpoint_root = ''
            checkpoint_base = os.path.split(checkpoint)[1]
            checkpoint_base = os.path.splitext(checkpoint_base)[0]

        temp_filename = '__' + checkpoint_base
        if safe_serialization:
            assert _has_safetensors, "`pip install safetensors` to use .safetensors"
            safetensors.torch.save_file(new_state_dict, temp_filename)
        else:
            torch.save(new_state_dict, temp_filename)

        with open(temp_filename, 'rb') as f:
            sha_hash = hashlib.sha256(f.read()).hexdigest()

        if ext:
            final_ext = ext
        else:
            final_ext = ('.safetensors' if safe_serialization else '.pth')

        if no_hash:
            final_filename = checkpoint_base + final_ext
        else:
            final_filename = '-'.join([checkpoint_base, sha_hash[:8]]) + final_ext

        shutil.move(temp_filename, os.path.join(checkpoint_root, final_filename))
        print("=> Saved state_dict to '{}', SHA256: {}".format(final_filename, sha_hash))
        return final_filename
    else:
        print("Error: Checkpoint ({}) doesn't exist".format(checkpoint))
        return ''


if __name__ == '__main__':
    main()
pytorch-image-models/clean_checkpoint.py/0
{ "file_path": "pytorch-image-models/clean_checkpoint.py", "repo_id": "pytorch-image-models", "token_count": 1771 }
213
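The core trick in the script above is content-addressing: the cleaned weights are written to a temporary file, hashed, and renamed with the first 8 hex digits of their SHA-256. A standalone sketch of just that step (the helper name is mine):

```py
import hashlib
import os
import shutil

def rename_with_hash(temp_path: str, base: str, ext: str = '.pth') -> str:
    # Hash the serialized file bytes, then embed a short digest in the final name.
    with open(temp_path, 'rb') as f:
        digest = hashlib.sha256(f.read()).hexdigest()
    final_path = os.path.join(os.path.dirname(temp_path), f'{base}-{digest[:8]}{ext}')
    shutil.move(temp_path, final_path)
    return final_path
```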
# CSP-ResNet **CSPResNet** is a convolutional neural network where we apply the Cross Stage Partial Network (CSPNet) approach to [ResNet](https://paperswithcode.com/method/resnet). The CSPNet partitions the feature map of the base layer into two parts and then merges them through a cross-stage hierarchy. The use of a split and merge strategy allows for more gradient flow through the network. ## How do I use this model on an image? To load a pretrained model: ```py >>> import timm >>> model = timm.create_model('cspresnet50', pretrained=True) >>> model.eval() ``` To load and preprocess the image: ```py >>> import urllib >>> from PIL import Image >>> from timm.data import resolve_data_config >>> from timm.data.transforms_factory import create_transform >>> config = resolve_data_config({}, model=model) >>> transform = create_transform(**config) >>> url, filename = ("https://github.com/pytorch/hub/raw/master/images/dog.jpg", "dog.jpg") >>> urllib.request.urlretrieve(url, filename) >>> img = Image.open(filename).convert('RGB') >>> tensor = transform(img).unsqueeze(0) # transform and add batch dimension ``` To get the model predictions: ```py >>> import torch >>> with torch.no_grad(): ... out = model(tensor) >>> probabilities = torch.nn.functional.softmax(out[0], dim=0) >>> print(probabilities.shape) >>> # prints: torch.Size([1000]) ``` To get the top-5 predictions class names: ```py >>> # Get imagenet class mappings >>> url, filename = ("https://raw.githubusercontent.com/pytorch/hub/master/imagenet_classes.txt", "imagenet_classes.txt") >>> urllib.request.urlretrieve(url, filename) >>> with open("imagenet_classes.txt", "r") as f: ... categories = [s.strip() for s in f.readlines()] >>> # Print top categories per image >>> top5_prob, top5_catid = torch.topk(probabilities, 5) >>> for i in range(top5_prob.size(0)): ... print(categories[top5_catid[i]], top5_prob[i].item()) >>> # prints class names and probabilities like: >>> # [('Samoyed', 0.6425196528434753), ('Pomeranian', 0.04062102362513542), ('keeshond', 0.03186424449086189), ('white wolf', 0.01739676296710968), ('Eskimo dog', 0.011717947199940681)] ``` Replace the model name with the variant you want to use, e.g. `cspresnet50`. You can find the IDs in the model summaries at the top of this page. To extract image features with this model, follow the [timm feature extraction examples](../feature_extraction), just change the name of the model you want to use. ## How do I finetune this model? You can finetune any of the pre-trained models just by changing the classifier (the last layer). ```py >>> model = timm.create_model('cspresnet50', pretrained=True, num_classes=NUM_FINETUNE_CLASSES) ``` To finetune on your own dataset, you have to write a training loop or adapt [timm's training script](https://github.com/rwightman/pytorch-image-models/blob/master/train.py) to use your dataset. ## How do I train this model? You can follow the [timm recipe scripts](../scripts) for training a new model afresh. 
## Citation ```BibTeX @misc{wang2019cspnet, title={CSPNet: A New Backbone that can Enhance Learning Capability of CNN}, author={Chien-Yao Wang and Hong-Yuan Mark Liao and I-Hau Yeh and Yueh-Hua Wu and Ping-Yang Chen and Jun-Wei Hsieh}, year={2019}, eprint={1911.11929}, archivePrefix={arXiv}, primaryClass={cs.CV} } ``` <!-- Type: model-index Collections: - Name: CSP ResNet Paper: Title: 'CSPNet: A New Backbone that can Enhance Learning Capability of CNN' URL: https://paperswithcode.com/paper/cspnet-a-new-backbone-that-can-enhance Models: - Name: cspresnet50 In Collection: CSP ResNet Metadata: FLOPs: 5924992000 Parameters: 21620000 File Size: 86679303 Architecture: - 1x1 Convolution - Batch Normalization - Bottleneck Residual Block - Convolution - Global Average Pooling - Max Pooling - ReLU - Residual Block - Residual Connection - Softmax Tasks: - Image Classification Training Techniques: - Label Smoothing - Polynomial Learning Rate Decay - SGD with Momentum - Weight Decay Training Data: - ImageNet ID: cspresnet50 LR: 0.1 Layers: 50 Crop Pct: '0.887' Momentum: 0.9 Batch Size: 128 Image Size: '256' Weight Decay: 0.005 Interpolation: bilinear Training Steps: 8000000 Code: https://github.com/rwightman/pytorch-image-models/blob/d8e69206be253892b2956341fea09fdebfaae4e3/timm/models/cspnet.py#L415 Weights: https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/cspresnet50_ra-d3e8d487.pth Results: - Task: Image Classification Dataset: ImageNet Metrics: Top 1 Accuracy: 79.57% Top 5 Accuracy: 94.71% -->
pytorch-image-models/hfdocs/source/models/csp-resnet.mdx/0
{ "file_path": "pytorch-image-models/hfdocs/source/models/csp-resnet.mdx", "repo_id": "pytorch-image-models", "token_count": 1706 }
214
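To make the split-and-merge idea above concrete, here is a toy CSP-style stage: half the channels bypass the residual blocks and are concatenated back before a transition conv. This is only an illustration of the concept, not timm's `cspnet.py` implementation:

```py
import torch
import torch.nn as nn

class TinyCSPStage(nn.Module):
    def __init__(self, channels: int):
        super().__init__()
        half = channels // 2
        # stand-in for a stack of ResNet bottleneck blocks
        self.blocks = nn.Sequential(nn.Conv2d(half, half, 3, padding=1), nn.ReLU(inplace=True))
        self.transition = nn.Conv2d(channels, channels, 1)  # cross-stage merge

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        a, b = x.chunk(2, dim=1)  # partition the base layer's feature map
        return self.transition(torch.cat([a, self.blocks(b)], dim=1))
```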
# (Gluon) Xception **Xception** is a convolutional neural network architecture that relies solely on [depthwise separable convolution](https://paperswithcode.com/method/depthwise-separable-convolution) layers. The weights from this model were ported from [Gluon](https://cv.gluon.ai/model_zoo/classification.html). ## How do I use this model on an image? To load a pretrained model: ```py >>> import timm >>> model = timm.create_model('gluon_xception65', pretrained=True) >>> model.eval() ``` To load and preprocess the image: ```py >>> import urllib >>> from PIL import Image >>> from timm.data import resolve_data_config >>> from timm.data.transforms_factory import create_transform >>> config = resolve_data_config({}, model=model) >>> transform = create_transform(**config) >>> url, filename = ("https://github.com/pytorch/hub/raw/master/images/dog.jpg", "dog.jpg") >>> urllib.request.urlretrieve(url, filename) >>> img = Image.open(filename).convert('RGB') >>> tensor = transform(img).unsqueeze(0) # transform and add batch dimension ``` To get the model predictions: ```py >>> import torch >>> with torch.no_grad(): ... out = model(tensor) >>> probabilities = torch.nn.functional.softmax(out[0], dim=0) >>> print(probabilities.shape) >>> # prints: torch.Size([1000]) ``` To get the top-5 predictions class names: ```py >>> # Get imagenet class mappings >>> url, filename = ("https://raw.githubusercontent.com/pytorch/hub/master/imagenet_classes.txt", "imagenet_classes.txt") >>> urllib.request.urlretrieve(url, filename) >>> with open("imagenet_classes.txt", "r") as f: ... categories = [s.strip() for s in f.readlines()] >>> # Print top categories per image >>> top5_prob, top5_catid = torch.topk(probabilities, 5) >>> for i in range(top5_prob.size(0)): ... print(categories[top5_catid[i]], top5_prob[i].item()) >>> # prints class names and probabilities like: >>> # [('Samoyed', 0.6425196528434753), ('Pomeranian', 0.04062102362513542), ('keeshond', 0.03186424449086189), ('white wolf', 0.01739676296710968), ('Eskimo dog', 0.011717947199940681)] ``` Replace the model name with the variant you want to use, e.g. `gluon_xception65`. You can find the IDs in the model summaries at the top of this page. To extract image features with this model, follow the [timm feature extraction examples](../feature_extraction), just change the name of the model you want to use. ## How do I finetune this model? You can finetune any of the pre-trained models just by changing the classifier (the last layer). ```py >>> model = timm.create_model('gluon_xception65', pretrained=True, num_classes=NUM_FINETUNE_CLASSES) ``` To finetune on your own dataset, you have to write a training loop or adapt [timm's training script](https://github.com/rwightman/pytorch-image-models/blob/master/train.py) to use your dataset. ## How do I train this model? You can follow the [timm recipe scripts](../scripts) for training a new model afresh. 
## Citation

```BibTeX
@misc{chollet2017xception,
      title={Xception: Deep Learning with Depthwise Separable Convolutions},
      author={François Chollet},
      year={2017},
      eprint={1610.02357},
      archivePrefix={arXiv},
      primaryClass={cs.CV}
}
```

<!--
Type: model-index
Collections:
- Name: Gluon Xception
  Paper:
    Title: 'Xception: Deep Learning with Depthwise Separable Convolutions'
    URL: https://paperswithcode.com/paper/xception-deep-learning-with-depthwise
Models:
- Name: gluon_xception65
  In Collection: Gluon Xception
  Metadata:
    FLOPs: 17594889728
    Parameters: 39920000
    File Size: 160551306
    Architecture:
    - 1x1 Convolution
    - Convolution
    - Dense Connections
    - Depthwise Separable Convolution
    - Global Average Pooling
    - Max Pooling
    - ReLU
    - Residual Connection
    - Softmax
    Tasks:
    - Image Classification
    Training Data:
    - ImageNet
    ID: gluon_xception65
    Crop Pct: '0.903'
    Image Size: '299'
    Interpolation: bicubic
    Code: https://github.com/rwightman/pytorch-image-models/blob/d8e69206be253892b2956341fea09fdebfaae4e3/timm/models/gluon_xception.py#L241
    Weights: https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/gluon_xception-7015a15c.pth
    Results:
    - Task: Image Classification
      Dataset: ImageNet
      Metrics:
        Top 1 Accuracy: 79.7%
        Top 5 Accuracy: 94.87%
-->
pytorch-image-models/hfdocs/source/models/gloun-xception.mdx/0
{ "file_path": "pytorch-image-models/hfdocs/source/models/gloun-xception.mdx", "repo_id": "pytorch-image-models", "token_count": 1562 }
215
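For readers unfamiliar with the building block named above: a depthwise separable convolution factors a standard convolution into a per-channel spatial conv followed by a 1x1 pointwise conv. An illustrative sketch, not timm's exact module:

```py
import torch.nn as nn

def depthwise_separable(in_ch: int, out_ch: int, k: int = 3) -> nn.Sequential:
    return nn.Sequential(
        nn.Conv2d(in_ch, in_ch, k, padding=k // 2, groups=in_ch),  # depthwise: one filter per channel
        nn.Conv2d(in_ch, out_ch, 1),                               # pointwise: mixes channels
    )
```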
# RegNetX

**RegNetX** is a convolutional network design space of simple, regular models, parameterised by depth \\( d \\), initial width \\( w\_{0} > 0 \\), and slope \\( w\_{a} > 0 \\); it generates a different block width \\( u\_{j} \\) for each block \\( j < d \\). The key restriction for the RegNet types of model is that there is a linear parameterisation of block widths (the design space only contains models with this linear structure):

\\[ u\_{j} = w\_{0} + w\_{a}\cdot{j} \\]

For **RegNetX** we have additional restrictions: we set \\( b = 1 \\) (the bottleneck ratio), \\( 12 \leq d \leq 28 \\), and \\( w\_{m} \geq 2 \\) (the width multiplier).

## How do I use this model on an image?

To load a pretrained model:

```py
>>> import timm
>>> model = timm.create_model('regnetx_002', pretrained=True)
>>> model.eval()
```

To load and preprocess the image:

```py
>>> import urllib
>>> from PIL import Image
>>> from timm.data import resolve_data_config
>>> from timm.data.transforms_factory import create_transform

>>> config = resolve_data_config({}, model=model)
>>> transform = create_transform(**config)

>>> url, filename = ("https://github.com/pytorch/hub/raw/master/images/dog.jpg", "dog.jpg")
>>> urllib.request.urlretrieve(url, filename)
>>> img = Image.open(filename).convert('RGB')
>>> tensor = transform(img).unsqueeze(0) # transform and add batch dimension
```

To get the model predictions:

```py
>>> import torch
>>> with torch.no_grad():
...     out = model(tensor)
>>> probabilities = torch.nn.functional.softmax(out[0], dim=0)
>>> print(probabilities.shape)
>>> # prints: torch.Size([1000])
```

To get the top-5 predictions class names:

```py
>>> # Get imagenet class mappings
>>> url, filename = ("https://raw.githubusercontent.com/pytorch/hub/master/imagenet_classes.txt", "imagenet_classes.txt")
>>> urllib.request.urlretrieve(url, filename)
>>> with open("imagenet_classes.txt", "r") as f:
...     categories = [s.strip() for s in f.readlines()]

>>> # Print top categories per image
>>> top5_prob, top5_catid = torch.topk(probabilities, 5)
>>> for i in range(top5_prob.size(0)):
...     print(categories[top5_catid[i]], top5_prob[i].item())
>>> # prints class names and probabilities like:
>>> # [('Samoyed', 0.6425196528434753), ('Pomeranian', 0.04062102362513542), ('keeshond', 0.03186424449086189), ('white wolf', 0.01739676296710968), ('Eskimo dog', 0.011717947199940681)]
```

Replace the model name with the variant you want to use, e.g. `regnetx_002`. You can find the IDs in the model summaries at the top of this page.

To extract image features with this model, follow the [timm feature extraction examples](../feature_extraction), just change the name of the model you want to use.

## How do I finetune this model?

You can finetune any of the pre-trained models just by changing the classifier (the last layer).

```py
>>> model = timm.create_model('regnetx_002', pretrained=True, num_classes=NUM_FINETUNE_CLASSES)
```

To finetune on your own dataset, you have to write a training loop or adapt [timm's training script](https://github.com/rwightman/pytorch-image-models/blob/master/train.py) to use your dataset.

## How do I train this model?

You can follow the [timm recipe scripts](../scripts) for training a new model afresh.
## Citation ```BibTeX @misc{radosavovic2020designing, title={Designing Network Design Spaces}, author={Ilija Radosavovic and Raj Prateek Kosaraju and Ross Girshick and Kaiming He and Piotr Dollรกr}, year={2020}, eprint={2003.13678}, archivePrefix={arXiv}, primaryClass={cs.CV} } ``` <!-- Type: model-index Collections: - Name: RegNetX Paper: Title: Designing Network Design Spaces URL: https://paperswithcode.com/paper/designing-network-design-spaces Models: - Name: regnetx_002 In Collection: RegNetX Metadata: FLOPs: 255276032 Parameters: 2680000 File Size: 10862199 Architecture: - 1x1 Convolution - Batch Normalization - Convolution - Dense Connections - Global Average Pooling - Grouped Convolution - ReLU Tasks: - Image Classification Training Techniques: - SGD with Momentum - Weight Decay Training Data: - ImageNet Training Resources: 8x NVIDIA V100 GPUs ID: regnetx_002 Epochs: 100 Crop Pct: '0.875' Momentum: 0.9 Batch Size: 1024 Image Size: '224' Weight Decay: 5.0e-05 Interpolation: bicubic Code: https://github.com/rwightman/pytorch-image-models/blob/d8e69206be253892b2956341fea09fdebfaae4e3/timm/models/regnet.py#L337 Weights: https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-regnet/regnetx_002-e7e85e5c.pth Results: - Task: Image Classification Dataset: ImageNet Metrics: Top 1 Accuracy: 68.75% Top 5 Accuracy: 88.56% - Name: regnetx_004 In Collection: RegNetX Metadata: FLOPs: 510619136 Parameters: 5160000 File Size: 20841309 Architecture: - 1x1 Convolution - Batch Normalization - Convolution - Dense Connections - Global Average Pooling - Grouped Convolution - ReLU Tasks: - Image Classification Training Techniques: - SGD with Momentum - Weight Decay Training Data: - ImageNet Training Resources: 8x NVIDIA V100 GPUs ID: regnetx_004 Epochs: 100 Crop Pct: '0.875' Momentum: 0.9 Batch Size: 1024 Image Size: '224' Weight Decay: 5.0e-05 Interpolation: bicubic Code: https://github.com/rwightman/pytorch-image-models/blob/d8e69206be253892b2956341fea09fdebfaae4e3/timm/models/regnet.py#L343 Weights: https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-regnet/regnetx_004-7d0e9424.pth Results: - Task: Image Classification Dataset: ImageNet Metrics: Top 1 Accuracy: 72.39% Top 5 Accuracy: 90.82% - Name: regnetx_006 In Collection: RegNetX Metadata: FLOPs: 771659136 Parameters: 6200000 File Size: 24965172 Architecture: - 1x1 Convolution - Batch Normalization - Convolution - Dense Connections - Global Average Pooling - Grouped Convolution - ReLU Tasks: - Image Classification Training Techniques: - SGD with Momentum - Weight Decay Training Data: - ImageNet Training Resources: 8x NVIDIA V100 GPUs ID: regnetx_006 Epochs: 100 Crop Pct: '0.875' Momentum: 0.9 Batch Size: 1024 Image Size: '224' Weight Decay: 5.0e-05 Interpolation: bicubic Code: https://github.com/rwightman/pytorch-image-models/blob/d8e69206be253892b2956341fea09fdebfaae4e3/timm/models/regnet.py#L349 Weights: https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-regnet/regnetx_006-85ec1baa.pth Results: - Task: Image Classification Dataset: ImageNet Metrics: Top 1 Accuracy: 73.84% Top 5 Accuracy: 91.68% - Name: regnetx_008 In Collection: RegNetX Metadata: FLOPs: 1027038208 Parameters: 7260000 File Size: 29235944 Architecture: - 1x1 Convolution - Batch Normalization - Convolution - Dense Connections - Global Average Pooling - Grouped Convolution - ReLU Tasks: - Image Classification Training Techniques: - SGD with Momentum - Weight Decay Training Data: - ImageNet Training Resources: 8x NVIDIA V100 
GPUs ID: regnetx_008 Epochs: 100 Crop Pct: '0.875' Momentum: 0.9 Batch Size: 1024 Image Size: '224' Weight Decay: 5.0e-05 Interpolation: bicubic Code: https://github.com/rwightman/pytorch-image-models/blob/d8e69206be253892b2956341fea09fdebfaae4e3/timm/models/regnet.py#L355 Weights: https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-regnet/regnetx_008-d8b470eb.pth Results: - Task: Image Classification Dataset: ImageNet Metrics: Top 1 Accuracy: 75.05% Top 5 Accuracy: 92.34% - Name: regnetx_016 In Collection: RegNetX Metadata: FLOPs: 2059337856 Parameters: 9190000 File Size: 36988158 Architecture: - 1x1 Convolution - Batch Normalization - Convolution - Dense Connections - Global Average Pooling - Grouped Convolution - ReLU Tasks: - Image Classification Training Techniques: - SGD with Momentum - Weight Decay Training Data: - ImageNet Training Resources: 8x NVIDIA V100 GPUs ID: regnetx_016 Epochs: 100 Crop Pct: '0.875' Momentum: 0.9 Batch Size: 1024 Image Size: '224' Weight Decay: 5.0e-05 Interpolation: bicubic Code: https://github.com/rwightman/pytorch-image-models/blob/d8e69206be253892b2956341fea09fdebfaae4e3/timm/models/regnet.py#L361 Weights: https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-regnet/regnetx_016-65ca972a.pth Results: - Task: Image Classification Dataset: ImageNet Metrics: Top 1 Accuracy: 76.95% Top 5 Accuracy: 93.43% - Name: regnetx_032 In Collection: RegNetX Metadata: FLOPs: 4082555904 Parameters: 15300000 File Size: 61509573 Architecture: - 1x1 Convolution - Batch Normalization - Convolution - Dense Connections - Global Average Pooling - Grouped Convolution - ReLU Tasks: - Image Classification Training Techniques: - SGD with Momentum - Weight Decay Training Data: - ImageNet Training Resources: 8x NVIDIA V100 GPUs ID: regnetx_032 Epochs: 100 Crop Pct: '0.875' Momentum: 0.9 Batch Size: 512 Image Size: '224' Weight Decay: 5.0e-05 Interpolation: bicubic Code: https://github.com/rwightman/pytorch-image-models/blob/d8e69206be253892b2956341fea09fdebfaae4e3/timm/models/regnet.py#L367 Weights: https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-regnet/regnetx_032-ed0c7f7e.pth Results: - Task: Image Classification Dataset: ImageNet Metrics: Top 1 Accuracy: 78.15% Top 5 Accuracy: 94.09% - Name: regnetx_040 In Collection: RegNetX Metadata: FLOPs: 5095167744 Parameters: 22120000 File Size: 88844824 Architecture: - 1x1 Convolution - Batch Normalization - Convolution - Dense Connections - Global Average Pooling - Grouped Convolution - ReLU Tasks: - Image Classification Training Techniques: - SGD with Momentum - Weight Decay Training Data: - ImageNet Training Resources: 8x NVIDIA V100 GPUs ID: regnetx_040 Epochs: 100 Crop Pct: '0.875' Momentum: 0.9 Batch Size: 512 Image Size: '224' Weight Decay: 5.0e-05 Interpolation: bicubic Code: https://github.com/rwightman/pytorch-image-models/blob/d8e69206be253892b2956341fea09fdebfaae4e3/timm/models/regnet.py#L373 Weights: https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-regnet/regnetx_040-73c2a654.pth Results: - Task: Image Classification Dataset: ImageNet Metrics: Top 1 Accuracy: 78.48% Top 5 Accuracy: 94.25% - Name: regnetx_064 In Collection: RegNetX Metadata: FLOPs: 8303405824 Parameters: 26210000 File Size: 105184854 Architecture: - 1x1 Convolution - Batch Normalization - Convolution - Dense Connections - Global Average Pooling - Grouped Convolution - ReLU Tasks: - Image Classification Training Techniques: - SGD with Momentum - Weight Decay Training Data: - 
ImageNet Training Resources: 8x NVIDIA V100 GPUs ID: regnetx_064 Epochs: 100 Crop Pct: '0.875' Momentum: 0.9 Batch Size: 512 Image Size: '224' Weight Decay: 5.0e-05 Interpolation: bicubic Code: https://github.com/rwightman/pytorch-image-models/blob/d8e69206be253892b2956341fea09fdebfaae4e3/timm/models/regnet.py#L379 Weights: https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-regnet/regnetx_064-29278baa.pth Results: - Task: Image Classification Dataset: ImageNet Metrics: Top 1 Accuracy: 79.06% Top 5 Accuracy: 94.47% - Name: regnetx_080 In Collection: RegNetX Metadata: FLOPs: 10276726784 Parameters: 39570000 File Size: 158720042 Architecture: - 1x1 Convolution - Batch Normalization - Convolution - Dense Connections - Global Average Pooling - Grouped Convolution - ReLU Tasks: - Image Classification Training Techniques: - SGD with Momentum - Weight Decay Training Data: - ImageNet Training Resources: 8x NVIDIA V100 GPUs ID: regnetx_080 Epochs: 100 Crop Pct: '0.875' Momentum: 0.9 Batch Size: 512 Image Size: '224' Weight Decay: 5.0e-05 Interpolation: bicubic Code: https://github.com/rwightman/pytorch-image-models/blob/d8e69206be253892b2956341fea09fdebfaae4e3/timm/models/regnet.py#L385 Weights: https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-regnet/regnetx_080-7c7fcab1.pth Results: - Task: Image Classification Dataset: ImageNet Metrics: Top 1 Accuracy: 79.21% Top 5 Accuracy: 94.55% - Name: regnetx_120 In Collection: RegNetX Metadata: FLOPs: 15536378368 Parameters: 46110000 File Size: 184866342 Architecture: - 1x1 Convolution - Batch Normalization - Convolution - Dense Connections - Global Average Pooling - Grouped Convolution - ReLU Tasks: - Image Classification Training Techniques: - SGD with Momentum - Weight Decay Training Data: - ImageNet Training Resources: 8x NVIDIA V100 GPUs ID: regnetx_120 Epochs: 100 Crop Pct: '0.875' Momentum: 0.9 Batch Size: 512 Image Size: '224' Weight Decay: 5.0e-05 Interpolation: bicubic Code: https://github.com/rwightman/pytorch-image-models/blob/d8e69206be253892b2956341fea09fdebfaae4e3/timm/models/regnet.py#L391 Weights: https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-regnet/regnetx_120-65d5521e.pth Results: - Task: Image Classification Dataset: ImageNet Metrics: Top 1 Accuracy: 79.61% Top 5 Accuracy: 94.73% - Name: regnetx_160 In Collection: RegNetX Metadata: FLOPs: 20491740672 Parameters: 54280000 File Size: 217623862 Architecture: - 1x1 Convolution - Batch Normalization - Convolution - Dense Connections - Global Average Pooling - Grouped Convolution - ReLU Tasks: - Image Classification Training Techniques: - SGD with Momentum - Weight Decay Training Data: - ImageNet Training Resources: 8x NVIDIA V100 GPUs ID: regnetx_160 Epochs: 100 Crop Pct: '0.875' Momentum: 0.9 Batch Size: 512 Image Size: '224' Weight Decay: 5.0e-05 Interpolation: bicubic Code: https://github.com/rwightman/pytorch-image-models/blob/d8e69206be253892b2956341fea09fdebfaae4e3/timm/models/regnet.py#L397 Weights: https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-regnet/regnetx_160-c98c4112.pth Results: - Task: Image Classification Dataset: ImageNet Metrics: Top 1 Accuracy: 79.84% Top 5 Accuracy: 94.82% - Name: regnetx_320 In Collection: RegNetX Metadata: FLOPs: 40798958592 Parameters: 107810000 File Size: 431962133 Architecture: - 1x1 Convolution - Batch Normalization - Convolution - Dense Connections - Global Average Pooling - Grouped Convolution - ReLU Tasks: - Image Classification Training Techniques: - 
SGD with Momentum - Weight Decay Training Data: - ImageNet Training Resources: 8x NVIDIA V100 GPUs ID: regnetx_320 Epochs: 100 Crop Pct: '0.875' Momentum: 0.9 Batch Size: 256 Image Size: '224' Weight Decay: 5.0e-05 Interpolation: bicubic Code: https://github.com/rwightman/pytorch-image-models/blob/d8e69206be253892b2956341fea09fdebfaae4e3/timm/models/regnet.py#L403 Weights: https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-regnet/regnetx_320-8ea38b93.pth Results: - Task: Image Classification Dataset: ImageNet Metrics: Top 1 Accuracy: 80.25% Top 5 Accuracy: 95.03% -->
pytorch-image-models/hfdocs/source/models/regnetx.mdx/0
{ "file_path": "pytorch-image-models/hfdocs/source/models/regnetx.mdx", "repo_id": "pytorch-image-models", "token_count": 6574 }
216
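The linear parameterisation \\( u\_{j} = w\_{0} + w\_{a}\cdot{j} \\) described above is easy to reproduce; the sketch below also applies the quantisation via \\( w\_{m} \\) from the paper (snapping widths to a \\( w\_{m} \\)-spaced grid, then rounding to multiples of 8). This is a simplified reading of the paper, not timm's `regnet.py` code, and the example values are merely in the ballpark of the small RegNetX variants:

```py
import numpy as np

def regnet_widths(w_0: float, w_a: float, w_m: float, depth: int) -> np.ndarray:
    u = w_0 + w_a * np.arange(depth)             # continuous per-block widths u_j
    s = np.round(np.log(u / w_0) / np.log(w_m))  # quantised stage exponent
    widths = w_0 * np.power(w_m, s)              # snap widths to the w_m grid
    return (np.round(widths / 8) * 8).astype(int)

print(regnet_widths(w_0=24, w_a=36.44, w_m=2.49, depth=13))
```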
# Results

CSV files containing ImageNet-1K and out-of-distribution (OOD) test set validation results for all models with pretrained weights are located in the repository [results folder](https://github.com/rwightman/pytorch-image-models/tree/master/results).

## Self-trained Weights

The table below includes ImageNet-1k validation results of model weights that I've trained myself. It is not updated as frequently as the CSV results outputs linked above.

|Model | Acc@1 (Err) | Acc@5 (Err) | Param # (M) | Interpolation | Image Size |
|---|---|---|---|---|---|
| efficientnet_b3a | 82.242 (17.758) | 96.114 (3.886) | 12.23 | bicubic | 320 (1.0 crop) |
| efficientnet_b3 | 82.076 (17.924) | 96.020 (3.980) | 12.23 | bicubic | 300 |
| regnety_032 | 82.002 (17.998) | 95.906 (4.094) | 19.44 | bicubic | 224 |
| skresnext50d_32x4d | 81.278 (18.722) | 95.366 (4.634) | 27.5 | bicubic | 288 (1.0 crop) |
| seresnext50d_32x4d | 81.266 (18.734) | 95.620 (4.380) | 27.6 | bicubic | 224 |
| efficientnet_b2a | 80.608 (19.392) | 95.310 (4.690) | 9.11 | bicubic | 288 (1.0 crop) |
| resnet50d | 80.530 (19.470) | 95.160 (4.840) | 25.6 | bicubic | 224 |
| mixnet_xl | 80.478 (19.522) | 94.932 (5.068) | 11.90 | bicubic | 224 |
| efficientnet_b2 | 80.402 (19.598) | 95.076 (4.924) | 9.11 | bicubic | 260 |
| seresnet50 | 80.274 (19.726) | 95.070 (4.930) | 28.1 | bicubic | 224 |
| skresnext50d_32x4d | 80.156 (19.844) | 94.642 (5.358) | 27.5 | bicubic | 224 |
| cspdarknet53 | 80.058 (19.942) | 95.084 (4.916) | 27.6 | bicubic | 256 |
| cspresnext50 | 80.040 (19.960) | 94.944 (5.056) | 20.6 | bicubic | 224 |
| resnext50_32x4d | 79.762 (20.238) | 94.600 (5.400) | 25 | bicubic | 224 |
| resnext50d_32x4d | 79.674 (20.326) | 94.868 (5.132) | 25.1 | bicubic | 224 |
| cspresnet50 | 79.574 (20.426) | 94.712 (5.288) | 21.6 | bicubic | 256 |
| ese_vovnet39b | 79.320 (20.680) | 94.710 (5.290) | 24.6 | bicubic | 224 |
| resnetblur50 | 79.290 (20.710) | 94.632 (5.368) | 25.6 | bicubic | 224 |
| dpn68b | 79.216 (20.784) | 94.414 (5.586) | 12.6 | bicubic | 224 |
| resnet50 | 79.038 (20.962) | 94.390 (5.610) | 25.6 | bicubic | 224 |
| mixnet_l | 78.976 (21.024) | 94.184 (5.816) | 7.33 | bicubic | 224 |
| efficientnet_b1 | 78.692 (21.308) | 94.086 (5.914) | 7.79 | bicubic | 240 |
| efficientnet_es | 78.066 (21.934) | 93.926 (6.074) | 5.44 | bicubic | 224 |
| seresnext26t_32x4d | 77.998 (22.002) | 93.708 (6.292) | 16.8 | bicubic | 224 |
| seresnext26tn_32x4d | 77.986 (22.014) | 93.746 (6.254) | 16.8 | bicubic | 224 |
| efficientnet_b0 | 77.698 (22.302) | 93.532 (6.468) | 5.29 | bicubic | 224 |
| seresnext26d_32x4d | 77.602 (22.398) | 93.608 (6.392) | 16.8 | bicubic | 224 |
| mobilenetv2_120d | 77.294 (22.706) | 93.502 (6.498) | 5.8 | bicubic | 224 |
| mixnet_m | 77.256 (22.744) | 93.418 (6.582) | 5.01 | bicubic | 224 |
| resnet34d | 77.116 (22.884) | 93.382 (6.618) | 21.8 | bicubic | 224 |
| seresnext26_32x4d | 77.104 (22.896) | 93.316 (6.684) | 16.8 | bicubic | 224 |
| skresnet34 | 76.912 (23.088) | 93.322 (6.678) | 22.2 | bicubic | 224 |
| ese_vovnet19b_dw | 76.798 (23.202) | 93.268 (6.732) | 6.5 | bicubic | 224 |
| resnet26d | 76.68 (23.32) | 93.166 (6.834) | 16 | bicubic | 224 |
| densenetblur121d | 76.576 (23.424) | 93.190 (6.810) | 8.0 | bicubic | 224 |
| mobilenetv2_140 | 76.524 (23.476) | 92.990 (7.010) | 6.1 | bicubic | 224 |
| mixnet_s | 75.988 (24.012) | 92.794 (7.206) | 4.13 | bicubic | 224 |
| mobilenetv3_large_100 | 75.766 (24.234) | 92.542 (7.458) | 5.5 | bicubic | 224 |
| mobilenetv3_rw | 75.634 (24.366) | 92.708 (7.292) | 5.5 | bicubic | 224 |
| mnasnet_a1 | 75.448 (24.552) | 92.604 (7.396) | 3.89 | bicubic | 224 |
| resnet26 | 75.292 (24.708) | 92.57 (7.43) | 16 | bicubic | 224 |
| fbnetc_100 | 75.124 (24.876) | 92.386 (7.614) | 5.6 | bilinear | 224 |
| resnet34 | 75.110 (24.890) | 92.284 (7.716) | 22 | bilinear | 224 |
| mobilenetv2_110d | 75.052 (24.948) | 92.180 (7.820) | 4.5 | bicubic | 224 |
| seresnet34 | 74.808 (25.192) | 92.124 (7.876) | 22 | bilinear | 224 |
| mnasnet_b1 | 74.658 (25.342) | 92.114 (7.886) | 4.38 | bicubic | 224 |
| spnasnet_100 | 74.084 (25.916) | 91.818 (8.182) | 4.42 | bilinear | 224 |
| skresnet18 | 73.038 (26.962) | 91.168 (8.832) | 11.9 | bicubic | 224 |
| mobilenetv2_100 | 72.978 (27.022) | 91.016 (8.984) | 3.5 | bicubic | 224 |
| resnet18d | 72.260 (27.740) | 90.696 (9.304) | 11.7 | bicubic | 224 |
| seresnet18 | 71.742 (28.258) | 90.334 (9.666) | 11.8 | bicubic | 224 |

## Ported and Other Weights

For weights ported from other deep learning frameworks (Tensorflow, MXNet GluonCV) or copied from other PyTorch sources, please see the full results tables for ImageNet and various OOD test sets in the [results tables](https://github.com/rwightman/pytorch-image-models/tree/master/results).

Model code .py files contain links to original sources of models and weights.
pytorch-image-models/hfdocs/source/results.mdx/0
{ "file_path": "pytorch-image-models/hfdocs/source/results.mdx", "repo_id": "pytorch-image-models", "token_count": 2259 }
217
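A hedged sketch of consuming those CSVs programmatically; the exact filename (`results-imagenet.csv`) and column names (`model`, `top1`, `top5`) are assumptions about the results folder layout:

```py
import pandas as pd

df = pd.read_csv('results-imagenet.csv')
top = df.sort_values('top1', ascending=False).head(10)  # best ImageNet-1k top-1 scores
print(top[['model', 'top1', 'top5']])
```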
""" Quick n Simple Image Folder, Tarfile based DataSet Hacked together by / Copyright 2019, Ross Wightman """ import io import logging from typing import Optional import torch import torch.utils.data as data from PIL import Image from .readers import create_reader _logger = logging.getLogger(__name__) _ERROR_RETRY = 50 class ImageDataset(data.Dataset): def __init__( self, root, reader=None, split='train', class_map=None, load_bytes=False, input_img_mode='RGB', transform=None, target_transform=None, **kwargs, ): if reader is None or isinstance(reader, str): reader = create_reader( reader or '', root=root, split=split, class_map=class_map, **kwargs, ) self.reader = reader self.load_bytes = load_bytes self.input_img_mode = input_img_mode self.transform = transform self.target_transform = target_transform self._consecutive_errors = 0 def __getitem__(self, index): img, target = self.reader[index] try: img = img.read() if self.load_bytes else Image.open(img) except Exception as e: _logger.warning(f'Skipped sample (index {index}, file {self.reader.filename(index)}). {str(e)}') self._consecutive_errors += 1 if self._consecutive_errors < _ERROR_RETRY: return self.__getitem__((index + 1) % len(self.reader)) else: raise e self._consecutive_errors = 0 if self.input_img_mode and not self.load_bytes: img = img.convert(self.input_img_mode) if self.transform is not None: img = self.transform(img) if target is None: target = -1 elif self.target_transform is not None: target = self.target_transform(target) return img, target def __len__(self): return len(self.reader) def filename(self, index, basename=False, absolute=False): return self.reader.filename(index, basename, absolute) def filenames(self, basename=False, absolute=False): return self.reader.filenames(basename, absolute) class IterableImageDataset(data.IterableDataset): def __init__( self, root, reader=None, split='train', class_map=None, is_training=False, batch_size=1, num_samples=None, seed=42, repeats=0, download=False, input_img_mode='RGB', input_key=None, target_key=None, transform=None, target_transform=None, max_steps=None, ): assert reader is not None if isinstance(reader, str): self.reader = create_reader( reader, root=root, split=split, class_map=class_map, is_training=is_training, batch_size=batch_size, num_samples=num_samples, seed=seed, repeats=repeats, download=download, input_img_mode=input_img_mode, input_key=input_key, target_key=target_key, max_steps=max_steps, ) else: self.reader = reader self.transform = transform self.target_transform = target_transform self._consecutive_errors = 0 def __iter__(self): for img, target in self.reader: if self.transform is not None: img = self.transform(img) if self.target_transform is not None: target = self.target_transform(target) yield img, target def __len__(self): if hasattr(self.reader, '__len__'): return len(self.reader) else: return 0 def set_epoch(self, count): # TFDS and WDS need external epoch count for deterministic cross process shuffle if hasattr(self.reader, 'set_epoch'): self.reader.set_epoch(count) def set_loader_cfg( self, num_workers: Optional[int] = None, ): # TFDS and WDS readers need # workers for correct # samples estimate before loader processes created if hasattr(self.reader, 'set_loader_cfg'): self.reader.set_loader_cfg(num_workers=num_workers) def filename(self, index, basename=False, absolute=False): assert False, 'Filename lookup by index not supported, use filenames().' 
def filenames(self, basename=False, absolute=False): return self.reader.filenames(basename, absolute) class AugMixDataset(torch.utils.data.Dataset): """Dataset wrapper to perform AugMix or other clean/augmentation mixes""" def __init__(self, dataset, num_splits=2): self.augmentation = None self.normalize = None self.dataset = dataset if self.dataset.transform is not None: self._set_transforms(self.dataset.transform) self.num_splits = num_splits def _set_transforms(self, x): assert isinstance(x, (list, tuple)) and len(x) == 3, 'Expecting a tuple/list of 3 transforms' self.dataset.transform = x[0] self.augmentation = x[1] self.normalize = x[2] @property def transform(self): return self.dataset.transform @transform.setter def transform(self, x): self._set_transforms(x) def _normalize(self, x): return x if self.normalize is None else self.normalize(x) def __getitem__(self, i): x, y = self.dataset[i] # all splits share the same dataset base transform x_list = [self._normalize(x)] # first split only normalizes (this is the 'clean' split) # run the full augmentation on the remaining splits for _ in range(self.num_splits - 1): x_list.append(self._normalize(self.augmentation(x))) return tuple(x_list), y def __len__(self): return len(self.dataset)
pytorch-image-models/timm/data/dataset.py/0
{ "file_path": "pytorch-image-models/timm/data/dataset.py", "repo_id": "pytorch-image-models", "token_count": 2955 }
218
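The `AugMixDataset` wrapper above expects its transform as a 3-tuple: a base transform shared by all splits, an augmentation applied only to the non-clean splits, and a final normalize applied everywhere. A usage sketch — the transforms and data root are placeholders:

```py
from torchvision import transforms
from timm.data.dataset import AugMixDataset, ImageDataset

base = transforms.RandomResizedCrop(224)         # x[0]: shared base transform
augment = transforms.ColorJitter(0.4, 0.4, 0.4)  # x[1]: applied to non-clean splits
normalize = transforms.ToTensor()                # x[2]: applied to every split

dataset = ImageDataset('path/to/train', transform=(base, augment, normalize))
augmix = AugMixDataset(dataset, num_splits=3)
(clean, aug1, aug2), target = augmix[0]  # one clean view plus two augmented views
```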
""" A dataset reader that reads tarfile based datasets This reader can extract image samples from: * a single tar of image files * a folder of multiple tarfiles containing imagefiles * a tar of tars containing image files Labels are based on the combined folder and/or tar name structure. Hacked together by / Copyright 2020 Ross Wightman """ import logging import os import pickle import tarfile from glob import glob from typing import List, Tuple, Dict, Set, Optional, Union import numpy as np from timm.utils.misc import natural_key from .class_map import load_class_map from .img_extensions import get_img_extensions from .reader import Reader _logger = logging.getLogger(__name__) CACHE_FILENAME_SUFFIX = '_tarinfos.pickle' class TarState: def __init__(self, tf: tarfile.TarFile = None, ti: tarfile.TarInfo = None): self.tf: tarfile.TarFile = tf self.ti: tarfile.TarInfo = ti self.children: Dict[str, TarState] = {} # child states (tars within tars) def reset(self): self.tf = None def _extract_tarinfo(tf: tarfile.TarFile, parent_info: Dict, extensions: Set[str]): sample_count = 0 for i, ti in enumerate(tf): if not ti.isfile(): continue dirname, basename = os.path.split(ti.path) name, ext = os.path.splitext(basename) ext = ext.lower() if ext == '.tar': with tarfile.open(fileobj=tf.extractfile(ti), mode='r|') as ctf: child_info = dict( name=ti.name, path=os.path.join(parent_info['path'], name), ti=ti, children=[], samples=[]) sample_count += _extract_tarinfo(ctf, child_info, extensions=extensions) _logger.debug(f'{i}/?. Extracted child tarinfos from {ti.name}. {len(child_info["samples"])} images.') parent_info['children'].append(child_info) elif ext in extensions: parent_info['samples'].append(ti) sample_count += 1 return sample_count def extract_tarinfos( root, class_name_to_idx: Optional[Dict] = None, cache_tarinfo: Optional[bool] = None, extensions: Optional[Union[List, Tuple, Set]] = None, sort: bool = True ): extensions = get_img_extensions(as_set=True) if not extensions else set(extensions) root_is_tar = False if os.path.isfile(root): assert os.path.splitext(root)[-1].lower() == '.tar' tar_filenames = [root] root, root_name = os.path.split(root) root_name = os.path.splitext(root_name)[0] root_is_tar = True else: root_name = root.strip(os.path.sep).split(os.path.sep)[-1] tar_filenames = glob(os.path.join(root, '*.tar'), recursive=True) num_tars = len(tar_filenames) tar_bytes = sum([os.path.getsize(f) for f in tar_filenames]) assert num_tars, f'No .tar files found at specified path ({root}).' 
_logger.info(f'Scanning {tar_bytes/1024**2:.2f}MB of tar files...') info = dict(tartrees=[]) cache_path = '' if cache_tarinfo is None: cache_tarinfo = True if tar_bytes > 10*1024**3 else False # FIXME magic number, 10GB if cache_tarinfo: cache_filename = '_' + root_name + CACHE_FILENAME_SUFFIX cache_path = os.path.join(root, cache_filename) if os.path.exists(cache_path): _logger.info(f'Reading tar info from cache file {cache_path}.') with open(cache_path, 'rb') as pf: info = pickle.load(pf) assert len(info['tartrees']) == num_tars, "Cached tartree len doesn't match number of tarfiles" else: for i, fn in enumerate(tar_filenames): path = '' if root_is_tar else os.path.splitext(os.path.basename(fn))[0] with tarfile.open(fn, mode='r|') as tf: # tarinfo scans done in streaming mode parent_info = dict(name=os.path.relpath(fn, root), path=path, ti=None, children=[], samples=[]) num_samples = _extract_tarinfo(tf, parent_info, extensions=extensions) num_children = len(parent_info["children"]) _logger.debug( f'{i}/{num_tars}. Extracted tarinfos from {fn}. {num_children} children, {num_samples} samples.') info['tartrees'].append(parent_info) if cache_path: _logger.info(f'Writing tar info to cache file {cache_path}.') with open(cache_path, 'wb') as pf: pickle.dump(info, pf) samples = [] labels = [] build_class_map = False if class_name_to_idx is None: build_class_map = True # Flatten tartree info into lists of samples and targets w/ targets based on label id via # class map arg or from unique paths. # NOTE: currently only flattening up to two-levels, filesystem .tars and then one level of sub-tar children # this covers my current use cases and keeps things a little easier to test for now. tarfiles = [] def _label_from_paths(*path, leaf_only=True): path = os.path.join(*path).strip(os.path.sep) return path.split(os.path.sep)[-1] if leaf_only else path.replace(os.path.sep, '_') def _add_samples(info, fn): added = 0 for s in info['samples']: label = _label_from_paths(info['path'], os.path.dirname(s.path)) if not build_class_map and label not in class_name_to_idx: continue samples.append((s, fn, info['ti'])) labels.append(label) added += 1 return added _logger.info(f'Collecting samples and building tar states.') for parent_info in info['tartrees']: # if tartree has children, we assume all samples are at the child level tar_name = None if root_is_tar else parent_info['name'] tar_state = TarState() parent_added = 0 for child_info in parent_info['children']: child_added = _add_samples(child_info, fn=tar_name) if child_added: tar_state.children[child_info['name']] = TarState(ti=child_info['ti']) parent_added += child_added parent_added += _add_samples(parent_info, fn=tar_name) if parent_added: tarfiles.append((tar_name, tar_state)) del info if build_class_map: # build class index sorted_labels = list(sorted(set(labels), key=natural_key)) class_name_to_idx = {c: idx for idx, c in enumerate(sorted_labels)} _logger.info(f'Mapping targets and sorting samples.') samples_and_targets = [(s, class_name_to_idx[l]) for s, l in zip(samples, labels) if l in class_name_to_idx] if sort: samples_and_targets = sorted(samples_and_targets, key=lambda k: natural_key(k[0][0].path)) samples, targets = zip(*samples_and_targets) samples = np.array(samples) targets = np.array(targets) _logger.info(f'Finished processing {len(samples)} samples across {len(tarfiles)} tar files.') return samples, targets, class_name_to_idx, tarfiles class ReaderImageInTar(Reader): """ Multi-tarfile dataset reader where there is one .tar file per class 
""" def __init__(self, root, class_map='', cache_tarfiles=True, cache_tarinfo=None): super().__init__() class_name_to_idx = None if class_map: class_name_to_idx = load_class_map(class_map, root) self.root = root self.samples, self.targets, self.class_name_to_idx, tarfiles = extract_tarinfos( self.root, class_name_to_idx=class_name_to_idx, cache_tarinfo=cache_tarinfo ) self.class_idx_to_name = {v: k for k, v in self.class_name_to_idx.items()} if len(tarfiles) == 1 and tarfiles[0][0] is None: self.root_is_tar = True self.tar_state = tarfiles[0][1] else: self.root_is_tar = False self.tar_state = dict(tarfiles) self.cache_tarfiles = cache_tarfiles def __len__(self): return len(self.samples) def __getitem__(self, index): sample = self.samples[index] target = self.targets[index] sample_ti, parent_fn, child_ti = sample parent_abs = os.path.join(self.root, parent_fn) if parent_fn else self.root tf = None cache_state = None if self.cache_tarfiles: cache_state = self.tar_state if self.root_is_tar else self.tar_state[parent_fn] tf = cache_state.tf if tf is None: tf = tarfile.open(parent_abs) if self.cache_tarfiles: cache_state.tf = tf if child_ti is not None: ctf = cache_state.children[child_ti.name].tf if self.cache_tarfiles else None if ctf is None: ctf = tarfile.open(fileobj=tf.extractfile(child_ti)) if self.cache_tarfiles: cache_state.children[child_ti.name].tf = ctf tf = ctf return tf.extractfile(sample_ti), target def _filename(self, index, basename=False, absolute=False): filename = self.samples[index][0].name if basename: filename = os.path.basename(filename) return filename
pytorch-image-models/timm/data/readers/reader_image_in_tar.py/0
{ "file_path": "pytorch-image-models/timm/data/readers/reader_image_in_tar.py", "repo_id": "pytorch-image-models", "token_count": 4050 }
219
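A minimal usage sketch for the reader above, assuming a hypothetical `imagenet_tars/` directory of per-class tarfiles; the path and class names are illustrative, not from the source:

```py
from PIL import Image
from timm.data.readers.reader_image_in_tar import ReaderImageInTar

# hypothetical layout: imagenet_tars/n01440764.tar, imagenet_tars/n01443537.tar, ...
# each tar (or sub-tar / internal folder) name becomes a class label
reader = ReaderImageInTar('imagenet_tars/', cache_tarfiles=True)

print(len(reader))                        # total image samples across all tars
fileobj, target = reader[0]               # file handle into the tar + class index
img = Image.open(fileobj).convert('RGB')  # decode as usual
print(reader.class_idx_to_name[target])   # e.g. 'n01440764'
```

On the first scan, per-tar `TarInfo` indexes are pickled to a `_tarinfos.pickle` cache when the combined tar size exceeds the 10GB threshold, so later constructions skip the streaming scan.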
""" BlurPool layer inspired by - Kornia's Max_BlurPool2d - Making Convolutional Networks Shift-Invariant Again :cite:`zhang2019shiftinvar` Hacked together by Chris Ha and Ross Wightman """ from functools import partial from typing import Optional, Type import torch import torch.nn as nn import torch.nn.functional as F import numpy as np from .padding import get_padding from .typing import LayerType class BlurPool2d(nn.Module): r"""Creates a module that computes blurs and downsample a given feature map. See :cite:`zhang2019shiftinvar` for more details. Corresponds to the Downsample class, which does blurring and subsampling Args: channels = Number of input channels filt_size (int): binomial filter size for blurring. currently supports 3 (default) and 5. stride (int): downsampling filter stride Returns: torch.Tensor: the transformed tensor. """ def __init__( self, channels: Optional[int] = None, filt_size: int = 3, stride: int = 2, pad_mode: str = 'reflect', ) -> None: super(BlurPool2d, self).__init__() assert filt_size > 1 self.channels = channels self.filt_size = filt_size self.stride = stride self.pad_mode = pad_mode self.padding = [get_padding(filt_size, stride, dilation=1)] * 4 coeffs = torch.tensor((np.poly1d((0.5, 0.5)) ** (self.filt_size - 1)).coeffs.astype(np.float32)) blur_filter = (coeffs[:, None] * coeffs[None, :])[None, None, :, :] if channels is not None: blur_filter = blur_filter.repeat(self.channels, 1, 1, 1) self.register_buffer('filt', blur_filter, persistent=False) def forward(self, x: torch.Tensor) -> torch.Tensor: x = F.pad(x, self.padding, mode=self.pad_mode) if self.channels is None: channels = x.shape[1] weight = self.filt.expand(channels, 1, self.filt_size, self.filt_size) else: channels = self.channels weight = self.filt return F.conv2d(x, weight, stride=self.stride, groups=channels) def create_aa( aa_layer: LayerType, channels: Optional[int] = None, stride: int = 2, enable: bool = True, noop: Optional[Type[nn.Module]] = nn.Identity ) -> nn.Module: """ Anti-aliasing """ if not aa_layer or not enable: return noop() if noop is not None else None if isinstance(aa_layer, str): aa_layer = aa_layer.lower().replace('_', '').replace('-', '') if aa_layer == 'avg' or aa_layer == 'avgpool': aa_layer = nn.AvgPool2d elif aa_layer == 'blur' or aa_layer == 'blurpool': aa_layer = BlurPool2d elif aa_layer == 'blurpc': aa_layer = partial(BlurPool2d, pad_mode='constant') else: assert False, f"Unknown anti-aliasing layer ({aa_layer})." try: return aa_layer(channels=channels, stride=stride) except TypeError as e: return aa_layer(stride)
pytorch-image-models/timm/layers/blur_pool.py/0
{ "file_path": "pytorch-image-models/timm/layers/blur_pool.py", "repo_id": "pytorch-image-models", "token_count": 1352 }
220
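A quick sketch of `BlurPool2d` behavior, shapes checked against the module above: the kernel is an outer product of binomial coefficients (for `filt_size=3`, `[0.25, 0.5, 0.25]`), applied depthwise after reflect padding, then strided:

```py
import torch
from timm.layers import BlurPool2d

x = torch.randn(2, 64, 56, 56)

# fixed-channel variant: the filter buffer is pre-repeated to 64 channels
blur = BlurPool2d(channels=64, filt_size=3, stride=2)
print(blur(x).shape)      # torch.Size([2, 64, 28, 28])

# channels=None defers to the input: the 1-channel filter expands at runtime
blur_any = BlurPool2d(filt_size=5, stride=2)
print(blur_any(x).shape)  # torch.Size([2, 64, 28, 28])

print(blur.filt[0, 0])    # 3x3 binomial kernel, sums to 1.0
```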
""" 'Fast' Normalization Functions For GroupNorm and LayerNorm these functions bypass typical AMP upcast to float32. Additionally, for LayerNorm, the APEX fused LN is used if available (which also does not upcast) Hacked together by / Copyright 2022 Ross Wightman """ from typing import List, Optional import torch from torch.nn import functional as F try: from apex.normalization.fused_layer_norm import fused_layer_norm_affine has_apex = True except ImportError: has_apex = False try: from apex.normalization.fused_layer_norm import fused_rms_norm_affine, fused_rms_norm has_apex_rmsnorm = True except ImportError: has_apex_rmsnorm = False # fast (ie lower precision LN) can be disabled with this flag if issues crop up _USE_FAST_NORM = False # defaulting to False for now def is_fast_norm(): return _USE_FAST_NORM def set_fast_norm(enable=True): global _USE_FAST_NORM _USE_FAST_NORM = enable def fast_group_norm( x: torch.Tensor, num_groups: int, weight: Optional[torch.Tensor] = None, bias: Optional[torch.Tensor] = None, eps: float = 1e-5 ) -> torch.Tensor: if torch.jit.is_scripting(): # currently cannot use is_autocast_enabled within torchscript return F.group_norm(x, num_groups, weight, bias, eps) if torch.is_autocast_enabled(): # normally native AMP casts GN inputs to float32 # here we use the low precision autocast dtype # FIXME what to do re CPU autocast? dt = torch.get_autocast_gpu_dtype() x, weight, bias = x.to(dt), weight.to(dt), bias.to(dt) if bias is not None else None with torch.cuda.amp.autocast(enabled=False): return F.group_norm(x, num_groups, weight, bias, eps) def fast_layer_norm( x: torch.Tensor, normalized_shape: List[int], weight: Optional[torch.Tensor] = None, bias: Optional[torch.Tensor] = None, eps: float = 1e-5 ) -> torch.Tensor: if torch.jit.is_scripting(): # currently cannot use is_autocast_enabled within torchscript return F.layer_norm(x, normalized_shape, weight, bias, eps) if has_apex: return fused_layer_norm_affine(x, weight, bias, normalized_shape, eps) if torch.is_autocast_enabled(): # normally native AMP casts LN inputs to float32 # apex LN does not, this is behaving like Apex dt = torch.get_autocast_gpu_dtype() # FIXME what to do re CPU autocast? x, weight, bias = x.to(dt), weight.to(dt), bias.to(dt) if bias is not None else None with torch.cuda.amp.autocast(enabled=False): return F.layer_norm(x, normalized_shape, weight, bias, eps) def rms_norm( x: torch.Tensor, normalized_shape: List[int], weight: Optional[torch.Tensor] = None, eps: float = 1e-5, ): norm_ndim = len(normalized_shape) if torch.jit.is_scripting(): # ndim = len(x.shape) # dims = list(range(ndim - norm_ndim, ndim)) # this doesn't work on pytorch <= 1.13.x # NOTE -ve dims cause torchscript to crash in some cases, out of options to work around assert norm_ndim == 1 v = torch.var(x, dim=-1).unsqueeze(-1) # ts crashes with -ve dim + keepdim=True else: dims = tuple(range(-1, -norm_ndim - 1, -1)) v = torch.var(x, dim=dims, keepdim=True) x = x * torch.rsqrt(v + eps) if weight is not None: x = x * weight return x def fast_rms_norm( x: torch.Tensor, normalized_shape: List[int], weight: Optional[torch.Tensor] = None, eps: float = 1e-5, ) -> torch.Tensor: if torch.jit.is_scripting(): # this must be by itself, cannot merge with has_apex_rmsnorm return rms_norm(x, normalized_shape, weight, eps) if has_apex_rmsnorm: if weight is None: return fused_rms_norm(x, normalized_shape, eps) else: return fused_rms_norm_affine(x, weight, normalized_shape, eps) # fallback return rms_norm(x, normalized_shape, weight, eps)
pytorch-image-models/timm/layers/fast_norm.py/0
{ "file_path": "pytorch-image-models/timm/layers/fast_norm.py", "repo_id": "pytorch-image-models", "token_count": 1639 }
221
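A short sketch of the toggles above. The module-level flag is read by timm's norm layers when they are constructed; the functional forms can also be called directly:

```py
import torch
from timm.layers.fast_norm import is_fast_norm, set_fast_norm, fast_layer_norm

# off by default; timm norm modules call is_fast_norm() to pick a forward path
set_fast_norm(True)
assert is_fast_norm()

x = torch.randn(8, 197, 384)
w, b = torch.ones(384), torch.zeros(384)

# without autocast (and without APEX installed) this matches F.layer_norm;
# under CUDA autocast it casts inputs to the autocast dtype instead of float32
y = fast_layer_norm(x, [384], w, b, eps=1e-6)
print(y.shape)  # torch.Size([8, 197, 384])
```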
""" PyTorch Mixed Convolution Paper: MixConv: Mixed Depthwise Convolutional Kernels (https://arxiv.org/abs/1907.09595) Hacked together by / Copyright 2020 Ross Wightman """ import torch from torch import nn as nn from .conv2d_same import create_conv2d_pad def _split_channels(num_chan, num_groups): split = [num_chan // num_groups for _ in range(num_groups)] split[0] += num_chan - sum(split) return split class MixedConv2d(nn.ModuleDict): """ Mixed Grouped Convolution Based on MDConv and GroupedConv in MixNet impl: https://github.com/tensorflow/tpu/blob/master/models/official/mnasnet/mixnet/custom_layers.py """ def __init__(self, in_channels, out_channels, kernel_size=3, stride=1, padding='', dilation=1, depthwise=False, **kwargs): super(MixedConv2d, self).__init__() kernel_size = kernel_size if isinstance(kernel_size, list) else [kernel_size] num_groups = len(kernel_size) in_splits = _split_channels(in_channels, num_groups) out_splits = _split_channels(out_channels, num_groups) self.in_channels = sum(in_splits) self.out_channels = sum(out_splits) for idx, (k, in_ch, out_ch) in enumerate(zip(kernel_size, in_splits, out_splits)): conv_groups = in_ch if depthwise else 1 # use add_module to keep key space clean self.add_module( str(idx), create_conv2d_pad( in_ch, out_ch, k, stride=stride, padding=padding, dilation=dilation, groups=conv_groups, **kwargs) ) self.splits = in_splits def forward(self, x): x_split = torch.split(x, self.splits, 1) x_out = [c(x_split[i]) for i, c in enumerate(self.values())] x = torch.cat(x_out, 1) return x
pytorch-image-models/timm/layers/mixed_conv2d.py/0
{ "file_path": "pytorch-image-models/timm/layers/mixed_conv2d.py", "repo_id": "pytorch-image-models", "token_count": 834 }
222
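A minimal sketch of `MixedConv2d`: channels are split per kernel size via `_split_channels`, with any remainder folded into the first group:

```py
import torch
from timm.layers import MixedConv2d

# _split_channels(96, 3) -> [32, 32, 32]; for 100 channels it would be [34, 33, 33]
m = MixedConv2d(96, 96, kernel_size=[3, 5, 7], stride=1, depthwise=True)

x = torch.randn(2, 96, 32, 32)
print(m(x).shape)  # torch.Size([2, 96, 32, 32]), 'same' padding per branch

# one conv per kernel size, keyed '0', '1', '2' in the ModuleDict
for name, conv in m.items():
    print(name, conv.kernel_size)
```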
""" Split Attention Conv2d (for ResNeSt Models) Paper: `ResNeSt: Split-Attention Networks` - /https://arxiv.org/abs/2004.08955 Adapted from original PyTorch impl at https://github.com/zhanghang1989/ResNeSt Modified for torchscript compat, performance, and consistency with timm by Ross Wightman """ import torch import torch.nn.functional as F from torch import nn from .helpers import make_divisible class RadixSoftmax(nn.Module): def __init__(self, radix, cardinality): super(RadixSoftmax, self).__init__() self.radix = radix self.cardinality = cardinality def forward(self, x): batch = x.size(0) if self.radix > 1: x = x.view(batch, self.cardinality, self.radix, -1).transpose(1, 2) x = F.softmax(x, dim=1) x = x.reshape(batch, -1) else: x = torch.sigmoid(x) return x class SplitAttn(nn.Module): """Split-Attention (aka Splat) """ def __init__(self, in_channels, out_channels=None, kernel_size=3, stride=1, padding=None, dilation=1, groups=1, bias=False, radix=2, rd_ratio=0.25, rd_channels=None, rd_divisor=8, act_layer=nn.ReLU, norm_layer=None, drop_layer=None, **kwargs): super(SplitAttn, self).__init__() out_channels = out_channels or in_channels self.radix = radix mid_chs = out_channels * radix if rd_channels is None: attn_chs = make_divisible(in_channels * radix * rd_ratio, min_value=32, divisor=rd_divisor) else: attn_chs = rd_channels * radix padding = kernel_size // 2 if padding is None else padding self.conv = nn.Conv2d( in_channels, mid_chs, kernel_size, stride, padding, dilation, groups=groups * radix, bias=bias, **kwargs) self.bn0 = norm_layer(mid_chs) if norm_layer else nn.Identity() self.drop = drop_layer() if drop_layer is not None else nn.Identity() self.act0 = act_layer(inplace=True) self.fc1 = nn.Conv2d(out_channels, attn_chs, 1, groups=groups) self.bn1 = norm_layer(attn_chs) if norm_layer else nn.Identity() self.act1 = act_layer(inplace=True) self.fc2 = nn.Conv2d(attn_chs, mid_chs, 1, groups=groups) self.rsoftmax = RadixSoftmax(radix, groups) def forward(self, x): x = self.conv(x) x = self.bn0(x) x = self.drop(x) x = self.act0(x) B, RC, H, W = x.shape if self.radix > 1: x = x.reshape((B, self.radix, RC // self.radix, H, W)) x_gap = x.sum(dim=1) else: x_gap = x x_gap = x_gap.mean((2, 3), keepdim=True) x_gap = self.fc1(x_gap) x_gap = self.bn1(x_gap) x_gap = self.act1(x_gap) x_attn = self.fc2(x_gap) x_attn = self.rsoftmax(x_attn).view(B, -1, 1, 1) if self.radix > 1: out = (x * x_attn.reshape((B, self.radix, RC // self.radix, 1, 1))).sum(dim=1) else: out = x * x_attn return out.contiguous()
pytorch-image-models/timm/layers/split_attn.py/0
{ "file_path": "pytorch-image-models/timm/layers/split_attn.py", "repo_id": "pytorch-image-models", "token_count": 1533 }
223
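A shape-check sketch of the split-attention block, parameters as defined above: the conv expands to `out_channels * radix`, the radix branches are weighted via `RadixSoftmax`, then summed:

```py
import torch
from torch import nn
from timm.layers import SplitAttn

sa = SplitAttn(64, 64, kernel_size=3, radix=2, norm_layer=nn.BatchNorm2d)
x = torch.randn(2, 64, 28, 28)
print(sa(x).shape)  # torch.Size([2, 64, 28, 28])

# internally: conv -> (2, 128, 28, 28), reshaped to (2, radix=2, 64, 28, 28);
# attention logits come from global pool -> fc1 -> fc2 -> RadixSoftmax
```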
""" EfficientNet, MobileNetV3, etc Builder Assembles EfficieNet and related network feature blocks from string definitions. Handles stride, dilation calculations, and selects feature extraction points. Hacked together by / Copyright 2019, Ross Wightman """ from typing import Callable, Optional import logging import math import re from copy import deepcopy from functools import partial from typing import Any, Dict, List import torch.nn as nn from timm.layers import CondConv2d, get_condconv_initializer, get_act_layer, get_attn, make_divisible, LayerType from ._efficientnet_blocks import * from ._manipulate import named_modules __all__ = ["EfficientNetBuilder", "decode_arch_def", "efficientnet_init_weights", 'resolve_bn_args', 'resolve_act_layer', 'round_channels', 'BN_MOMENTUM_TF_DEFAULT', 'BN_EPS_TF_DEFAULT'] _logger = logging.getLogger(__name__) _DEBUG_BUILDER = False # Defaults used for Google/Tensorflow training of mobile networks /w RMSprop as per # papers and TF reference implementations. PT momentum equiv for TF decay is (1 - TF decay) # NOTE: momentum varies btw .99 and .9997 depending on source # .99 in official TF TPU impl # .9997 (/w .999 in search space) for paper BN_MOMENTUM_TF_DEFAULT = 1 - 0.99 BN_EPS_TF_DEFAULT = 1e-3 _BN_ARGS_TF = dict(momentum=BN_MOMENTUM_TF_DEFAULT, eps=BN_EPS_TF_DEFAULT) BlockArgs = List[List[Dict[str, Any]]] def get_bn_args_tf(): return _BN_ARGS_TF.copy() def resolve_bn_args(kwargs): bn_args = {} bn_momentum = kwargs.pop('bn_momentum', None) if bn_momentum is not None: bn_args['momentum'] = bn_momentum bn_eps = kwargs.pop('bn_eps', None) if bn_eps is not None: bn_args['eps'] = bn_eps return bn_args def resolve_act_layer(kwargs, default='relu'): return get_act_layer(kwargs.pop('act_layer', default)) def round_channels(channels, multiplier=1.0, divisor=8, channel_min=None, round_limit=0.9): """Round number of filters based on depth multiplier.""" if not multiplier: return channels return make_divisible(channels * multiplier, divisor, channel_min, round_limit=round_limit) def _log_info_if(msg, condition): if condition: _logger.info(msg) def _parse_ksize(ss): if ss.isdigit(): return int(ss) else: return [int(k) for k in ss.split('.')] def _decode_block_str(block_str): """ Decode block definition string Gets a list of block arg (dicts) through a string notation of arguments. E.g. ir_r2_k3_s2_e1_i32_o16_se0.25_noskip All args can exist in any order with the exception of the leading string which is assumed to indicate the block type. leading string - block type ( ir = InvertedResidual, ds = DepthwiseSep, dsa = DeptwhiseSep with pw act, cn = ConvBnAct) r - number of repeat blocks, k - kernel size, s - strides (1-9), e - expansion ratio, c - output channels, se - squeeze/excitation ratio n - activation fn ('re', 'r6', 'hs', or 'sw') Args: block_str: a string representation of block arguments. 
Returns: A list of block args (dicts) Raises: ValueError: if the string def not properly specified (TODO) """ assert isinstance(block_str, str) ops = block_str.split('_') block_type = ops[0] # take the block type off the front ops = ops[1:] options = {} skip = None for op in ops: # string options being checked on individual basis, combine if they grow if op == 'noskip': skip = False # force no skip connection elif op == 'skip': skip = True # force a skip connection elif op.startswith('n'): # activation fn key = op[0] v = op[1:] if v == 're': value = get_act_layer('relu') elif v == 'r6': value = get_act_layer('relu6') elif v == 'hs': value = get_act_layer('hard_swish') elif v == 'sw': value = get_act_layer('swish') # aka SiLU elif v == 'mi': value = get_act_layer('mish') else: continue options[key] = value else: # all numeric options splits = re.split(r'(\d.*)', op) if len(splits) >= 2: key, value = splits[:2] options[key] = value # if act_layer is None, the model default (passed to model init) will be used act_layer = options['n'] if 'n' in options else None start_kernel_size = _parse_ksize(options['a']) if 'a' in options else 1 end_kernel_size = _parse_ksize(options['p']) if 'p' in options else 1 force_in_chs = int(options['fc']) if 'fc' in options else 0 # FIXME hack to deal with in_chs issue in TPU def num_repeat = int(options['r']) # each type of block has different valid arguments, fill accordingly block_args = dict( block_type=block_type, out_chs=int(options['c']), stride=int(options['s']), act_layer=act_layer, ) if block_type == 'ir': block_args.update(dict( dw_kernel_size=_parse_ksize(options['k']), exp_kernel_size=start_kernel_size, pw_kernel_size=end_kernel_size, exp_ratio=float(options['e']), se_ratio=float(options.get('se', 0.)), noskip=skip is False, s2d=int(options.get('d', 0)) > 0, )) if 'cc' in options: block_args['num_experts'] = int(options['cc']) elif block_type == 'ds' or block_type == 'dsa': block_args.update(dict( dw_kernel_size=_parse_ksize(options['k']), pw_kernel_size=end_kernel_size, se_ratio=float(options.get('se', 0.)), pw_act=block_type == 'dsa', noskip=block_type == 'dsa' or skip is False, s2d=int(options.get('d', 0)) > 0, )) elif block_type == 'er': block_args.update(dict( exp_kernel_size=_parse_ksize(options['k']), pw_kernel_size=end_kernel_size, exp_ratio=float(options['e']), force_in_chs=force_in_chs, se_ratio=float(options.get('se', 0.)), noskip=skip is False, )) elif block_type == 'cn': block_args.update(dict( kernel_size=int(options['k']), skip=skip is True, )) elif block_type == 'uir': # override exp / proj kernels for start/end in uir block start_kernel_size = _parse_ksize(options['a']) if 'a' in options else 0 end_kernel_size = _parse_ksize(options['p']) if 'p' in options else 0 block_args.update(dict( dw_kernel_size_start=start_kernel_size, # overload exp ks arg for dw start dw_kernel_size_mid=_parse_ksize(options['k']), dw_kernel_size_end=end_kernel_size, # overload pw ks arg for dw end exp_ratio=float(options['e']), se_ratio=float(options.get('se', 0.)), noskip=skip is False, )) elif block_type == 'mha': kv_dim = int(options['d']) block_args.update(dict( dw_kernel_size=_parse_ksize(options['k']), num_heads=int(options['h']), key_dim=kv_dim, value_dim=kv_dim, kv_stride=int(options.get('v', 1)), noskip=skip is False, )) elif block_type == 'mqa': kv_dim = int(options['d']) block_args.update(dict( dw_kernel_size=_parse_ksize(options['k']), num_heads=int(options['h']), key_dim=kv_dim, value_dim=kv_dim, kv_stride=int(options.get('v', 1)), noskip=skip is 
False, )) else: assert False, 'Unknown block type (%s)' % block_type if 'gs' in options: block_args['group_size'] = int(options['gs']) return block_args, num_repeat def _scale_stage_depth(stack_args, repeats, depth_multiplier=1.0, depth_trunc='ceil'): """ Per-stage depth scaling Scales the block repeats in each stage. This depth scaling impl maintains compatibility with the EfficientNet scaling method, while allowing sensible scaling for other models that may have multiple block arg definitions in each stage. """ # We scale the total repeat count for each stage, there may be multiple # block arg defs per stage so we need to sum. num_repeat = sum(repeats) if depth_trunc == 'round': # Truncating to int by rounding allows stages with few repeats to remain # proportionally smaller for longer. This is a good choice when stage definitions # include single repeat stages that we'd prefer to keep that way as long as possible num_repeat_scaled = max(1, round(num_repeat * depth_multiplier)) else: # The default for EfficientNet truncates repeats to int via 'ceil'. # Any multiplier > 1.0 will result in an increased depth for every stage. num_repeat_scaled = int(math.ceil(num_repeat * depth_multiplier)) # Proportionally distribute repeat count scaling to each block definition in the stage. # Allocation is done in reverse as it results in the first block being less likely to be scaled. # The first block makes less sense to repeat in most of the arch definitions. repeats_scaled = [] for r in repeats[::-1]: rs = max(1, round((r / num_repeat * num_repeat_scaled))) repeats_scaled.append(rs) num_repeat -= r num_repeat_scaled -= rs repeats_scaled = repeats_scaled[::-1] # Apply the calculated scaling to each block arg in the stage sa_scaled = [] for ba, rep in zip(stack_args, repeats_scaled): sa_scaled.extend([deepcopy(ba) for _ in range(rep)]) return sa_scaled def decode_arch_def( arch_def, depth_multiplier=1.0, depth_trunc='ceil', experts_multiplier=1, fix_first_last=False, group_size=None, ): """ Decode block architecture definition strings -> block kwargs Args: arch_def: architecture definition strings, list of list of strings depth_multiplier: network depth multiplier depth_trunc: networ depth truncation mode when applying multiplier experts_multiplier: CondConv experts multiplier fix_first_last: fix first and last block depths when multiplier is applied group_size: group size override for all blocks that weren't explicitly set in arch string Returns: list of list of block kwargs """ arch_args = [] if isinstance(depth_multiplier, tuple): assert len(depth_multiplier) == len(arch_def) else: depth_multiplier = (depth_multiplier,) * len(arch_def) for stack_idx, (block_strings, multiplier) in enumerate(zip(arch_def, depth_multiplier)): assert isinstance(block_strings, list) stack_args = [] repeats = [] for block_str in block_strings: assert isinstance(block_str, str) ba, rep = _decode_block_str(block_str) if ba.get('num_experts', 0) > 0 and experts_multiplier > 1: ba['num_experts'] *= experts_multiplier if group_size is not None: ba.setdefault('group_size', group_size) stack_args.append(ba) repeats.append(rep) if fix_first_last and (stack_idx == 0 or stack_idx == len(arch_def) - 1): arch_args.append(_scale_stage_depth(stack_args, repeats, 1.0, depth_trunc)) else: arch_args.append(_scale_stage_depth(stack_args, repeats, multiplier, depth_trunc)) return arch_args class EfficientNetBuilder: """ Build Trunk Blocks This ended up being somewhat of a cross between 
https://github.com/tensorflow/tpu/blob/master/models/official/mnasnet/mnasnet_models.py and https://github.com/facebookresearch/maskrcnn-benchmark/blob/master/maskrcnn_benchmark/modeling/backbone/fbnet_builder.py """ def __init__( self, output_stride: int = 32, pad_type: str = '', round_chs_fn: Callable = round_channels, se_from_exp: bool = False, act_layer: Optional[LayerType] = None, norm_layer: Optional[LayerType] = None, aa_layer: Optional[LayerType] = None, se_layer: Optional[LayerType] = None, drop_path_rate: float = 0., layer_scale_init_value: Optional[float] = None, feature_location: str = '', ): self.output_stride = output_stride self.pad_type = pad_type self.round_chs_fn = round_chs_fn self.se_from_exp = se_from_exp # calculate se channel reduction from expanded (mid) chs self.act_layer = act_layer self.norm_layer = norm_layer self.aa_layer = aa_layer self.se_layer = get_attn(se_layer) try: self.se_layer(8, rd_ratio=1.0) # test if attn layer accepts rd_ratio arg self.se_has_ratio = True except TypeError: self.se_has_ratio = False self.drop_path_rate = drop_path_rate self.layer_scale_init_value = layer_scale_init_value if feature_location == 'depthwise': # old 'depthwise' mode renamed 'expansion' to match TF impl, old expansion mode didn't make sense _logger.warning("feature_location=='depthwise' is deprecated, using 'expansion'") feature_location = 'expansion' self.feature_location = feature_location assert feature_location in ('bottleneck', 'expansion', '') self.verbose = _DEBUG_BUILDER # state updated during build, consumed by model self.in_chs = None self.features = [] def _make_block(self, ba, block_idx, block_count): drop_path_rate = self.drop_path_rate * block_idx / block_count bt = ba.pop('block_type') ba['in_chs'] = self.in_chs ba['out_chs'] = self.round_chs_fn(ba['out_chs']) s2d = ba.get('s2d', 0) if s2d > 0: # adjust while space2depth active ba['out_chs'] *= 4 if 'force_in_chs' in ba and ba['force_in_chs']: # NOTE this is a hack to work around mismatch in TF EdgeEffNet impl ba['force_in_chs'] = self.round_chs_fn(ba['force_in_chs']) ba['pad_type'] = self.pad_type # block act fn overrides the model default ba['act_layer'] = ba['act_layer'] if ba['act_layer'] is not None else self.act_layer assert ba['act_layer'] is not None ba['norm_layer'] = self.norm_layer ba['drop_path_rate'] = drop_path_rate if self.aa_layer is not None: ba['aa_layer'] = self.aa_layer se_ratio = ba.pop('se_ratio', None) if se_ratio and self.se_layer is not None: if not self.se_from_exp: # adjust se_ratio by expansion ratio if calculating se channels from block input se_ratio /= ba.get('exp_ratio', 1.0) if s2d == 1: # adjust for start of space2depth se_ratio /= 4 if self.se_has_ratio: ba['se_layer'] = partial(self.se_layer, rd_ratio=se_ratio) else: ba['se_layer'] = self.se_layer if bt == 'ir': _log_info_if(' InvertedResidual {}, Args: {}'.format(block_idx, str(ba)), self.verbose) block = CondConvResidual(**ba) if ba.get('num_experts', 0) else InvertedResidual(**ba) elif bt == 'ds' or bt == 'dsa': _log_info_if(' DepthwiseSeparable {}, Args: {}'.format(block_idx, str(ba)), self.verbose) block = DepthwiseSeparableConv(**ba) elif bt == 'er': _log_info_if(' EdgeResidual {}, Args: {}'.format(block_idx, str(ba)), self.verbose) block = EdgeResidual(**ba) elif bt == 'cn': _log_info_if(' ConvBnAct {}, Args: {}'.format(block_idx, str(ba)), self.verbose) block = ConvBnAct(**ba) elif bt == 'uir': _log_info_if(' UniversalInvertedResidual {}, Args: {}'.format(block_idx, str(ba)), self.verbose) block = 
UniversalInvertedResidual(**ba, layer_scale_init_value=self.layer_scale_init_value) elif bt == 'mqa': _log_info_if(' MobileMultiQueryAttention {}, Args: {}'.format(block_idx, str(ba)), self.verbose) block = MobileAttention(**ba, use_multi_query=True, layer_scale_init_value=self.layer_scale_init_value) elif bt == 'mha': _log_info_if(' MobileMultiHeadAttention {}, Args: {}'.format(block_idx, str(ba)), self.verbose) block = MobileAttention(**ba, layer_scale_init_value=self.layer_scale_init_value) else: assert False, 'Unknown block type (%s) while building model.' % bt self.in_chs = ba['out_chs'] # update in_chs for arg of next block return block def __call__(self, in_chs, model_block_args): """ Build the blocks Args: in_chs: Number of input-channels passed to first block model_block_args: A list of lists, outer list defines stages, inner list contains strings defining block configuration(s) Return: List of block stacks (each stack wrapped in nn.Sequential) """ _log_info_if('Building model trunk with %d stages...' % len(model_block_args), self.verbose) self.in_chs = in_chs total_block_count = sum([len(x) for x in model_block_args]) total_block_idx = 0 current_stride = 2 current_dilation = 1 stages = [] if model_block_args[0][0]['stride'] > 1: # if the first block starts with a stride, we need to extract first level feat from stem feature_info = dict(module='bn1', num_chs=in_chs, stage=0, reduction=current_stride) self.features.append(feature_info) # outer list of block_args defines the stacks space2depth = 0 for stack_idx, stack_args in enumerate(model_block_args): last_stack = stack_idx + 1 == len(model_block_args) _log_info_if('Stack: {}'.format(stack_idx), self.verbose) assert isinstance(stack_args, list) blocks = [] # each stack (stage of blocks) contains a list of block arguments for block_idx, block_args in enumerate(stack_args): last_block = block_idx + 1 == len(stack_args) _log_info_if(' Block: {}'.format(block_idx), self.verbose) assert block_args['stride'] in (1, 2) if block_idx >= 1: # only the first block in any stack can have a stride > 1 block_args['stride'] = 1 if not space2depth and block_args.pop('s2d', False): assert block_args['stride'] == 1 space2depth = 1 if space2depth > 0: # FIXME s2d is a WIP if space2depth == 2 and block_args['stride'] == 2: block_args['stride'] = 1 # to end s2d region, need to correct expansion and se ratio relative to input block_args['exp_ratio'] /= 4 space2depth = 0 else: block_args['s2d'] = space2depth extract_features = False if last_block: next_stack_idx = stack_idx + 1 extract_features = next_stack_idx >= len(model_block_args) or \ model_block_args[next_stack_idx][0]['stride'] > 1 next_dilation = current_dilation if block_args['stride'] > 1: next_output_stride = current_stride * block_args['stride'] if next_output_stride > self.output_stride: next_dilation = current_dilation * block_args['stride'] block_args['stride'] = 1 _log_info_if(' Converting stride to dilation to maintain output_stride=={}'.format( self.output_stride), self.verbose) else: current_stride = next_output_stride block_args['dilation'] = current_dilation if next_dilation != current_dilation: current_dilation = next_dilation # create the block block = self._make_block(block_args, total_block_idx, total_block_count) blocks.append(block) if space2depth == 1: space2depth = 2 # stash feature module name and channel info for model feature extraction if extract_features: feature_info = dict( stage=stack_idx + 1, reduction=current_stride, **block.feature_info(self.feature_location), ) 
leaf_name = feature_info.get('module', '') if leaf_name: feature_info['module'] = '.'.join([f'blocks.{stack_idx}.{block_idx}', leaf_name]) else: assert last_block feature_info['module'] = f'blocks.{stack_idx}' self.features.append(feature_info) total_block_idx += 1 # incr global block idx (across all stacks) stages.append(nn.Sequential(*blocks)) return stages def _init_weight_goog(m, n='', fix_group_fanout=True): """ Weight initialization as per Tensorflow official implementations. Args: m (nn.Module): module to init n (str): module name fix_group_fanout (bool): enable correct (matching Tensorflow TPU impl) fanout calculation w/ group convs Handles layers in EfficientNet, EfficientNet-CondConv, MixNet, MnasNet, MobileNetV3, etc: * https://github.com/tensorflow/tpu/blob/master/models/official/mnasnet/mnasnet_model.py * https://github.com/tensorflow/tpu/blob/master/models/official/efficientnet/efficientnet_model.py """ if isinstance(m, CondConv2d): fan_out = m.kernel_size[0] * m.kernel_size[1] * m.out_channels if fix_group_fanout: fan_out //= m.groups init_weight_fn = get_condconv_initializer( lambda w: nn.init.normal_(w, 0, math.sqrt(2.0 / fan_out)), m.num_experts, m.weight_shape) init_weight_fn(m.weight) if m.bias is not None: nn.init.zeros_(m.bias) elif isinstance(m, nn.Conv2d): fan_out = m.kernel_size[0] * m.kernel_size[1] * m.out_channels if fix_group_fanout: fan_out //= m.groups nn.init.normal_(m.weight, 0, math.sqrt(2.0 / fan_out)) if m.bias is not None: nn.init.zeros_(m.bias) elif isinstance(m, nn.BatchNorm2d): nn.init.ones_(m.weight) nn.init.zeros_(m.bias) elif isinstance(m, nn.Linear): fan_out = m.weight.size(0) # fan-out fan_in = 0 if 'routing_fn' in n: fan_in = m.weight.size(1) init_range = 1.0 / math.sqrt(fan_in + fan_out) nn.init.uniform_(m.weight, -init_range, init_range) nn.init.zeros_(m.bias) def efficientnet_init_weights(model: nn.Module, init_fn=None): init_fn = init_fn or _init_weight_goog for n, m in model.named_modules(): init_fn(m, n) # iterate and call any module.init_weights() fn, children first for n, m in named_modules(model): if hasattr(m, 'init_weights'): m.init_weights()
pytorch-image-models/timm/models/_efficientnet_builder.py/0
{ "file_path": "pytorch-image-models/timm/models/_efficientnet_builder.py", "repo_id": "pytorch-image-models", "token_count": 10986 }
224
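The block-string notation documented in `_decode_block_str` is easiest to verify directly. A sketch; `_decode_block_str` is module-private, so the import is for illustration only:

```py
from timm.models._efficientnet_builder import _decode_block_str, decode_arch_def

# inverted residual: 2 repeats, k3 depthwise, stride 2, expansion 6, 32 out chs, SE 0.25
ba, num_repeat = _decode_block_str('ir_r2_k3_s2_e6_c32_se0.25')
print(num_repeat)  # 2
print(ba['block_type'], ba['out_chs'], ba['exp_ratio'], ba['se_ratio'])  # ir 32 6.0 0.25

# outer list = stages; repeats expand into per-block kwarg dicts, scaled by depth_multiplier
arch = decode_arch_def([['ds_r1_k3_s1_e1_c16'], ['ir_r2_k3_s2_e6_c24']], depth_multiplier=2.0)
print([len(stage) for stage in arch])  # [2, 4]
```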
""" Bring-Your-Own-Attention Network A flexible network w/ dataclass based config for stacking NN blocks including self-attention (or similar) layers. Currently used to implement experimental variants of: * Bottleneck Transformers * Lambda ResNets * HaloNets Consider all of the models definitions here as experimental WIP and likely to change. Hacked together by / copyright Ross Wightman, 2021. """ from timm.data import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD from ._builder import build_model_with_cfg from ._registry import register_model, generate_default_cfgs from .byobnet import ByoBlockCfg, ByoModelCfg, ByobNet, interleave_blocks __all__ = [] model_cfgs = dict( botnet26t=ByoModelCfg( blocks=( ByoBlockCfg(type='bottle', d=2, c=256, s=1, gs=0, br=0.25), ByoBlockCfg(type='bottle', d=2, c=512, s=2, gs=0, br=0.25), interleave_blocks(types=('bottle', 'self_attn'), d=2, c=1024, s=2, gs=0, br=0.25), ByoBlockCfg(type='self_attn', d=2, c=2048, s=2, gs=0, br=0.25), ), stem_chs=64, stem_type='tiered', stem_pool='maxpool', fixed_input_size=True, self_attn_layer='bottleneck', self_attn_kwargs=dict() ), sebotnet33ts=ByoModelCfg( blocks=( ByoBlockCfg(type='bottle', d=2, c=256, s=1, gs=0, br=0.25), interleave_blocks(types=('bottle', 'self_attn'), every=[2], d=3, c=512, s=2, gs=0, br=0.25), interleave_blocks(types=('bottle', 'self_attn'), every=[2], d=3, c=1024, s=2, gs=0, br=0.25), ByoBlockCfg('self_attn', d=2, c=1536, s=2, gs=0, br=0.333), ), stem_chs=64, stem_type='tiered', stem_pool='', act_layer='silu', num_features=1280, attn_layer='se', self_attn_layer='bottleneck', self_attn_kwargs=dict() ), botnet50ts=ByoModelCfg( blocks=( ByoBlockCfg(type='bottle', d=3, c=256, s=1, gs=0, br=0.25), interleave_blocks(types=('bottle', 'self_attn'), every=4, d=4, c=512, s=2, gs=0, br=0.25), interleave_blocks(types=('bottle', 'self_attn'), d=6, c=1024, s=2, gs=0, br=0.25), interleave_blocks(types=('bottle', 'self_attn'), d=3, c=2048, s=2, gs=0, br=0.25), ), stem_chs=64, stem_type='tiered', stem_pool='maxpool', act_layer='silu', fixed_input_size=True, self_attn_layer='bottleneck', self_attn_kwargs=dict() ), eca_botnext26ts=ByoModelCfg( blocks=( ByoBlockCfg(type='bottle', d=2, c=256, s=1, gs=16, br=0.25), ByoBlockCfg(type='bottle', d=2, c=512, s=2, gs=16, br=0.25), interleave_blocks(types=('bottle', 'self_attn'), d=2, c=1024, s=2, gs=16, br=0.25), ByoBlockCfg(type='self_attn', d=2, c=2048, s=2, gs=16, br=0.25), ), stem_chs=64, stem_type='tiered', stem_pool='maxpool', fixed_input_size=True, act_layer='silu', attn_layer='eca', self_attn_layer='bottleneck', self_attn_kwargs=dict(dim_head=16) ), halonet_h1=ByoModelCfg( blocks=( ByoBlockCfg(type='self_attn', d=3, c=64, s=1, gs=0, br=1.0), ByoBlockCfg(type='self_attn', d=3, c=128, s=2, gs=0, br=1.0), ByoBlockCfg(type='self_attn', d=10, c=256, s=2, gs=0, br=1.0), ByoBlockCfg(type='self_attn', d=3, c=512, s=2, gs=0, br=1.0), ), stem_chs=64, stem_type='7x7', stem_pool='maxpool', self_attn_layer='halo', self_attn_kwargs=dict(block_size=8, halo_size=3), ), halonet26t=ByoModelCfg( blocks=( ByoBlockCfg(type='bottle', d=2, c=256, s=1, gs=0, br=0.25), ByoBlockCfg(type='bottle', d=2, c=512, s=2, gs=0, br=0.25), interleave_blocks(types=('bottle', 'self_attn'), d=2, c=1024, s=2, gs=0, br=0.25), ByoBlockCfg(type='self_attn', d=2, c=2048, s=2, gs=0, br=0.25), ), stem_chs=64, stem_type='tiered', stem_pool='maxpool', self_attn_layer='halo', self_attn_kwargs=dict(block_size=8, halo_size=2) ), sehalonet33ts=ByoModelCfg( blocks=( ByoBlockCfg(type='bottle', d=2, c=256, s=1, gs=0, br=0.25), 
interleave_blocks(types=('bottle', 'self_attn'), every=[2], d=3, c=512, s=2, gs=0, br=0.25), interleave_blocks(types=('bottle', 'self_attn'), every=[2], d=3, c=1024, s=2, gs=0, br=0.25), ByoBlockCfg('self_attn', d=2, c=1536, s=2, gs=0, br=0.333), ), stem_chs=64, stem_type='tiered', stem_pool='', act_layer='silu', num_features=1280, attn_layer='se', self_attn_layer='halo', self_attn_kwargs=dict(block_size=8, halo_size=3) ), halonet50ts=ByoModelCfg( blocks=( ByoBlockCfg(type='bottle', d=3, c=256, s=1, gs=0, br=0.25), interleave_blocks( types=('bottle', 'self_attn'), every=4, d=4, c=512, s=2, gs=0, br=0.25, self_attn_layer='halo', self_attn_kwargs=dict(block_size=8, halo_size=3, num_heads=4)), interleave_blocks(types=('bottle', 'self_attn'), d=6, c=1024, s=2, gs=0, br=0.25), interleave_blocks(types=('bottle', 'self_attn'), d=3, c=2048, s=2, gs=0, br=0.25), ), stem_chs=64, stem_type='tiered', stem_pool='maxpool', act_layer='silu', self_attn_layer='halo', self_attn_kwargs=dict(block_size=8, halo_size=3) ), eca_halonext26ts=ByoModelCfg( blocks=( ByoBlockCfg(type='bottle', d=2, c=256, s=1, gs=16, br=0.25), ByoBlockCfg(type='bottle', d=2, c=512, s=2, gs=16, br=0.25), interleave_blocks(types=('bottle', 'self_attn'), d=2, c=1024, s=2, gs=16, br=0.25), ByoBlockCfg(type='self_attn', d=2, c=2048, s=2, gs=16, br=0.25), ), stem_chs=64, stem_type='tiered', stem_pool='maxpool', act_layer='silu', attn_layer='eca', self_attn_layer='halo', self_attn_kwargs=dict(block_size=8, halo_size=2, dim_head=16) ), lambda_resnet26t=ByoModelCfg( blocks=( ByoBlockCfg(type='bottle', d=2, c=256, s=1, gs=0, br=0.25), ByoBlockCfg(type='bottle', d=2, c=512, s=2, gs=0, br=0.25), interleave_blocks(types=('bottle', 'self_attn'), d=2, c=1024, s=2, gs=0, br=0.25), ByoBlockCfg(type='self_attn', d=2, c=2048, s=2, gs=0, br=0.25), ), stem_chs=64, stem_type='tiered', stem_pool='maxpool', self_attn_layer='lambda', self_attn_kwargs=dict(r=9) ), lambda_resnet50ts=ByoModelCfg( blocks=( ByoBlockCfg(type='bottle', d=3, c=256, s=1, gs=0, br=0.25), interleave_blocks(types=('bottle', 'self_attn'), every=4, d=4, c=512, s=2, gs=0, br=0.25), interleave_blocks(types=('bottle', 'self_attn'), d=6, c=1024, s=2, gs=0, br=0.25), interleave_blocks(types=('bottle', 'self_attn'), d=3, c=2048, s=2, gs=0, br=0.25), ), stem_chs=64, stem_type='tiered', stem_pool='maxpool', act_layer='silu', self_attn_layer='lambda', self_attn_kwargs=dict(r=9) ), lambda_resnet26rpt_256=ByoModelCfg( blocks=( ByoBlockCfg(type='bottle', d=2, c=256, s=1, gs=0, br=0.25), ByoBlockCfg(type='bottle', d=2, c=512, s=2, gs=0, br=0.25), interleave_blocks(types=('bottle', 'self_attn'), d=2, c=1024, s=2, gs=0, br=0.25), ByoBlockCfg(type='self_attn', d=2, c=2048, s=2, gs=0, br=0.25), ), stem_chs=64, stem_type='tiered', stem_pool='maxpool', self_attn_layer='lambda', self_attn_kwargs=dict(r=None) ), # experimental haloregnetz_b=ByoModelCfg( blocks=( ByoBlockCfg(type='bottle', d=2, c=48, s=2, gs=16, br=3), ByoBlockCfg(type='bottle', d=6, c=96, s=2, gs=16, br=3), interleave_blocks(types=('bottle', 'self_attn'), every=3, d=12, c=192, s=2, gs=16, br=3), ByoBlockCfg('self_attn', d=2, c=288, s=2, gs=16, br=3), ), stem_chs=32, stem_pool='', downsample='', num_features=1536, act_layer='silu', attn_layer='se', attn_kwargs=dict(rd_ratio=0.25), block_kwargs=dict(bottle_in=True, linear_out=True), self_attn_layer='halo', self_attn_kwargs=dict(block_size=7, halo_size=2, qk_ratio=0.33) ), # experimental lamhalobotnet50ts=ByoModelCfg( blocks=( ByoBlockCfg(type='bottle', d=3, c=256, s=1, gs=0, br=0.25), 
interleave_blocks( types=('bottle', 'self_attn'), d=4, c=512, s=2, gs=0, br=0.25, self_attn_layer='lambda', self_attn_kwargs=dict(r=13)), interleave_blocks( types=('bottle', 'self_attn'), d=6, c=1024, s=2, gs=0, br=0.25, self_attn_layer='halo', self_attn_kwargs=dict(halo_size=3)), interleave_blocks( types=('bottle', 'self_attn'), d=3, c=2048, s=2, gs=0, br=0.25, self_attn_layer='bottleneck', self_attn_kwargs=dict()), ), stem_chs=64, stem_type='tiered', stem_pool='', act_layer='silu', ), halo2botnet50ts=ByoModelCfg( blocks=( ByoBlockCfg(type='bottle', d=3, c=256, s=1, gs=0, br=0.25), interleave_blocks( types=('bottle', 'self_attn'), d=4, c=512, s=2, gs=0, br=0.25, self_attn_layer='halo', self_attn_kwargs=dict(halo_size=3)), interleave_blocks( types=('bottle', 'self_attn'), d=6, c=1024, s=2, gs=0, br=0.25, self_attn_layer='halo', self_attn_kwargs=dict(halo_size=3)), interleave_blocks( types=('bottle', 'self_attn'), d=3, c=2048, s=2, gs=0, br=0.25, self_attn_layer='bottleneck', self_attn_kwargs=dict()), ), stem_chs=64, stem_type='tiered', stem_pool='', act_layer='silu', ), ) def _create_byoanet(variant, cfg_variant=None, pretrained=False, **kwargs): return build_model_with_cfg( ByobNet, variant, pretrained, model_cfg=model_cfgs[variant] if not cfg_variant else model_cfgs[cfg_variant], feature_cfg=dict(flatten_sequential=True), **kwargs, ) def _cfg(url='', **kwargs): return { 'url': url, 'num_classes': 1000, 'input_size': (3, 224, 224), 'pool_size': (7, 7), 'crop_pct': 0.95, 'interpolation': 'bicubic', 'mean': IMAGENET_DEFAULT_MEAN, 'std': IMAGENET_DEFAULT_STD, 'first_conv': 'stem.conv1.conv', 'classifier': 'head.fc', 'fixed_input_size': False, 'min_input_size': (3, 224, 224), **kwargs } default_cfgs = generate_default_cfgs({ # GPU-Efficient (ResNet) weights 'botnet26t_256.c1_in1k': _cfg( url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-attn-weights/botnet26t_c1_256-167a0e9f.pth', hf_hub_id='timm/', fixed_input_size=True, input_size=(3, 256, 256), pool_size=(8, 8)), 'sebotnet33ts_256.a1h_in1k': _cfg( url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-attn-weights/sebotnet33ts_a1h2_256-957e3c3e.pth', hf_hub_id='timm/', fixed_input_size=True, input_size=(3, 256, 256), pool_size=(8, 8), crop_pct=0.94), 'botnet50ts_256.untrained': _cfg( fixed_input_size=True, input_size=(3, 256, 256), pool_size=(8, 8)), 'eca_botnext26ts_256.c1_in1k': _cfg( url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-attn-weights/eca_botnext26ts_c_256-95a898f6.pth', hf_hub_id='timm/', fixed_input_size=True, input_size=(3, 256, 256), pool_size=(8, 8)), 'halonet_h1.untrained': _cfg(input_size=(3, 256, 256), pool_size=(8, 8), min_input_size=(3, 256, 256)), 'halonet26t.a1h_in1k': _cfg( url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-attn-weights/halonet26t_a1h_256-3083328c.pth', hf_hub_id='timm/', input_size=(3, 256, 256), pool_size=(8, 8), min_input_size=(3, 256, 256)), 'sehalonet33ts.ra2_in1k': _cfg( url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-attn-weights/sehalonet33ts_256-87e053f9.pth', hf_hub_id='timm/', input_size=(3, 256, 256), pool_size=(8, 8), min_input_size=(3, 256, 256), crop_pct=0.94), 'halonet50ts.a1h_in1k': _cfg( url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-attn-weights/halonet50ts_a1h2_256-f3a3daee.pth', hf_hub_id='timm/', input_size=(3, 256, 256), pool_size=(8, 8), min_input_size=(3, 256, 256), crop_pct=0.94), 
'eca_halonext26ts.c1_in1k': _cfg( url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-attn-weights/eca_halonext26ts_c_256-06906299.pth', hf_hub_id='timm/', input_size=(3, 256, 256), pool_size=(8, 8), min_input_size=(3, 256, 256), crop_pct=0.94), 'lambda_resnet26t.c1_in1k': _cfg( url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-attn-weights/lambda_resnet26t_c_256-e5a5c857.pth', hf_hub_id='timm/', min_input_size=(3, 128, 128), input_size=(3, 256, 256), pool_size=(8, 8), crop_pct=0.94), 'lambda_resnet50ts.a1h_in1k': _cfg( url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-attn-weights/lambda_resnet50ts_a1h_256-b87370f7.pth', hf_hub_id='timm/', min_input_size=(3, 128, 128), input_size=(3, 256, 256), pool_size=(8, 8)), 'lambda_resnet26rpt_256.c1_in1k': _cfg( url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-attn-weights/lambda_resnet26rpt_c_256-ab00292d.pth', hf_hub_id='timm/', fixed_input_size=True, input_size=(3, 256, 256), pool_size=(8, 8), crop_pct=0.94), 'haloregnetz_b.ra3_in1k': _cfg( url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-attn-weights/haloregnetz_c_raa_256-c8ad7616.pth', hf_hub_id='timm/', mean=(0.5, 0.5, 0.5), std=(0.5, 0.5, 0.5), first_conv='stem.conv', input_size=(3, 224, 224), pool_size=(7, 7), min_input_size=(3, 224, 224), crop_pct=0.94), 'lamhalobotnet50ts_256.a1h_in1k': _cfg( url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-attn-weights/lamhalobotnet50ts_a1h2_256-fe3d9445.pth', hf_hub_id='timm/', fixed_input_size=True, input_size=(3, 256, 256), pool_size=(8, 8)), 'halo2botnet50ts_256.a1h_in1k': _cfg( url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-attn-weights/halo2botnet50ts_a1h2_256-fd9c11a3.pth', hf_hub_id='timm/', fixed_input_size=True, input_size=(3, 256, 256), pool_size=(8, 8)), }) @register_model def botnet26t_256(pretrained=False, **kwargs) -> ByobNet: """ Bottleneck Transformer w/ ResNet26-T backbone. """ kwargs.setdefault('img_size', 256) return _create_byoanet('botnet26t_256', 'botnet26t', pretrained=pretrained, **kwargs) @register_model def sebotnet33ts_256(pretrained=False, **kwargs) -> ByobNet: """ Bottleneck Transformer w/ a ResNet33-t backbone, SE attn for non Halo blocks, SiLU, """ return _create_byoanet('sebotnet33ts_256', 'sebotnet33ts', pretrained=pretrained, **kwargs) @register_model def botnet50ts_256(pretrained=False, **kwargs) -> ByobNet: """ Bottleneck Transformer w/ ResNet50-T backbone, silu act. """ kwargs.setdefault('img_size', 256) return _create_byoanet('botnet50ts_256', 'botnet50ts', pretrained=pretrained, **kwargs) @register_model def eca_botnext26ts_256(pretrained=False, **kwargs) -> ByobNet: """ Bottleneck Transformer w/ ResNet26-T backbone, silu act. """ kwargs.setdefault('img_size', 256) return _create_byoanet('eca_botnext26ts_256', 'eca_botnext26ts', pretrained=pretrained, **kwargs) @register_model def halonet_h1(pretrained=False, **kwargs) -> ByobNet: """ HaloNet-H1. Halo attention in all stages as per the paper. NOTE: This runs very slowly! """ return _create_byoanet('halonet_h1', pretrained=pretrained, **kwargs) @register_model def halonet26t(pretrained=False, **kwargs) -> ByobNet: """ HaloNet w/ a ResNet26-t backbone. 
Halo attention in final two stages """ return _create_byoanet('halonet26t', pretrained=pretrained, **kwargs) @register_model def sehalonet33ts(pretrained=False, **kwargs) -> ByobNet: """ HaloNet w/ a ResNet33-t backbone, SE attn for non Halo blocks, SiLU, 1-2 Halo in stage 2,3,4. """ return _create_byoanet('sehalonet33ts', pretrained=pretrained, **kwargs) @register_model def halonet50ts(pretrained=False, **kwargs) -> ByobNet: """ HaloNet w/ a ResNet50-t backbone, silu act. Halo attention in final two stages """ return _create_byoanet('halonet50ts', pretrained=pretrained, **kwargs) @register_model def eca_halonext26ts(pretrained=False, **kwargs) -> ByobNet: """ HaloNet w/ a ResNet26-t backbone, silu act. Halo attention in final two stages """ return _create_byoanet('eca_halonext26ts', pretrained=pretrained, **kwargs) @register_model def lambda_resnet26t(pretrained=False, **kwargs) -> ByobNet: """ Lambda-ResNet-26-T. Lambda layers w/ conv pos in last two stages. """ return _create_byoanet('lambda_resnet26t', pretrained=pretrained, **kwargs) @register_model def lambda_resnet50ts(pretrained=False, **kwargs) -> ByobNet: """ Lambda-ResNet-50-TS. SiLU act. Lambda layers w/ conv pos in last two stages. """ return _create_byoanet('lambda_resnet50ts', pretrained=pretrained, **kwargs) @register_model def lambda_resnet26rpt_256(pretrained=False, **kwargs) -> ByobNet: """ Lambda-ResNet-26-R-T. Lambda layers w/ rel pos embed in last two stages. """ kwargs.setdefault('img_size', 256) return _create_byoanet('lambda_resnet26rpt_256', pretrained=pretrained, **kwargs) @register_model def haloregnetz_b(pretrained=False, **kwargs) -> ByobNet: """ Halo + RegNetZ """ return _create_byoanet('haloregnetz_b', pretrained=pretrained, **kwargs) @register_model def lamhalobotnet50ts_256(pretrained=False, **kwargs) -> ByobNet: """ Combo Attention (Lambda + Halo + Bot) Network """ return _create_byoanet('lamhalobotnet50ts_256', 'lamhalobotnet50ts', pretrained=pretrained, **kwargs) @register_model def halo2botnet50ts_256(pretrained=False, **kwargs) -> ByobNet: """ Combo Attention (Halo + Halo + Bot) Network """ return _create_byoanet('halo2botnet50ts_256', 'halo2botnet50ts', pretrained=pretrained, **kwargs)
pytorch-image-models/timm/models/byoanet.py/0
{ "file_path": "pytorch-image-models/timm/models/byoanet.py", "repo_id": "pytorch-image-models", "token_count": 9703 }
225
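Any of the registered variants above can be built through the timm factory. A sketch; note that `fixed_input_size` configs bake attention resolution into the blocks, so the configured input size should be respected:

```py
import timm
import torch

model = timm.create_model('halonet26t', pretrained=False, num_classes=10)
x = torch.randn(1, 3, 256, 256)  # min_input_size for this cfg is (3, 256, 256)
print(model(x).shape)            # torch.Size([1, 10])

# fixed-size variants (e.g. botnet26t_256) default img_size=256 via the entrypoint
bot = timm.create_model('botnet26t_256', pretrained=False)
print(bot.pretrained_cfg['input_size'], bot.pretrained_cfg['fixed_input_size'])
```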
""" EfficientFormer-V2 @article{ li2022rethinking, title={Rethinking Vision Transformers for MobileNet Size and Speed}, author={Li, Yanyu and Hu, Ju and Wen, Yang and Evangelidis, Georgios and Salahi, Kamyar and Wang, Yanzhi and Tulyakov, Sergey and Ren, Jian}, journal={arXiv preprint arXiv:2212.08059}, year={2022} } Significantly refactored and cleaned up for timm from original at: https://github.com/snap-research/EfficientFormer Original code licensed Apache 2.0, Copyright (c) 2022 Snap Inc. Modifications and timm support by / Copyright 2023, Ross Wightman """ import math from functools import partial from typing import Dict, Optional import torch import torch.nn as nn from timm.data import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD from timm.layers import create_conv2d, create_norm_layer, get_act_layer, get_norm_layer, ConvNormAct from timm.layers import DropPath, trunc_normal_, to_2tuple, to_ntuple, ndgrid from ._builder import build_model_with_cfg from ._manipulate import checkpoint_seq from ._registry import generate_default_cfgs, register_model __all__ = ['EfficientFormerV2'] EfficientFormer_width = { 'L': (40, 80, 192, 384), # 26m 83.3% 6attn 'S2': (32, 64, 144, 288), # 12m 81.6% 4attn dp0.02 'S1': (32, 48, 120, 224), # 6.1m 79.0 'S0': (32, 48, 96, 176), # 75.0 75.7 } EfficientFormer_depth = { 'L': (5, 5, 15, 10), # 26m 83.3% 'S2': (4, 4, 12, 8), # 12m 'S1': (3, 3, 9, 6), # 79.0 'S0': (2, 2, 6, 4), # 75.7 } EfficientFormer_expansion_ratios = { 'L': (4, 4, (4, 4, 4, 4, 3, 3, 3, 3, 3, 3, 3, 4, 4, 4, 4), (4, 4, 4, 3, 3, 3, 3, 4, 4, 4)), 'S2': (4, 4, (4, 4, 3, 3, 3, 3, 3, 3, 4, 4, 4, 4), (4, 4, 3, 3, 3, 3, 4, 4)), 'S1': (4, 4, (4, 4, 3, 3, 3, 3, 4, 4, 4), (4, 4, 3, 3, 4, 4)), 'S0': (4, 4, (4, 3, 3, 3, 4, 4), (4, 3, 3, 4)), } class ConvNorm(nn.Module): def __init__( self, in_channels, out_channels, kernel_size=1, stride=1, padding='', dilation=1, groups=1, bias=True, norm_layer='batchnorm2d', norm_kwargs=None, ): norm_kwargs = norm_kwargs or {} super(ConvNorm, self).__init__() self.conv = create_conv2d( in_channels, out_channels, kernel_size, stride=stride, padding=padding, dilation=dilation, groups=groups, bias=bias, ) self.bn = create_norm_layer(norm_layer, out_channels, **norm_kwargs) def forward(self, x): x = self.conv(x) x = self.bn(x) return x class Attention2d(torch.nn.Module): attention_bias_cache: Dict[str, torch.Tensor] def __init__( self, dim=384, key_dim=32, num_heads=8, attn_ratio=4, resolution=7, act_layer=nn.GELU, stride=None, ): super().__init__() self.num_heads = num_heads self.scale = key_dim ** -0.5 self.key_dim = key_dim resolution = to_2tuple(resolution) if stride is not None: resolution = tuple([math.ceil(r / stride) for r in resolution]) self.stride_conv = ConvNorm(dim, dim, kernel_size=3, stride=stride, groups=dim) self.upsample = nn.Upsample(scale_factor=stride, mode='bilinear') else: self.stride_conv = None self.upsample = None self.resolution = resolution self.N = self.resolution[0] * self.resolution[1] self.d = int(attn_ratio * key_dim) self.dh = int(attn_ratio * key_dim) * num_heads self.attn_ratio = attn_ratio kh = self.key_dim * self.num_heads self.q = ConvNorm(dim, kh) self.k = ConvNorm(dim, kh) self.v = ConvNorm(dim, self.dh) self.v_local = ConvNorm(self.dh, self.dh, kernel_size=3, groups=self.dh) self.talking_head1 = nn.Conv2d(self.num_heads, self.num_heads, kernel_size=1) self.talking_head2 = nn.Conv2d(self.num_heads, self.num_heads, kernel_size=1) self.act = act_layer() self.proj = ConvNorm(self.dh, dim, 1) pos = 
torch.stack(ndgrid(torch.arange(self.resolution[0]), torch.arange(self.resolution[1]))).flatten(1) rel_pos = (pos[..., :, None] - pos[..., None, :]).abs() rel_pos = (rel_pos[0] * self.resolution[1]) + rel_pos[1] self.attention_biases = torch.nn.Parameter(torch.zeros(num_heads, self.N)) self.register_buffer('attention_bias_idxs', torch.LongTensor(rel_pos), persistent=False) self.attention_bias_cache = {} # per-device attention_biases cache (data-parallel compat) @torch.no_grad() def train(self, mode=True): super().train(mode) if mode and self.attention_bias_cache: self.attention_bias_cache = {} # clear ab cache def get_attention_biases(self, device: torch.device) -> torch.Tensor: if torch.jit.is_tracing() or self.training: return self.attention_biases[:, self.attention_bias_idxs] else: device_key = str(device) if device_key not in self.attention_bias_cache: self.attention_bias_cache[device_key] = self.attention_biases[:, self.attention_bias_idxs] return self.attention_bias_cache[device_key] def forward(self, x): B, C, H, W = x.shape if self.stride_conv is not None: x = self.stride_conv(x) q = self.q(x).reshape(B, self.num_heads, -1, self.N).permute(0, 1, 3, 2) k = self.k(x).reshape(B, self.num_heads, -1, self.N).permute(0, 1, 2, 3) v = self.v(x) v_local = self.v_local(v) v = v.reshape(B, self.num_heads, -1, self.N).permute(0, 1, 3, 2) attn = (q @ k) * self.scale attn = attn + self.get_attention_biases(x.device) attn = self.talking_head1(attn) attn = attn.softmax(dim=-1) attn = self.talking_head2(attn) x = (attn @ v).transpose(2, 3) x = x.reshape(B, self.dh, self.resolution[0], self.resolution[1]) + v_local if self.upsample is not None: x = self.upsample(x) x = self.act(x) x = self.proj(x) return x class LocalGlobalQuery(torch.nn.Module): def __init__(self, in_dim, out_dim): super().__init__() self.pool = nn.AvgPool2d(1, 2, 0) self.local = nn.Conv2d(in_dim, in_dim, kernel_size=3, stride=2, padding=1, groups=in_dim) self.proj = ConvNorm(in_dim, out_dim, 1) def forward(self, x): local_q = self.local(x) pool_q = self.pool(x) q = local_q + pool_q q = self.proj(q) return q class Attention2dDownsample(torch.nn.Module): attention_bias_cache: Dict[str, torch.Tensor] def __init__( self, dim=384, key_dim=16, num_heads=8, attn_ratio=4, resolution=7, out_dim=None, act_layer=nn.GELU, ): super().__init__() self.num_heads = num_heads self.scale = key_dim ** -0.5 self.key_dim = key_dim self.resolution = to_2tuple(resolution) self.resolution2 = tuple([math.ceil(r / 2) for r in self.resolution]) self.N = self.resolution[0] * self.resolution[1] self.N2 = self.resolution2[0] * self.resolution2[1] self.d = int(attn_ratio * key_dim) self.dh = int(attn_ratio * key_dim) * num_heads self.attn_ratio = attn_ratio self.out_dim = out_dim or dim kh = self.key_dim * self.num_heads self.q = LocalGlobalQuery(dim, kh) self.k = ConvNorm(dim, kh, 1) self.v = ConvNorm(dim, self.dh, 1) self.v_local = ConvNorm(self.dh, self.dh, kernel_size=3, stride=2, groups=self.dh) self.act = act_layer() self.proj = ConvNorm(self.dh, self.out_dim, 1) self.attention_biases = nn.Parameter(torch.zeros(num_heads, self.N)) k_pos = torch.stack(ndgrid(torch.arange(self.resolution[0]), torch.arange(self.resolution[1]))).flatten(1) q_pos = torch.stack(ndgrid( torch.arange(0, self.resolution[0], step=2), torch.arange(0, self.resolution[1], step=2) )).flatten(1) rel_pos = (q_pos[..., :, None] - k_pos[..., None, :]).abs() rel_pos = (rel_pos[0] * self.resolution[1]) + rel_pos[1] self.register_buffer('attention_bias_idxs', rel_pos, persistent=False) 
self.attention_bias_cache = {} # per-device attention_biases cache (data-parallel compat) @torch.no_grad() def train(self, mode=True): super().train(mode) if mode and self.attention_bias_cache: self.attention_bias_cache = {} # clear ab cache def get_attention_biases(self, device: torch.device) -> torch.Tensor: if torch.jit.is_tracing() or self.training: return self.attention_biases[:, self.attention_bias_idxs] else: device_key = str(device) if device_key not in self.attention_bias_cache: self.attention_bias_cache[device_key] = self.attention_biases[:, self.attention_bias_idxs] return self.attention_bias_cache[device_key] def forward(self, x): B, C, H, W = x.shape q = self.q(x).reshape(B, self.num_heads, -1, self.N2).permute(0, 1, 3, 2) k = self.k(x).reshape(B, self.num_heads, -1, self.N).permute(0, 1, 2, 3) v = self.v(x) v_local = self.v_local(v) v = v.reshape(B, self.num_heads, -1, self.N).permute(0, 1, 3, 2) attn = (q @ k) * self.scale attn = attn + self.get_attention_biases(x.device) attn = attn.softmax(dim=-1) x = (attn @ v).transpose(2, 3) x = x.reshape(B, self.dh, self.resolution2[0], self.resolution2[1]) + v_local x = self.act(x) x = self.proj(x) return x class Downsample(nn.Module): def __init__( self, in_chs, out_chs, kernel_size=3, stride=2, padding=1, resolution=7, use_attn=False, act_layer=nn.GELU, norm_layer=nn.BatchNorm2d, ): super().__init__() kernel_size = to_2tuple(kernel_size) stride = to_2tuple(stride) padding = to_2tuple(padding) norm_layer = norm_layer or nn.Identity() self.conv = ConvNorm( in_chs, out_chs, kernel_size=kernel_size, stride=stride, padding=padding, norm_layer=norm_layer, ) if use_attn: self.attn = Attention2dDownsample( dim=in_chs, out_dim=out_chs, resolution=resolution, act_layer=act_layer, ) else: self.attn = None def forward(self, x): out = self.conv(x) if self.attn is not None: return self.attn(x) + out return out class ConvMlpWithNorm(nn.Module): """ Implementation of MLP with 1*1 convolutions. 
Input: tensor with shape [B, C, H, W] """ def __init__( self, in_features, hidden_features=None, out_features=None, act_layer=nn.GELU, norm_layer=nn.BatchNorm2d, drop=0., mid_conv=False, ): super().__init__() out_features = out_features or in_features hidden_features = hidden_features or in_features self.fc1 = ConvNormAct( in_features, hidden_features, 1, bias=True, norm_layer=norm_layer, act_layer=act_layer) if mid_conv: self.mid = ConvNormAct( hidden_features, hidden_features, 3, groups=hidden_features, bias=True, norm_layer=norm_layer, act_layer=act_layer) else: self.mid = nn.Identity() self.drop1 = nn.Dropout(drop) self.fc2 = ConvNorm(hidden_features, out_features, 1, norm_layer=norm_layer) self.drop2 = nn.Dropout(drop) def forward(self, x): x = self.fc1(x) x = self.mid(x) x = self.drop1(x) x = self.fc2(x) x = self.drop2(x) return x class LayerScale2d(nn.Module): def __init__(self, dim, init_values=1e-5, inplace=False): super().__init__() self.inplace = inplace self.gamma = nn.Parameter(init_values * torch.ones(dim)) def forward(self, x): gamma = self.gamma.view(1, -1, 1, 1) return x.mul_(gamma) if self.inplace else x * gamma class EfficientFormerV2Block(nn.Module): def __init__( self, dim, mlp_ratio=4., act_layer=nn.GELU, norm_layer=nn.BatchNorm2d, proj_drop=0., drop_path=0., layer_scale_init_value=1e-5, resolution=7, stride=None, use_attn=True, ): super().__init__() if use_attn: self.token_mixer = Attention2d( dim, resolution=resolution, act_layer=act_layer, stride=stride, ) self.ls1 = LayerScale2d( dim, layer_scale_init_value) if layer_scale_init_value is not None else nn.Identity() self.drop_path1 = DropPath(drop_path) if drop_path > 0. else nn.Identity() else: self.token_mixer = None self.ls1 = None self.drop_path1 = None self.mlp = ConvMlpWithNorm( in_features=dim, hidden_features=int(dim * mlp_ratio), act_layer=act_layer, norm_layer=norm_layer, drop=proj_drop, mid_conv=True, ) self.ls2 = LayerScale2d( dim, layer_scale_init_value) if layer_scale_init_value is not None else nn.Identity() self.drop_path2 = DropPath(drop_path) if drop_path > 0. 
else nn.Identity() def forward(self, x): if self.token_mixer is not None: x = x + self.drop_path1(self.ls1(self.token_mixer(x))) x = x + self.drop_path2(self.ls2(self.mlp(x))) return x class Stem4(nn.Sequential): def __init__(self, in_chs, out_chs, act_layer=nn.GELU, norm_layer=nn.BatchNorm2d): super().__init__() self.stride = 4 self.conv1 = ConvNormAct( in_chs, out_chs // 2, kernel_size=3, stride=2, padding=1, bias=True, norm_layer=norm_layer, act_layer=act_layer ) self.conv2 = ConvNormAct( out_chs // 2, out_chs, kernel_size=3, stride=2, padding=1, bias=True, norm_layer=norm_layer, act_layer=act_layer ) class EfficientFormerV2Stage(nn.Module): def __init__( self, dim, dim_out, depth, resolution=7, downsample=True, block_stride=None, downsample_use_attn=False, block_use_attn=False, num_vit=1, mlp_ratio=4., proj_drop=.0, drop_path=0., layer_scale_init_value=1e-5, act_layer=nn.GELU, norm_layer=nn.BatchNorm2d, ): super().__init__() self.grad_checkpointing = False mlp_ratio = to_ntuple(depth)(mlp_ratio) resolution = to_2tuple(resolution) if downsample: self.downsample = Downsample( dim, dim_out, use_attn=downsample_use_attn, resolution=resolution, norm_layer=norm_layer, act_layer=act_layer, ) dim = dim_out resolution = tuple([math.ceil(r / 2) for r in resolution]) else: assert dim == dim_out self.downsample = nn.Identity() blocks = [] for block_idx in range(depth): remain_idx = depth - num_vit - 1 b = EfficientFormerV2Block( dim, resolution=resolution, stride=block_stride, mlp_ratio=mlp_ratio[block_idx], use_attn=block_use_attn and block_idx > remain_idx, proj_drop=proj_drop, drop_path=drop_path[block_idx], layer_scale_init_value=layer_scale_init_value, act_layer=act_layer, norm_layer=norm_layer, ) blocks += [b] self.blocks = nn.Sequential(*blocks) def forward(self, x): x = self.downsample(x) if self.grad_checkpointing and not torch.jit.is_scripting(): x = checkpoint_seq(self.blocks, x) else: x = self.blocks(x) return x class EfficientFormerV2(nn.Module): def __init__( self, depths, in_chans=3, img_size=224, global_pool='avg', embed_dims=None, downsamples=None, mlp_ratios=4, norm_layer='batchnorm2d', norm_eps=1e-5, act_layer='gelu', num_classes=1000, drop_rate=0., proj_drop_rate=0., drop_path_rate=0., layer_scale_init_value=1e-5, num_vit=0, distillation=True, ): super().__init__() assert global_pool in ('avg', '') self.num_classes = num_classes self.global_pool = global_pool self.feature_info = [] img_size = to_2tuple(img_size) norm_layer = partial(get_norm_layer(norm_layer), eps=norm_eps) act_layer = get_act_layer(act_layer) self.stem = Stem4(in_chans, embed_dims[0], act_layer=act_layer, norm_layer=norm_layer) prev_dim = embed_dims[0] stride = 4 num_stages = len(depths) dpr = [x.tolist() for x in torch.linspace(0, drop_path_rate, sum(depths)).split(depths)] downsamples = downsamples or (False,) + (True,) * (len(depths) - 1) mlp_ratios = to_ntuple(num_stages)(mlp_ratios) stages = [] for i in range(num_stages): curr_resolution = tuple([math.ceil(s / stride) for s in img_size]) stage = EfficientFormerV2Stage( prev_dim, embed_dims[i], depth=depths[i], resolution=curr_resolution, downsample=downsamples[i], block_stride=2 if i == 2 else None, downsample_use_attn=i >= 3, block_use_attn=i >= 2, num_vit=num_vit, mlp_ratio=mlp_ratios[i], proj_drop=proj_drop_rate, drop_path=dpr[i], layer_scale_init_value=layer_scale_init_value, act_layer=act_layer, norm_layer=norm_layer, ) if downsamples[i]: stride *= 2 prev_dim = embed_dims[i] self.feature_info += [dict(num_chs=prev_dim, reduction=stride, 
module=f'stages.{i}')] stages.append(stage) self.stages = nn.Sequential(*stages) # Classifier head self.num_features = self.head_hidden_size = embed_dims[-1] self.norm = norm_layer(embed_dims[-1]) self.head_drop = nn.Dropout(drop_rate) self.head = nn.Linear(embed_dims[-1], num_classes) if num_classes > 0 else nn.Identity() self.dist = distillation if self.dist: self.head_dist = nn.Linear(embed_dims[-1], num_classes) if num_classes > 0 else nn.Identity() else: self.head_dist = None self.apply(self.init_weights) self.distilled_training = False # init for classification def init_weights(self, m): if isinstance(m, nn.Linear): trunc_normal_(m.weight, std=.02) if m.bias is not None: nn.init.constant_(m.bias, 0) @torch.jit.ignore def no_weight_decay(self): return {k for k, _ in self.named_parameters() if 'attention_biases' in k} @torch.jit.ignore def group_matcher(self, coarse=False): matcher = dict( stem=r'^stem', # stem and embed blocks=[(r'^stages\.(\d+)', None), (r'^norm', (99999,))] ) return matcher @torch.jit.ignore def set_grad_checkpointing(self, enable=True): for s in self.stages: s.grad_checkpointing = enable @torch.jit.ignore def get_classifier(self) -> nn.Module: return self.head, self.head_dist def reset_classifier(self, num_classes: int, global_pool: Optional[str] = None): self.num_classes = num_classes if global_pool is not None: self.global_pool = global_pool self.head = nn.Linear(self.num_features, num_classes) if num_classes > 0 else nn.Identity() self.head_dist = nn.Linear(self.num_features, num_classes) if num_classes > 0 else nn.Identity() @torch.jit.ignore def set_distilled_training(self, enable=True): self.distilled_training = enable def forward_features(self, x): x = self.stem(x) x = self.stages(x) x = self.norm(x) return x def forward_head(self, x, pre_logits: bool = False): if self.global_pool == 'avg': x = x.mean(dim=(2, 3)) x = self.head_drop(x) if pre_logits: return x x, x_dist = self.head(x), self.head_dist(x) if self.distilled_training and self.training and not torch.jit.is_scripting(): # only return separate classification predictions when training in distilled mode return x, x_dist else: # during standard train/finetune, inference average the classifier predictions return (x + x_dist) / 2 def forward(self, x): x = self.forward_features(x) x = self.forward_head(x) return x def _cfg(url='', **kwargs): return { 'url': url, 'num_classes': 1000, 'input_size': (3, 224, 224), 'pool_size': None, 'fixed_input_size': True, 'crop_pct': .95, 'interpolation': 'bicubic', 'mean': IMAGENET_DEFAULT_MEAN, 'std': IMAGENET_DEFAULT_STD, 'classifier': ('head', 'head_dist'), 'first_conv': 'stem.conv1.conv', **kwargs } default_cfgs = generate_default_cfgs({ 'efficientformerv2_s0.snap_dist_in1k': _cfg( hf_hub_id='timm/', ), 'efficientformerv2_s1.snap_dist_in1k': _cfg( hf_hub_id='timm/', ), 'efficientformerv2_s2.snap_dist_in1k': _cfg( hf_hub_id='timm/', ), 'efficientformerv2_l.snap_dist_in1k': _cfg( hf_hub_id='timm/', ), }) def _create_efficientformerv2(variant, pretrained=False, **kwargs): out_indices = kwargs.pop('out_indices', (0, 1, 2, 3)) model = build_model_with_cfg( EfficientFormerV2, variant, pretrained, feature_cfg=dict(flatten_sequential=True, out_indices=out_indices), **kwargs) return model @register_model def efficientformerv2_s0(pretrained=False, **kwargs) -> EfficientFormerV2: model_args = dict( depths=EfficientFormer_depth['S0'], embed_dims=EfficientFormer_width['S0'], num_vit=2, drop_path_rate=0.0, mlp_ratios=EfficientFormer_expansion_ratios['S0'], ) return 
_create_efficientformerv2('efficientformerv2_s0', pretrained=pretrained, **dict(model_args, **kwargs)) @register_model def efficientformerv2_s1(pretrained=False, **kwargs) -> EfficientFormerV2: model_args = dict( depths=EfficientFormer_depth['S1'], embed_dims=EfficientFormer_width['S1'], num_vit=2, drop_path_rate=0.0, mlp_ratios=EfficientFormer_expansion_ratios['S1'], ) return _create_efficientformerv2('efficientformerv2_s1', pretrained=pretrained, **dict(model_args, **kwargs)) @register_model def efficientformerv2_s2(pretrained=False, **kwargs) -> EfficientFormerV2: model_args = dict( depths=EfficientFormer_depth['S2'], embed_dims=EfficientFormer_width['S2'], num_vit=4, drop_path_rate=0.02, mlp_ratios=EfficientFormer_expansion_ratios['S2'], ) return _create_efficientformerv2('efficientformerv2_s2', pretrained=pretrained, **dict(model_args, **kwargs)) @register_model def efficientformerv2_l(pretrained=False, **kwargs) -> EfficientFormerV2: model_args = dict( depths=EfficientFormer_depth['L'], embed_dims=EfficientFormer_width['L'], num_vit=6, drop_path_rate=0.1, mlp_ratios=EfficientFormer_expansion_ratios['L'], ) return _create_efficientformerv2('efficientformerv2_l', pretrained=pretrained, **dict(model_args, **kwargs))
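# --- Usage sketch (not part of the original file) ---
# A minimal, hedged example of driving the EfficientFormerV2 entrypoints above,
# assuming this module is installed as part of `timm` so the @register_model
# names resolve; the num_classes value and shapes below are illustrative only.
import timm
import torch

model = timm.create_model('efficientformerv2_s0', pretrained=False, num_classes=10)
model.eval()

x = torch.randn(1, 3, 224, 224)  # default cfgs use fixed 224x224 inputs
with torch.no_grad():
    logits = model(x)  # distilled head averages head and head_dist in eval mode
print(logits.shape)  # torch.Size([1, 10])

# Feature extraction goes through the out_indices wired up in _create_efficientformerv2
features = timm.create_model('efficientformerv2_s0', pretrained=False, features_only=True)
with torch.no_grad():
    fmaps = features(x)
print([f.shape for f in fmaps])  # four maps, one per stage in feature_info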
pytorch-image-models/timm/models/efficientformer_v2.py
import math from copy import deepcopy from functools import partial from typing import Callable, Dict, List, Optional, Tuple, Union import torch import torch.nn as nn import torch.nn.functional as F from torch.jit import Final from timm.data import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD from timm.layers import PatchEmbed, Mlp, DropPath, ClNormMlpClassifierHead, LayerScale, \ get_norm_layer, get_act_layer, init_weight_jax, init_weight_vit, to_2tuple, use_fused_attn from ._builder import build_model_with_cfg from ._features import feature_take_indices from ._manipulate import named_apply, checkpoint_seq, adapt_input_conv from ._registry import generate_default_cfgs, register_model, register_model_deprecations def window_partition(x, window_size: Tuple[int, int]): """ Partition into non-overlapping windows with padding if needed. Args: x (tensor): input tokens with [B, H, W, C]. window_size (int): window size. Returns: windows: windows after partition with [B * num_windows, window_size, window_size, C]. (Hp, Wp): padded height and width before partition """ B, H, W, C = x.shape x = x.view(B, H // window_size[0], window_size[0], W // window_size[1], window_size[1], C) windows = x.permute(0, 1, 3, 2, 4, 5).contiguous().view(-1, window_size[0], window_size[1], C) return windows def window_unpartition(windows: torch.Tensor, window_size: Tuple[int, int], hw: Tuple[int, int]): """ Window unpartition into original sequences and removing padding. Args: x (tensor): input tokens with [B * num_windows, window_size, window_size, C]. window_size (int): window size. hw (Tuple): original height and width (H, W) before padding. Returns: x: unpartitioned sequences with [B, H, W, C]. """ H, W = hw B = windows.shape[0] // (H * W // window_size[0] // window_size[1]) x = windows.view(B, H // window_size[0], W // window_size[1], window_size[0], window_size[1], -1) x = x.permute(0, 1, 3, 2, 4, 5).contiguous().view(B, H, W, -1) return x def _calc_pad(H: int, W: int, window_size: Tuple[int, int]) -> Tuple[int, int, int, int]: pad_h = (window_size[0] - H % window_size[0]) % window_size[0] pad_w = (window_size[1] - W % window_size[1]) % window_size[1] Hp, Wp = H + pad_h, W + pad_w return Hp, Wp, pad_h, pad_w class MultiScaleAttention(nn.Module): fused_attn: torch.jit.Final[bool] def __init__( self, dim: int, dim_out: int, num_heads: int, q_pool: nn.Module = None, ): super().__init__() self.dim = dim self.dim_out = dim_out self.num_heads = num_heads head_dim = dim_out // num_heads self.scale = head_dim ** -0.5 self.fused_attn = use_fused_attn() self.q_pool = q_pool self.qkv = nn.Linear(dim, dim_out * 3) self.proj = nn.Linear(dim_out, dim_out) def forward(self, x: torch.Tensor) -> torch.Tensor: B, H, W, _ = x.shape # qkv with shape (B, H * W, 3, nHead, C) qkv = self.qkv(x).reshape(B, H * W, 3, self.num_heads, -1) # q, k, v with shape (B, H * W, nheads, C) q, k, v = torch.unbind(qkv, 2) # Q pooling (for downsample at stage changes) if self.q_pool is not None: q = q.reshape(B, H, W, -1).permute(0, 3, 1, 2) # to BCHW for pool q = self.q_pool(q).permute(0, 2, 3, 1) H, W = q.shape[1:3] # downsampled shape q = q.reshape(B, H * W, self.num_heads, -1) # Torch's SDPA expects [B, nheads, H*W, C] so we transpose q = q.transpose(1, 2) k = k.transpose(1, 2) v = v.transpose(1, 2) if self.fused_attn: x = F.scaled_dot_product_attention(q, k, v) else: q = q * self.scale attn = q @ k.transpose(-1, -2) attn = attn.softmax(dim=-1) x = attn @ v # Transpose back x = x.transpose(1, 2).reshape(B, H, W, -1) x = self.proj(x) return x class 
MultiScaleBlock(nn.Module): def __init__( self, dim: int, dim_out: int, num_heads: int, mlp_ratio: float = 4.0, q_stride: Optional[Tuple[int, int]] = None, norm_layer: Union[nn.Module, str] = "LayerNorm", act_layer: Union[nn.Module, str] = "GELU", window_size: int = 0, init_values: Optional[float] = None, drop_path: float = 0.0, ): super().__init__() norm_layer = get_norm_layer(norm_layer) act_layer = get_act_layer(act_layer) self.window_size = to_2tuple(window_size) self.is_windowed = any(self.window_size) self.dim = dim self.dim_out = dim_out self.q_stride = q_stride if dim != dim_out: self.proj = nn.Linear(dim, dim_out) else: self.proj = nn.Identity() self.pool = None if self.q_stride: # note make a different instance for this Module so that it's not shared with attn module self.pool = nn.MaxPool2d( kernel_size=q_stride, stride=q_stride, ceil_mode=False, ) self.norm1 = norm_layer(dim) self.attn = MultiScaleAttention( dim, dim_out, num_heads=num_heads, q_pool=deepcopy(self.pool), ) self.ls1 = LayerScale(dim_out, init_values) if init_values is not None else nn.Identity() self.drop_path1 = DropPath(drop_path) if drop_path > 0.0 else nn.Identity() self.norm2 = norm_layer(dim_out) self.mlp = Mlp( dim_out, int(dim_out * mlp_ratio), act_layer=act_layer, ) self.ls2 = LayerScale(dim_out, init_values) if init_values is not None else nn.Identity() self.drop_path2 = DropPath(drop_path) if drop_path > 0.0 else nn.Identity() def forward(self, x: torch.Tensor) -> torch.Tensor: shortcut = x # B, H, W, C x = self.norm1(x) # Skip connection if self.dim != self.dim_out: shortcut = self.proj(x) if self.pool is not None: shortcut = shortcut.permute(0, 3, 1, 2) shortcut = self.pool(shortcut).permute(0, 2, 3, 1) # Window partition window_size = self.window_size H, W = x.shape[1:3] Hp, Wp = H, W # keep torchscript happy if self.is_windowed: Hp, Wp, pad_h, pad_w = _calc_pad(H, W, window_size) x = F.pad(x, (0, 0, 0, pad_w, 0, pad_h)) x = window_partition(x, window_size) # Window Attention + Q Pooling (if stage change) x = self.attn(x) if self.q_stride is not None: # Shapes have changed due to Q pooling window_size = (self.window_size[0] // self.q_stride[0], self.window_size[1] // self.q_stride[1]) H, W = shortcut.shape[1:3] Hp, Wp, pad_h, pad_w = _calc_pad(H, W, window_size) # Reverse window partition if self.is_windowed: x = window_unpartition(x, window_size, (Hp, Wp)) x = x[:, :H, :W, :].contiguous() # unpad x = shortcut + self.drop_path1(self.ls1(x)) x = x + self.drop_path2(self.ls2(self.mlp(self.norm2(x)))) return x class HieraPatchEmbed(nn.Module): """ Image to Patch Embedding. """ def __init__( self, kernel_size: Tuple[int, ...] = (7, 7), stride: Tuple[int, ...] = (4, 4), padding: Tuple[int, ...] = (3, 3), in_chans: int = 3, embed_dim: int = 768, ): """ Args: kernel_size (Tuple): kernel size of the projection layer. stride (Tuple): stride of the projection layer. padding (Tuple): padding size of the projection layer. in_chans (int): Number of input image channels. embed_dim (int): embed_dim (int): Patch embedding dimension. 
""" super().__init__() self.proj = nn.Conv2d( in_chans, embed_dim, kernel_size=kernel_size, stride=stride, padding=padding ) def forward(self, x: torch.Tensor) -> torch.Tensor: x = self.proj(x) # B C H W -> B H W C x = x.permute(0, 2, 3, 1) return x class HieraDet(nn.Module): """ Reference: https://arxiv.org/abs/2306.00989 """ def __init__( self, in_chans: int = 3, num_classes: int = 1000, global_pool: str = 'avg', embed_dim: int = 96, # initial embed dim num_heads: int = 1, # initial number of heads patch_kernel: Tuple[int, ...] = (7, 7), patch_stride: Tuple[int, ...] = (4, 4), patch_padding: Tuple[int, ...] = (3, 3), patch_size: Optional[Tuple[int, ...]] = None, q_pool: int = 3, # number of q_pool stages q_stride: Tuple[int, int] = (2, 2), # downsample stride bet. stages stages: Tuple[int, ...] = (2, 3, 16, 3), # blocks per stage dim_mul: float = 2.0, # dim_mul factor at stage shift head_mul: float = 2.0, # head_mul factor at stage shift global_pos_size: Tuple[int, int] = (7, 7), # window size per stage, when not using global att. window_spec: Tuple[int, ...] = ( 8, 4, 14, 7, ), # global attn in these blocks global_att_blocks: Tuple[int, ...] = ( 12, 16, 20, ), init_values: Optional[float] = None, weight_init: str = '', fix_init: bool = True, head_init_scale: float = 0.001, drop_rate: float = 0.0, drop_path_rate: float = 0.0, # stochastic depth norm_layer: Union[nn.Module, str] = "LayerNorm", act_layer: Union[nn.Module, str] = "GELU", ): super().__init__() norm_layer = get_norm_layer(norm_layer) act_layer = get_act_layer(act_layer) assert len(stages) == len(window_spec) self.num_classes = num_classes self.window_spec = window_spec self.output_fmt = 'NHWC' depth = sum(stages) self.q_stride = q_stride self.stage_ends = [sum(stages[:i]) - 1 for i in range(1, len(stages) + 1)] assert 0 <= q_pool <= len(self.stage_ends[:-1]) self.q_pool_blocks = [x + 1 for x in self.stage_ends[:-1]][:q_pool] if patch_size is not None: # use a non-overlapping vit style patch embed self.patch_embed = PatchEmbed( img_size=None, patch_size=patch_size, in_chans=in_chans, embed_dim=embed_dim, output_fmt='NHWC', dynamic_img_pad=True, ) else: self.patch_embed = HieraPatchEmbed( kernel_size=patch_kernel, stride=patch_stride, padding=patch_padding, in_chans=in_chans, embed_dim=embed_dim, ) # Which blocks have global att? 
self.global_att_blocks = global_att_blocks # Windowed positional embedding (https://arxiv.org/abs/2311.05613) self.global_pos_size = global_pos_size self.pos_embed = nn.Parameter(torch.zeros(1, embed_dim, *self.global_pos_size)) self.pos_embed_window = nn.Parameter(torch.zeros(1, embed_dim, self.window_spec[0], self.window_spec[0])) dpr = [x.item() for x in torch.linspace(0, drop_path_rate, depth)] # stochastic depth decay rule cur_stage = 0 self.blocks = nn.Sequential() self.feature_info = [] for i in range(depth): dim_out = embed_dim # lags by a block, so first block of # next stage uses an initial window size # of previous stage and final window size of current stage window_size = self.window_spec[cur_stage] if self.global_att_blocks is not None: window_size = 0 if i in self.global_att_blocks else window_size if i - 1 in self.stage_ends: dim_out = int(embed_dim * dim_mul) num_heads = int(num_heads * head_mul) cur_stage += 1 block = MultiScaleBlock( dim=embed_dim, dim_out=dim_out, num_heads=num_heads, drop_path=dpr[i], q_stride=self.q_stride if i in self.q_pool_blocks else None, window_size=window_size, norm_layer=norm_layer, act_layer=act_layer, ) embed_dim = dim_out self.blocks.append(block) if i in self.stage_ends: self.feature_info += [ dict(num_chs=dim_out, reduction=2**(cur_stage+2), module=f'blocks.{self.stage_ends[cur_stage]}')] self.num_features = self.head_hidden_size = embed_dim self.head = ClNormMlpClassifierHead( embed_dim, num_classes, pool_type=global_pool, drop_rate=drop_rate, norm_layer=norm_layer, ) # Initialize everything if self.pos_embed is not None: nn.init.trunc_normal_(self.pos_embed, std=0.02) if self.pos_embed_window is not None: nn.init.trunc_normal_(self.pos_embed_window, std=0.02) if weight_init != 'skip': init_fn = init_weight_jax if weight_init == 'jax' else init_weight_vit init_fn = partial(init_fn, classifier_name='head.fc') named_apply(init_fn, self) if fix_init: self.fix_init_weight() if isinstance(self.head, ClNormMlpClassifierHead) and isinstance(self.head.fc, nn.Linear): self.head.fc.weight.data.mul_(head_init_scale) self.head.fc.bias.data.mul_(head_init_scale) def _pos_embed(self, x: torch.Tensor) -> torch.Tensor: h, w = x.shape[1:3] window_embed = self.pos_embed_window pos_embed = F.interpolate(self.pos_embed, size=(h, w), mode="bicubic") tile_h = pos_embed.shape[-2] // window_embed.shape[-2] tile_w = pos_embed.shape[-1] // window_embed.shape[-1] pos_embed = pos_embed + window_embed.tile((tile_h, tile_w)) pos_embed = pos_embed.permute(0, 2, 3, 1) return x + pos_embed def fix_init_weight(self): def rescale(param, _layer_id): param.div_(math.sqrt(2.0 * _layer_id)) for layer_id, layer in enumerate(self.blocks): rescale(layer.attn.proj.weight.data, layer_id + 1) rescale(layer.mlp.fc2.weight.data, layer_id + 1) @torch.jit.ignore def no_weight_decay(self): return ['pos_embed', 'pos_embed_window'] @torch.jit.ignore def group_matcher(self, coarse: bool = False) -> Dict: return dict( stem=r'^pos_embed|pos_embed_window|patch_embed', blocks=[(r'^blocks\.(\d+)', None)] ) @torch.jit.ignore def set_grad_checkpointing(self, enable: bool = True) -> None: self.grad_checkpointing = enable @torch.jit.ignore def get_classifier(self): return self.head.fc def reset_classifier(self, num_classes: int, global_pool: Optional[str] = None, reset_other: bool = False): self.num_classes = num_classes self.head.reset(num_classes, pool_type=global_pool, reset_other=reset_other) def forward_intermediates( self, x: torch.Tensor, indices: Optional[Union[int, List[int]]] = None, norm: 
bool = False, stop_early: bool = True, output_fmt: str = 'NCHW', intermediates_only: bool = False, coarse: bool = True, ) -> Union[List[torch.Tensor], Tuple[torch.Tensor, List[torch.Tensor]]]: """ Forward features that returns intermediates. Args: x: Input image tensor indices: Take last n blocks if int, all if None, select matching indices if sequence norm: Apply norm layer to all intermediates stop_early: Stop iterating over blocks when last desired intermediate hit output_fmt: Shape of intermediate feature outputs intermediates_only: Only return intermediate features coarse: Take coarse features (stage ends) if true, otherwise all block featrures Returns: """ assert not norm, 'normalization of features not supported' assert output_fmt in ('NCHW', 'NHWC'), 'Output format must be one of NCHW, NHWC.' if coarse: take_indices, max_index = feature_take_indices(len(self.stage_ends), indices) take_indices = [self.stage_ends[i] for i in take_indices] max_index = self.stage_ends[max_index] else: take_indices, max_index = feature_take_indices(len(self.blocks), indices) x = self.patch_embed(x) x = self._pos_embed(x) intermediates = [] if torch.jit.is_scripting() or not stop_early: # can't slice blocks in torchscript blocks = self.blocks else: blocks = self.blocks[:max_index + 1] for i, blk in enumerate(blocks): x = blk(x) if i in take_indices: x_out = x.permute(0, 3, 1, 2) if output_fmt == 'NCHW' else x intermediates.append(x_out) if intermediates_only: return intermediates return x, intermediates def prune_intermediate_layers( self, indices: Union[int, List[int]] = 1, prune_norm: bool = False, prune_head: bool = True, coarse: bool = True, ): """ Prune layers not required for specified intermediates. """ if coarse: take_indices, max_index = feature_take_indices(len(self.stage_ends), indices) max_index = self.stage_ends[max_index] else: take_indices, max_index = feature_take_indices(len(self.blocks), indices) self.blocks = self.blocks[:max_index + 1] # truncate blocks if prune_head: self.head.reset(0, reset_other=prune_norm) return take_indices def forward_features(self, x: torch.Tensor) -> torch.Tensor: x = self.patch_embed(x) # BHWC x = self._pos_embed(x) for i, blk in enumerate(self.blocks): x = blk(x) return x def forward_head(self, x, pre_logits: bool = False) -> torch.Tensor: x = self.head(x, pre_logits=pre_logits) if pre_logits else self.head(x) return x def forward(self, x: torch.Tensor) -> torch.Tensor: x = self.forward_features(x) x = self.forward_head(x) return x # NOTE sam2 appears to use 1024x1024 for all models, but T, S, & B+ have windows that fit multiples of 224. 
def _cfg(url='', **kwargs): return { 'url': url, 'num_classes': 0, 'input_size': (3, 896, 896), 'pool_size': (28, 28), 'crop_pct': 1.0, 'interpolation': 'bicubic', 'min_input_size': (3, 224, 224), 'mean': IMAGENET_DEFAULT_MEAN, 'std': IMAGENET_DEFAULT_STD, 'first_conv': 'patch_embed.proj', 'classifier': 'head.fc', **kwargs } default_cfgs = generate_default_cfgs({ "sam2_hiera_tiny.r224": _cfg( hf_hub_id='facebook/sam2-hiera-tiny', hf_hub_filename='sam2_hiera_tiny.pt', input_size=(3, 224, 224), pool_size=(7, 7), ), # FIXME reduced res for testing "sam2_hiera_tiny.r896": _cfg( hf_hub_id='facebook/sam2-hiera-tiny', hf_hub_filename='sam2_hiera_tiny.pt', ), "sam2_hiera_small": _cfg( hf_hub_id='facebook/sam2-hiera-small', hf_hub_filename='sam2_hiera_small.pt', ), "sam2_hiera_base_plus": _cfg( hf_hub_id='facebook/sam2-hiera-base-plus', hf_hub_filename='sam2_hiera_base_plus.pt', ), "sam2_hiera_large": _cfg( hf_hub_id='facebook/sam2-hiera-large', hf_hub_filename='sam2_hiera_large.pt', min_input_size=(3, 256, 256), input_size=(3, 1024, 1024), pool_size=(32, 32), ), "hieradet_small.untrained": _cfg( num_classes=1000, input_size=(3, 256, 256), pool_size=(8, 8), ), }) def checkpoint_filter_fn(state_dict, model=None, prefix=''): state_dict = state_dict.get('model', state_dict) output = {} for k, v in state_dict.items(): if k.startswith(prefix): k = k.replace(prefix, '') else: continue k = k.replace('mlp.layers.0', 'mlp.fc1') k = k.replace('mlp.layers.1', 'mlp.fc2') output[k] = v return output def _create_hiera_det(variant: str, pretrained: bool = False, **kwargs) -> HieraDet: out_indices = kwargs.pop('out_indices', 4) checkpoint_prefix = '' if 'sam2' in variant: # SAM2 pretrained weights have no classifier or final norm-layer (`head.norm`) # This is workaround loading with num_classes=0 w/o removing norm-layer. kwargs.setdefault('pretrained_strict', False) checkpoint_prefix = 'image_encoder.trunk.' 
return build_model_with_cfg( HieraDet, variant, pretrained, pretrained_filter_fn=partial(checkpoint_filter_fn, prefix=checkpoint_prefix), feature_cfg=dict(out_indices=out_indices, feature_cls='getter'), **kwargs, ) @register_model def sam2_hiera_tiny(pretrained=False, **kwargs): model_args = dict(stages=(1, 2, 7, 2), global_att_blocks=(5, 7, 9)) return _create_hiera_det('sam2_hiera_tiny', pretrained=pretrained, **dict(model_args, **kwargs)) @register_model def sam2_hiera_small(pretrained=False, **kwargs): model_args = dict(stages=(1, 2, 11, 2), global_att_blocks=(7, 10, 13)) return _create_hiera_det('sam2_hiera_small', pretrained=pretrained, **dict(model_args, **kwargs)) @register_model def sam2_hiera_base_plus(pretrained=False, **kwargs): model_args = dict(embed_dim=112, num_heads=2, global_pos_size=(14, 14)) return _create_hiera_det('sam2_hiera_base_plus', pretrained=pretrained, **dict(model_args, **kwargs)) @register_model def sam2_hiera_large(pretrained=False, **kwargs): model_args = dict( embed_dim=144, num_heads=2, stages=(2, 6, 36, 4), global_att_blocks=(23, 33, 43), window_spec=(8, 4, 16, 8), ) return _create_hiera_det('sam2_hiera_large', pretrained=pretrained, **dict(model_args, **kwargs)) @register_model def hieradet_small(pretrained=False, **kwargs): model_args = dict(stages=(1, 2, 11, 2), global_att_blocks=(7, 10, 13), window_spec=(8, 4, 16, 8), init_values=1e-5) return _create_hiera_det('hieradet_small', pretrained=pretrained, **dict(model_args, **kwargs)) # @register_model # def hieradet_base(pretrained=False, **kwargs): # model_args = dict(window_spec=(8, 4, 16, 8)) # return _create_hiera_det('hieradet_base', pretrained=pretrained, **dict(model_args, **kwargs))
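# --- Usage sketch (not part of the original file) ---
# A minimal, hedged example for the HieraDet entrypoints above, assuming the
# module is registered with timm; 'hieradet_small' only has an .untrained
# config here, so pretrained=False is the realistic setting, and the printed
# shapes are illustrative.
import timm
import torch

model = timm.create_model('hieradet_small', pretrained=False)
model.eval()

x = torch.randn(1, 3, 256, 256)  # matches the untrained cfg's input_size
with torch.no_grad():
    logits = model(x)
    # coarse=True (the default) returns one NCHW feature map per stage end
    feats = model.forward_intermediates(x, intermediates_only=True)
print(logits.shape)              # torch.Size([1, 1000])
print([f.shape for f in feats])  # one map per entry in feature_info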
pytorch-image-models/timm/models/hieradet_sam2.py
""" Nested Transformer (NesT) in PyTorch A PyTorch implement of Aggregating Nested Transformers as described in: 'Aggregating Nested Transformers' - https://arxiv.org/abs/2105.12723 The official Jax code is released and available at https://github.com/google-research/nested-transformer. The weights have been converted with convert/convert_nest_flax.py Acknowledgments: * The paper authors for sharing their research, code, and model weights * Ross Wightman's existing code off which I based this Copyright 2021 Alexander Soare """ import collections.abc import logging import math from functools import partial import torch import torch.nn.functional as F from torch import nn from timm.data import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD from timm.layers import PatchEmbed, Mlp, DropPath, create_classifier, trunc_normal_, _assert from timm.layers import create_conv2d, create_pool2d, to_ntuple, use_fused_attn, LayerNorm from ._builder import build_model_with_cfg from ._features_fx import register_notrace_function from ._manipulate import checkpoint_seq, named_apply from ._registry import register_model, generate_default_cfgs, register_model_deprecations __all__ = ['Nest'] # model_registry will add each entrypoint fn to this _logger = logging.getLogger(__name__) class Attention(nn.Module): """ This is much like `.vision_transformer.Attention` but uses *localised* self attention by accepting an input with an extra "image block" dim """ fused_attn: torch.jit.Final[bool] def __init__(self, dim, num_heads=8, qkv_bias=False, attn_drop=0., proj_drop=0.): super().__init__() self.num_heads = num_heads head_dim = dim // num_heads self.scale = head_dim ** -0.5 self.fused_attn = use_fused_attn() self.qkv = nn.Linear(dim, 3*dim, bias=qkv_bias) self.attn_drop = nn.Dropout(attn_drop) self.proj = nn.Linear(dim, dim) self.proj_drop = nn.Dropout(proj_drop) def forward(self, x): """ x is shape: B (batch_size), T (image blocks), N (seq length per image block), C (embed dim) """ B, T, N, C = x.shape # result of next line is (qkv, B, num (H)eads, T, N, (C')hannels per head) qkv = self.qkv(x).reshape(B, T, N, 3, self.num_heads, C // self.num_heads).permute(3, 0, 4, 1, 2, 5) q, k, v = qkv.unbind(0) # make torchscript happy (cannot use tensor as tuple) if self.fused_attn: x = F.scaled_dot_product_attention(q, k, v, dropout_p=self.attn_drop.p if self.training else 0.) else: q = q * self.scale attn = q @ k.transpose(-2, -1) # (B, H, T, N, N) attn = attn.softmax(dim=-1) attn = self.attn_drop(attn) x = attn @ v # (B, H, T, N, C'), permute -> (B, T, N, C', H) x = x.permute(0, 2, 3, 4, 1).reshape(B, T, N, C) x = self.proj(x) x = self.proj_drop(x) return x # (B, T, N, C) class TransformerLayer(nn.Module): """ This is much like `.vision_transformer.Block` but: - Called TransformerLayer here to allow for "block" as defined in the paper ("non-overlapping image blocks") - Uses modified Attention layer that handles the "block" dimension """ def __init__( self, dim, num_heads, mlp_ratio=4., qkv_bias=False, proj_drop=0., attn_drop=0., drop_path=0., act_layer=nn.GELU, norm_layer=nn.LayerNorm, ): super().__init__() self.norm1 = norm_layer(dim) self.attn = Attention( dim, num_heads=num_heads, qkv_bias=qkv_bias, attn_drop=attn_drop, proj_drop=proj_drop, ) self.drop_path = DropPath(drop_path) if drop_path > 0. 
else nn.Identity() self.norm2 = norm_layer(dim) mlp_hidden_dim = int(dim * mlp_ratio) self.mlp = Mlp( in_features=dim, hidden_features=mlp_hidden_dim, act_layer=act_layer, drop=proj_drop, ) def forward(self, x): y = self.norm1(x) x = x + self.drop_path(self.attn(y)) x = x + self.drop_path(self.mlp(self.norm2(x))) return x class ConvPool(nn.Module): def __init__(self, in_channels, out_channels, norm_layer, pad_type=''): super().__init__() self.conv = create_conv2d(in_channels, out_channels, kernel_size=3, padding=pad_type, bias=True) self.norm = norm_layer(out_channels) self.pool = create_pool2d('max', kernel_size=3, stride=2, padding=pad_type) def forward(self, x): """ x is expected to have shape (B, C, H, W) """ _assert(x.shape[-2] % 2 == 0, 'BlockAggregation requires even input spatial dims') _assert(x.shape[-1] % 2 == 0, 'BlockAggregation requires even input spatial dims') x = self.conv(x) # Layer norm done over channel dim only x = self.norm(x.permute(0, 2, 3, 1)).permute(0, 3, 1, 2) x = self.pool(x) return x # (B, C, H//2, W//2) def blockify(x, block_size: int): """image to blocks Args: x (Tensor): with shape (B, H, W, C) block_size (int): edge length of a single square block in units of H, W """ B, H, W, C = x.shape _assert(H % block_size == 0, '`block_size` must divide input height evenly') _assert(W % block_size == 0, '`block_size` must divide input width evenly') grid_height = H // block_size grid_width = W // block_size x = x.reshape(B, grid_height, block_size, grid_width, block_size, C) x = x.transpose(2, 3).reshape(B, grid_height * grid_width, -1, C) return x # (B, T, N, C) @register_notrace_function # reason: int receives Proxy def deblockify(x, block_size: int): """blocks to image Args: x (Tensor): with shape (B, T, N, C) where T is number of blocks and N is sequence size per block block_size (int): edge length of a single square block in units of desired H, W """ B, T, _, C = x.shape grid_size = int(math.sqrt(T)) height = width = grid_size * block_size x = x.reshape(B, grid_size, grid_size, block_size, block_size, C) x = x.transpose(2, 3).reshape(B, height, width, C) return x # (B, H, W, C) class NestLevel(nn.Module): """ Single hierarchical level of a Nested Transformer """ def __init__( self, num_blocks, block_size, seq_length, num_heads, depth, embed_dim, prev_embed_dim=None, mlp_ratio=4., qkv_bias=True, proj_drop=0., attn_drop=0., drop_path=[], norm_layer=None, act_layer=None, pad_type='', ): super().__init__() self.block_size = block_size self.grad_checkpointing = False self.pos_embed = nn.Parameter(torch.zeros(1, num_blocks, seq_length, embed_dim)) if prev_embed_dim is not None: self.pool = ConvPool(prev_embed_dim, embed_dim, norm_layer=norm_layer, pad_type=pad_type) else: self.pool = nn.Identity() # Transformer encoder if len(drop_path): assert len(drop_path) == depth, 'Must provide as many drop path rates as there are transformer layers' self.transformer_encoder = nn.Sequential(*[ TransformerLayer( dim=embed_dim, num_heads=num_heads, mlp_ratio=mlp_ratio, qkv_bias=qkv_bias, proj_drop=proj_drop, attn_drop=attn_drop, drop_path=drop_path[i], norm_layer=norm_layer, act_layer=act_layer, ) for i in range(depth)]) def forward(self, x): """ expects x as (B, C, H, W) """ x = self.pool(x) x = x.permute(0, 2, 3, 1) # (B, H', W', C), switch to channels last for transformer x = blockify(x, self.block_size) # (B, T, N, C') x = x + self.pos_embed if self.grad_checkpointing and not torch.jit.is_scripting(): x = checkpoint_seq(self.transformer_encoder, x) else: x = 
self.transformer_encoder(x) # (B, T, N, C') x = deblockify(x, self.block_size) # (B, H', W', C') # Channel-first for block aggregation, and generally to replicate convnet feature map at each stage return x.permute(0, 3, 1, 2) # (B, C, H', W') class Nest(nn.Module): """ Nested Transformer (NesT) A PyTorch impl of : `Aggregating Nested Transformers` - https://arxiv.org/abs/2105.12723 """ def __init__( self, img_size=224, in_chans=3, patch_size=4, num_levels=3, embed_dims=(128, 256, 512), num_heads=(4, 8, 16), depths=(2, 2, 20), num_classes=1000, mlp_ratio=4., qkv_bias=True, drop_rate=0., proj_drop_rate=0., attn_drop_rate=0., drop_path_rate=0.5, norm_layer=None, act_layer=None, pad_type='', weight_init='', global_pool='avg', ): """ Args: img_size (int, tuple): input image size in_chans (int): number of input channels patch_size (int): patch size num_levels (int): number of block hierarchies (T_d in the paper) embed_dims (int, tuple): embedding dimensions of each level num_heads (int, tuple): number of attention heads for each level depths (int, tuple): number of transformer layers for each level num_classes (int): number of classes for classification head mlp_ratio (int): ratio of mlp hidden dim to embedding dim for MLP of transformer layers qkv_bias (bool): enable bias for qkv if True drop_rate (float): dropout rate for MLP of transformer layers, MSA final projection layer, and classifier attn_drop_rate (float): attention dropout rate drop_path_rate (float): stochastic depth rate norm_layer: (nn.Module): normalization layer for transformer layers act_layer: (nn.Module): activation layer in MLP of transformer layers pad_type: str: Type of padding to use '' for PyTorch symmetric, 'same' for TF SAME weight_init: (str): weight init scheme global_pool: (str): type of pooling operation to apply to final feature map Notes: - Default values follow NesT-B from the original Jax code. - `embed_dims`, `num_heads`, `depths` should be ints or tuples with length `num_levels`. - For those following the paper, Table A1 may have errors! - https://github.com/google-research/nested-transformer/issues/2 """ super().__init__() for param_name in ['embed_dims', 'num_heads', 'depths']: param_value = locals()[param_name] if isinstance(param_value, collections.abc.Sequence): assert len(param_value) == num_levels, f'Require `len({param_name}) == num_levels`' embed_dims = to_ntuple(num_levels)(embed_dims) num_heads = to_ntuple(num_levels)(num_heads) depths = to_ntuple(num_levels)(depths) self.num_classes = num_classes self.num_features = self.head_hidden_size = embed_dims[-1] self.feature_info = [] norm_layer = norm_layer or LayerNorm act_layer = act_layer or nn.GELU self.drop_rate = drop_rate self.num_levels = num_levels if isinstance(img_size, collections.abc.Sequence): assert img_size[0] == img_size[1], 'Model only handles square inputs' img_size = img_size[0] assert img_size % patch_size == 0, '`patch_size` must divide `img_size` evenly' self.patch_size = patch_size # Number of blocks at each level self.num_blocks = (4 ** torch.arange(num_levels)).flip(0).tolist() assert (img_size // patch_size) % math.sqrt(self.num_blocks[0]) == 0, \ 'First level blocks don\'t fit evenly. Check `img_size`, `patch_size`, and `num_levels`' # Block edge size in units of patches # Hint: (img_size // patch_size) gives number of patches along edge of image. 
sqrt(self.num_blocks[0]) is the # number of blocks along edge of image self.block_size = int((img_size // patch_size) // math.sqrt(self.num_blocks[0])) # Patch embedding self.patch_embed = PatchEmbed( img_size=img_size, patch_size=patch_size, in_chans=in_chans, embed_dim=embed_dims[0], flatten=False, ) self.num_patches = self.patch_embed.num_patches self.seq_length = self.num_patches // self.num_blocks[0] # Build up each hierarchical level levels = [] dp_rates = [x.tolist() for x in torch.linspace(0, drop_path_rate, sum(depths)).split(depths)] prev_dim = None curr_stride = 4 for i in range(len(self.num_blocks)): dim = embed_dims[i] levels.append(NestLevel( self.num_blocks[i], self.block_size, self.seq_length, num_heads[i], depths[i], dim, prev_dim, mlp_ratio=mlp_ratio, qkv_bias=qkv_bias, proj_drop=proj_drop_rate, attn_drop=attn_drop_rate, drop_path=dp_rates[i], norm_layer=norm_layer, act_layer=act_layer, pad_type=pad_type, )) self.feature_info += [dict(num_chs=dim, reduction=curr_stride, module=f'levels.{i}')] prev_dim = dim curr_stride *= 2 self.levels = nn.Sequential(*levels) # Final normalization layer self.norm = norm_layer(embed_dims[-1]) # Classifier global_pool, head = create_classifier(self.num_features, self.num_classes, pool_type=global_pool) self.global_pool = global_pool self.head_drop = nn.Dropout(drop_rate) self.head = head self.init_weights(weight_init) @torch.jit.ignore def init_weights(self, mode=''): assert mode in ('nlhb', '') head_bias = -math.log(self.num_classes) if 'nlhb' in mode else 0. for level in self.levels: trunc_normal_(level.pos_embed, std=.02, a=-2, b=2) named_apply(partial(_init_nest_weights, head_bias=head_bias), self) @torch.jit.ignore def no_weight_decay(self): return {f'level.{i}.pos_embed' for i in range(len(self.levels))} @torch.jit.ignore def group_matcher(self, coarse=False): matcher = dict( stem=r'^patch_embed', # stem and embed blocks=[ (r'^levels\.(\d+)' if coarse else r'^levels\.(\d+)\.transformer_encoder\.(\d+)', None), (r'^levels\.(\d+)\.(?:pool|pos_embed)', (0,)), (r'^norm', (99999,)) ] ) return matcher @torch.jit.ignore def set_grad_checkpointing(self, enable=True): for l in self.levels: l.grad_checkpointing = enable @torch.jit.ignore def get_classifier(self) -> nn.Module: return self.head def reset_classifier(self, num_classes: int, global_pool: str = 'avg'): self.num_classes = num_classes self.global_pool, self.head = create_classifier( self.num_features, self.num_classes, pool_type=global_pool) def forward_features(self, x): x = self.patch_embed(x) x = self.levels(x) # Layer norm done over channel dim only (to NHWC and back) x = self.norm(x.permute(0, 2, 3, 1)).permute(0, 3, 1, 2) return x def forward_head(self, x, pre_logits: bool = False): x = self.global_pool(x) x = self.head_drop(x) return x if pre_logits else self.head(x) def forward(self, x): x = self.forward_features(x) x = self.forward_head(x) return x def _init_nest_weights(module: nn.Module, name: str = '', head_bias: float = 0.): """ NesT weight initialization Can replicate Jax implementation. 
Otherwise follows vision_transformer.py """ if isinstance(module, nn.Linear): if name.startswith('head'): trunc_normal_(module.weight, std=.02, a=-2, b=2) nn.init.constant_(module.bias, head_bias) else: trunc_normal_(module.weight, std=.02, a=-2, b=2) if module.bias is not None: nn.init.zeros_(module.bias) elif isinstance(module, nn.Conv2d): trunc_normal_(module.weight, std=.02, a=-2, b=2) if module.bias is not None: nn.init.zeros_(module.bias) def resize_pos_embed(posemb, posemb_new): """ Rescale the grid of position embeddings when loading from state_dict Expected shape of position embeddings is (1, T, N, C), and considers only square images """ _logger.info('Resized position embedding: %s to %s', posemb.shape, posemb_new.shape) seq_length_old = posemb.shape[2] num_blocks_new, seq_length_new = posemb_new.shape[1:3] size_new = int(math.sqrt(num_blocks_new*seq_length_new)) # First change to (1, C, H, W) posemb = deblockify(posemb, int(math.sqrt(seq_length_old))).permute(0, 3, 1, 2) posemb = F.interpolate(posemb, size=[size_new, size_new], mode='bicubic', align_corners=False) # Now change to new (1, T, N, C) posemb = blockify(posemb.permute(0, 2, 3, 1), int(math.sqrt(seq_length_new))) return posemb def checkpoint_filter_fn(state_dict, model): """ resize positional embeddings of pretrained weights """ pos_embed_keys = [k for k in state_dict.keys() if k.startswith('pos_embed_')] for k in pos_embed_keys: if state_dict[k].shape != getattr(model, k).shape: state_dict[k] = resize_pos_embed(state_dict[k], getattr(model, k)) return state_dict def _create_nest(variant, pretrained=False, **kwargs): model = build_model_with_cfg( Nest, variant, pretrained, feature_cfg=dict(out_indices=(0, 1, 2), flatten_sequential=True), pretrained_filter_fn=checkpoint_filter_fn, **kwargs, ) return model def _cfg(url='', **kwargs): return { 'url': url, 'num_classes': 1000, 'input_size': (3, 224, 224), 'pool_size': [14, 14], 'crop_pct': .875, 'interpolation': 'bicubic', 'fixed_input_size': True, 'mean': IMAGENET_DEFAULT_MEAN, 'std': IMAGENET_DEFAULT_STD, 'first_conv': 'patch_embed.proj', 'classifier': 'head', **kwargs } default_cfgs = generate_default_cfgs({ 'nest_base.untrained': _cfg(), 'nest_small.untrained': _cfg(), 'nest_tiny.untrained': _cfg(), # (weights from official Google JAX impl, require 'SAME' padding) 'nest_base_jx.goog_in1k': _cfg(hf_hub_id='timm/'), 'nest_small_jx.goog_in1k': _cfg(hf_hub_id='timm/'), 'nest_tiny_jx.goog_in1k': _cfg(hf_hub_id='timm/'), }) @register_model def nest_base(pretrained=False, **kwargs) -> Nest: """ Nest-B @ 224x224 """ model_kwargs = dict( embed_dims=(128, 256, 512), num_heads=(4, 8, 16), depths=(2, 2, 20), **kwargs) model = _create_nest('nest_base', pretrained=pretrained, **model_kwargs) return model @register_model def nest_small(pretrained=False, **kwargs) -> Nest: """ Nest-S @ 224x224 """ model_kwargs = dict(embed_dims=(96, 192, 384), num_heads=(3, 6, 12), depths=(2, 2, 20), **kwargs) model = _create_nest('nest_small', pretrained=pretrained, **model_kwargs) return model @register_model def nest_tiny(pretrained=False, **kwargs) -> Nest: """ Nest-T @ 224x224 """ model_kwargs = dict(embed_dims=(96, 192, 384), num_heads=(3, 6, 12), depths=(2, 2, 8), **kwargs) model = _create_nest('nest_tiny', pretrained=pretrained, **model_kwargs) return model @register_model def nest_base_jx(pretrained=False, **kwargs) -> Nest: """ Nest-B @ 224x224 """ kwargs.setdefault('pad_type', 'same') model_kwargs = dict( embed_dims=(128, 256, 512), num_heads=(4, 8, 16), depths=(2, 2, 20), **kwargs) model = 
_create_nest('nest_base_jx', pretrained=pretrained, **model_kwargs) return model @register_model def nest_small_jx(pretrained=False, **kwargs) -> Nest: """ Nest-S @ 224x224 """ kwargs.setdefault('pad_type', 'same') model_kwargs = dict(embed_dims=(96, 192, 384), num_heads=(3, 6, 12), depths=(2, 2, 20), **kwargs) model = _create_nest('nest_small_jx', pretrained=pretrained, **model_kwargs) return model @register_model def nest_tiny_jx(pretrained=False, **kwargs) -> Nest: """ Nest-T @ 224x224 """ kwargs.setdefault('pad_type', 'same') model_kwargs = dict(embed_dims=(96, 192, 384), num_heads=(3, 6, 12), depths=(2, 2, 8), **kwargs) model = _create_nest('nest_tiny_jx', pretrained=pretrained, **model_kwargs) return model register_model_deprecations(__name__, { 'jx_nest_base': 'nest_base_jx', 'jx_nest_small': 'nest_small_jx', 'jx_nest_tiny': 'nest_tiny_jx', })
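# --- Usage sketch (not part of the original file) ---
# A minimal, hedged example for the NesT entrypoints above, assuming timm
# registration. NesT models are fixed-input-size, so feed the configured
# 224x224 resolution.
import timm
import torch

model = timm.create_model('nest_tiny', pretrained=False)
model.eval()

x = torch.randn(1, 3, 224, 224)
with torch.no_grad():
    logits = model(x)
print(logits.shape)  # torch.Size([1, 1000])

# The (0, 1, 2) out_indices set in _create_nest expose the three levels
features = timm.create_model('nest_tiny', pretrained=False, features_only=True)
with torch.no_grad():
    fmaps = features(x)
print([f.shape for f in fmaps])  # reductions 4, 8, 16 per feature_info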
pytorch-image-models/timm/models/nest.py
"""PyTorch SelecSLS Net example for ImageNet Classification License: CC BY 4.0 (https://creativecommons.org/licenses/by/4.0/legalcode) Author: Dushyant Mehta (@mehtadushy) SelecSLS (core) Network Architecture as proposed in "XNect: Real-time Multi-person 3D Human Pose Estimation with a Single RGB Camera, Mehta et al." https://arxiv.org/abs/1907.00837 Based on ResNet implementation in https://github.com/rwightman/pytorch-image-models and SelecSLS Net implementation in https://github.com/mehtadushy/SelecSLS-Pytorch """ from typing import List import torch import torch.nn as nn import torch.nn.functional as F from timm.data import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD from timm.layers import create_classifier from ._builder import build_model_with_cfg from ._registry import register_model, generate_default_cfgs __all__ = ['SelecSls'] # model_registry will add each entrypoint fn to this class SequentialList(nn.Sequential): def __init__(self, *args): super(SequentialList, self).__init__(*args) @torch.jit._overload_method # noqa: F811 def forward(self, x): # type: (List[torch.Tensor]) -> (List[torch.Tensor]) pass @torch.jit._overload_method # noqa: F811 def forward(self, x): # type: (torch.Tensor) -> (List[torch.Tensor]) pass def forward(self, x) -> List[torch.Tensor]: for module in self: x = module(x) return x class SelectSeq(nn.Module): def __init__(self, mode='index', index=0): super(SelectSeq, self).__init__() self.mode = mode self.index = index @torch.jit._overload_method # noqa: F811 def forward(self, x): # type: (List[torch.Tensor]) -> (torch.Tensor) pass @torch.jit._overload_method # noqa: F811 def forward(self, x): # type: (Tuple[torch.Tensor]) -> (torch.Tensor) pass def forward(self, x) -> torch.Tensor: if self.mode == 'index': return x[self.index] else: return torch.cat(x, dim=1) def conv_bn(in_chs, out_chs, k=3, stride=1, padding=None, dilation=1): if padding is None: padding = ((stride - 1) + dilation * (k - 1)) // 2 return nn.Sequential( nn.Conv2d(in_chs, out_chs, k, stride, padding=padding, dilation=dilation, bias=False), nn.BatchNorm2d(out_chs), nn.ReLU(inplace=True) ) class SelecSlsBlock(nn.Module): def __init__(self, in_chs, skip_chs, mid_chs, out_chs, is_first, stride, dilation=1): super(SelecSlsBlock, self).__init__() self.stride = stride self.is_first = is_first assert stride in [1, 2] # Process input with 4 conv blocks with the same number of input and output channels self.conv1 = conv_bn(in_chs, mid_chs, 3, stride, dilation=dilation) self.conv2 = conv_bn(mid_chs, mid_chs, 1) self.conv3 = conv_bn(mid_chs, mid_chs // 2, 3) self.conv4 = conv_bn(mid_chs // 2, mid_chs, 1) self.conv5 = conv_bn(mid_chs, mid_chs // 2, 3) self.conv6 = conv_bn(2 * mid_chs + (0 if is_first else skip_chs), out_chs, 1) def forward(self, x: List[torch.Tensor]) -> List[torch.Tensor]: if not isinstance(x, list): x = [x] assert len(x) in [1, 2] d1 = self.conv1(x[0]) d2 = self.conv3(self.conv2(d1)) d3 = self.conv5(self.conv4(d2)) if self.is_first: out = self.conv6(torch.cat([d1, d2, d3], 1)) return [out, out] else: return [self.conv6(torch.cat([d1, d2, d3, x[1]], 1)), x[1]] class SelecSls(nn.Module): """SelecSls42 / SelecSls60 / SelecSls84 Parameters ---------- cfg : network config dictionary specifying block type, feature, and head args num_classes : int, default 1000 Number of classification classes. in_chans : int, default 3 Number of input (color) channels. drop_rate : float, default 0. Dropout probability before classifier, for training global_pool : str, default 'avg' Global pooling type. 
            One of 'avg', 'max', 'avgmax', 'catavgmax'
    """

    def __init__(self, cfg, num_classes=1000, in_chans=3, drop_rate=0.0, global_pool='avg'):
        self.num_classes = num_classes
        super(SelecSls, self).__init__()

        self.stem = conv_bn(in_chans, 32, stride=2)
        self.features = SequentialList(*[cfg['block'](*block_args) for block_args in cfg['features']])
        self.from_seq = SelectSeq()  # from List[tensor] -> Tensor in module compatible way
        self.head = nn.Sequential(*[conv_bn(*conv_args) for conv_args in cfg['head']])
        self.num_features = self.head_hidden_size = cfg['num_features']
        self.feature_info = cfg['feature_info']

        self.global_pool, self.head_drop, self.fc = create_classifier(
            self.num_features,
            self.num_classes,
            pool_type=global_pool,
            drop_rate=drop_rate,
        )

        for n, m in self.named_modules():
            if isinstance(m, nn.Conv2d):
                nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu')

    @torch.jit.ignore
    def group_matcher(self, coarse=False):
        return dict(
            stem=r'^stem',
            blocks=r'^features\.(\d+)',
            blocks_head=r'^head'
        )

    @torch.jit.ignore
    def set_grad_checkpointing(self, enable=True):
        assert not enable, 'gradient checkpointing not supported'

    @torch.jit.ignore
    def get_classifier(self) -> nn.Module:
        return self.fc

    def reset_classifier(self, num_classes: int, global_pool: str = 'avg'):
        self.num_classes = num_classes
        self.global_pool, self.fc = create_classifier(self.num_features, self.num_classes, pool_type=global_pool)

    def forward_features(self, x):
        x = self.stem(x)
        x = self.features(x)
        x = self.head(self.from_seq(x))
        return x

    def forward_head(self, x, pre_logits: bool = False):
        x = self.global_pool(x)
        x = self.head_drop(x)
        return x if pre_logits else self.fc(x)

    def forward(self, x):
        x = self.forward_features(x)
        x = self.forward_head(x)
        return x


def _create_selecsls(variant, pretrained, **kwargs):
    cfg = {}
    feature_info = [dict(num_chs=32, reduction=2, module='stem.2')]
    if variant.startswith('selecsls42'):
        cfg['block'] = SelecSlsBlock
        # Define configuration of the network after the initial neck
        cfg['features'] = [
            # in_chs, skip_chs, mid_chs, out_chs, is_first, stride
            (32, 0, 64, 64, True, 2),
            (64, 64, 64, 128, False, 1),
            (128, 0, 144, 144, True, 2),
            (144, 144, 144, 288, False, 1),
            (288, 0, 304, 304, True, 2),
            (304, 304, 304, 480, False, 1),
        ]
        feature_info.extend([
            dict(num_chs=128, reduction=4, module='features.1'),
            dict(num_chs=288, reduction=8, module='features.3'),
            dict(num_chs=480, reduction=16, module='features.5'),
        ])
        # Head can be replaced with alternative configurations depending on the problem
        feature_info.append(dict(num_chs=1024, reduction=32, module='head.1'))
        if variant == 'selecsls42b':
            cfg['head'] = [
                (480, 960, 3, 2),
                (960, 1024, 3, 1),
                (1024, 1280, 3, 2),
                (1280, 1024, 1, 1),
            ]
            feature_info.append(dict(num_chs=1024, reduction=64, module='head.3'))
            cfg['num_features'] = 1024
        else:
            cfg['head'] = [
                (480, 960, 3, 2),
                (960, 1024, 3, 1),
                (1024, 1024, 3, 2),
                (1024, 1280, 1, 1),
            ]
            feature_info.append(dict(num_chs=1280, reduction=64, module='head.3'))
            cfg['num_features'] = 1280
    elif variant.startswith('selecsls60'):
        cfg['block'] = SelecSlsBlock
        # Define configuration of the network after the initial neck
        cfg['features'] = [
            # in_chs, skip_chs, mid_chs, out_chs, is_first, stride
            (32, 0, 64, 64, True, 2),
            (64, 64, 64, 128, False, 1),
            (128, 0, 128, 128, True, 2),
            (128, 128, 128, 128, False, 1),
            (128, 128, 128, 288, False, 1),
            (288, 0, 288, 288, True, 2),
            (288, 288, 288, 288, False, 1),
            (288, 288, 288, 288, False, 1),
            (288, 288, 288, 416, False, 1),
        ]
        feature_info.extend([
            dict(num_chs=128, reduction=4, module='features.1'),
            dict(num_chs=288, reduction=8, module='features.4'),
            dict(num_chs=416, reduction=16, module='features.8'),
        ])
        # Head can be replaced with alternative configurations depending on the problem
        feature_info.append(dict(num_chs=1024, reduction=32, module='head.1'))
        if variant == 'selecsls60b':
            cfg['head'] = [
                (416, 756, 3, 2),
                (756, 1024, 3, 1),
                (1024, 1280, 3, 2),
                (1280, 1024, 1, 1),
            ]
            feature_info.append(dict(num_chs=1024, reduction=64, module='head.3'))
            cfg['num_features'] = 1024
        else:
            cfg['head'] = [
                (416, 756, 3, 2),
                (756, 1024, 3, 1),
                (1024, 1024, 3, 2),
                (1024, 1280, 1, 1),
            ]
            feature_info.append(dict(num_chs=1280, reduction=64, module='head.3'))
            cfg['num_features'] = 1280
    elif variant == 'selecsls84':
        cfg['block'] = SelecSlsBlock
        # Define configuration of the network after the initial neck
        cfg['features'] = [
            # in_chs, skip_chs, mid_chs, out_chs, is_first, stride
            (32, 0, 64, 64, True, 2),
            (64, 64, 64, 144, False, 1),
            (144, 0, 144, 144, True, 2),
            (144, 144, 144, 144, False, 1),
            (144, 144, 144, 144, False, 1),
            (144, 144, 144, 144, False, 1),
            (144, 144, 144, 304, False, 1),
            (304, 0, 304, 304, True, 2),
            (304, 304, 304, 304, False, 1),
            (304, 304, 304, 304, False, 1),
            (304, 304, 304, 304, False, 1),
            (304, 304, 304, 304, False, 1),
            (304, 304, 304, 512, False, 1),
        ]
        feature_info.extend([
            dict(num_chs=144, reduction=4, module='features.1'),
            dict(num_chs=304, reduction=8, module='features.6'),
            dict(num_chs=512, reduction=16, module='features.12'),
        ])
        # Head can be replaced with alternative configurations depending on the problem
        cfg['head'] = [
            (512, 960, 3, 2),
            (960, 1024, 3, 1),
            (1024, 1024, 3, 2),
            (1024, 1280, 3, 1),
        ]
        cfg['num_features'] = 1280
        feature_info.extend([
            dict(num_chs=1024, reduction=32, module='head.1'),
            dict(num_chs=1280, reduction=64, module='head.3')
        ])
    else:
        raise ValueError('Invalid net configuration ' + variant + ' !!!')
    cfg['feature_info'] = feature_info

    # this model can do 6 feature levels by default, unlike most others, leave as 0-4 to avoid surprises?
    return build_model_with_cfg(
        SelecSls,
        variant,
        pretrained,
        model_cfg=cfg,
        feature_cfg=dict(out_indices=(0, 1, 2, 3, 4), flatten_sequential=True),
        **kwargs,
    )


def _cfg(url='', **kwargs):
    return {
        'url': url,
        'num_classes': 1000, 'input_size': (3, 224, 224), 'pool_size': (4, 4),
        'crop_pct': 0.875, 'interpolation': 'bilinear',
        'mean': IMAGENET_DEFAULT_MEAN, 'std': IMAGENET_DEFAULT_STD,
        'first_conv': 'stem.0', 'classifier': 'fc',
        **kwargs
    }


default_cfgs = generate_default_cfgs({
    'selecsls42.untrained': _cfg(
        interpolation='bicubic'),
    'selecsls42b.in1k': _cfg(
        hf_hub_id='timm/',
        interpolation='bicubic'),
    'selecsls60.in1k': _cfg(
        hf_hub_id='timm/',
        interpolation='bicubic'),
    'selecsls60b.in1k': _cfg(
        hf_hub_id='timm/',
        interpolation='bicubic'),
    'selecsls84.untrained': _cfg(
        interpolation='bicubic'),
})


@register_model
def selecsls42(pretrained=False, **kwargs) -> SelecSls:
    """Constructs a SelecSls42 model.
    """
    return _create_selecsls('selecsls42', pretrained, **kwargs)


@register_model
def selecsls42b(pretrained=False, **kwargs) -> SelecSls:
    """Constructs a SelecSls42_B model.
    """
    return _create_selecsls('selecsls42b', pretrained, **kwargs)


@register_model
def selecsls60(pretrained=False, **kwargs) -> SelecSls:
    """Constructs a SelecSls60 model.
    """
    return _create_selecsls('selecsls60', pretrained, **kwargs)


@register_model
def selecsls60b(pretrained=False, **kwargs) -> SelecSls:
    """Constructs a SelecSls60_B model.
    """
    return _create_selecsls('selecsls60b', pretrained, **kwargs)


@register_model
def selecsls84(pretrained=False, **kwargs) -> SelecSls:
    """Constructs a SelecSls84 model.
    """
    return _create_selecsls('selecsls84', pretrained, **kwargs)
pytorch-image-models/timm/models/selecsls.py/0
{ "file_path": "pytorch-image-models/timm/models/selecsls.py", "repo_id": "pytorch-image-models", "token_count": 6461 }
229
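A minimal usage sketch for the entrypoints registered in `selecsls.py` above (assuming `timm` and `torch` are installed; the variant name must match one of the `@register_model` functions, and `features_only` relies on the `feature_info` built in `_create_selecsls`):

```py
import timm
import torch

# Build one of the registered SelecSls variants and run a smoke-test forward pass.
model = timm.create_model('selecsls60b', pretrained=False, num_classes=10)
model.eval()
with torch.no_grad():
    logits = model(torch.randn(1, 3, 224, 224))
print(logits.shape)  # torch.Size([1, 10])

# The feature_info / feature_cfg above also enable multi-scale feature extraction.
backbone = timm.create_model('selecsls60b', features_only=True)
with torch.no_grad():
    feats = backbone(torch.randn(1, 3, 224, 224))
print([f.shape[1] for f in feats])  # channel counts follow feature_info: [32, 128, 288, 416, 1024]
```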
""" Vision Transformer (ViT) in PyTorch A PyTorch implement of Vision Transformers as described in: 'Exploring Plain Vision Transformer Backbones for Object Detection' - https://arxiv.org/abs/2203.16527 'Segment Anything Model (SAM)' - https://github.com/facebookresearch/segment-anything/ """ import logging from functools import partial from typing import Callable, List, Optional, Tuple, Union import torch import torch.nn as nn import torch.nn.functional as F import torch.utils.checkpoint from timm.data import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD, IMAGENET_INCEPTION_MEAN, IMAGENET_INCEPTION_STD from timm.layers import PatchEmbed, Mlp, DropPath, PatchDropout, LayerNorm2d, ClassifierHead, NormMlpClassifierHead, \ Format, resample_abs_pos_embed_nhwc, RotaryEmbeddingCat, apply_rot_embed_cat, to_2tuple, use_fused_attn from torch.jit import Final from ._builder import build_model_with_cfg from ._features import feature_take_indices from ._features_fx import register_notrace_function from ._manipulate import checkpoint_seq from ._registry import generate_default_cfgs, register_model # model_registry will add each entrypoint fn to this __all__ = ['VisionTransformerSAM'] _logger = logging.getLogger(__name__) def get_rel_pos(q_size: int, k_size: int, rel_pos: torch.Tensor) -> torch.Tensor: """ Get relative positional embeddings according to the relative positions of query and key sizes. Args: q_size (int): size of query q. k_size (int): size of key k. rel_pos (Tensor): relative position embeddings (L, C). Returns: Extracted positional embeddings according to relative positions. """ max_rel_dist = int(2 * max(q_size, k_size) - 1) # Interpolate rel pos if needed. if rel_pos.shape[0] != max_rel_dist: # Interpolate rel pos. rel_pos_resized = F.interpolate( rel_pos.reshape(1, rel_pos.shape[0], -1).permute(0, 2, 1), size=max_rel_dist, mode="linear", ) rel_pos_resized = rel_pos_resized.reshape(-1, max_rel_dist).permute(1, 0) else: rel_pos_resized = rel_pos # Scale the coords with short length if shapes for q and k are different. q_coords = torch.arange(q_size)[:, None] * max(k_size / q_size, 1.0) k_coords = torch.arange(k_size)[None, :] * max(q_size / k_size, 1.0) relative_coords = (q_coords - k_coords) + (k_size - 1) * max(q_size / k_size, 1.0) return rel_pos_resized[relative_coords.long()] register_notrace_function(get_rel_pos) def get_decomposed_rel_pos_bias( q: torch.Tensor, rel_pos_h: torch.Tensor, rel_pos_w: torch.Tensor, q_size: Tuple[int, int], k_size: Tuple[int, int], ) -> torch.Tensor: """ Calculate decomposed Relative Positional Embeddings from :paper:`mvitv2`. https://github.com/facebookresearch/mvit/blob/19786631e330df9f3622e5402b4a419a263a2c80/mvit/models/attention.py Args: q (Tensor): query q in the attention layer with shape (B, q_h * q_w, C). rel_pos_h (Tensor): relative position embeddings (Lh, C) for height axis. rel_pos_w (Tensor): relative position embeddings (Lw, C) for width axis. q_size (Tuple): spatial sequence size of query q with (q_h, q_w). k_size (Tuple): spatial sequence size of key k with (k_h, k_w). 
Returns: bias (Tensor): attention bias to add to attention map """ q_h, q_w = q_size k_h, k_w = k_size Rh = get_rel_pos(q_h, k_h, rel_pos_h) Rw = get_rel_pos(q_w, k_w, rel_pos_w) B, _, dim = q.shape r_q = q.reshape(B, q_h, q_w, dim) rel_h = torch.einsum("bhwc,hkc->bhwk", r_q, Rh) rel_w = torch.einsum("bhwc,wkc->bhwk", r_q, Rw) attn_bias = rel_h[:, :, :, :, None] + rel_w[:, :, :, None, :] return attn_bias.reshape(-1, q_h * q_w, k_h * k_w) class Attention(nn.Module): fused_attn: Final[bool] def __init__( self, dim, num_heads=8, qkv_bias=True, qk_norm=False, attn_drop=0., proj_drop=0., norm_layer=nn.LayerNorm, use_rel_pos: bool = False, input_size: Optional[Tuple[int, int]] = None, rope: Optional[nn.Module] = None, ): super().__init__() assert dim % num_heads == 0, 'dim should be divisible by num_heads' self.num_heads = num_heads self.head_dim = dim // num_heads self.scale = self.head_dim ** -0.5 self.fused_attn = use_fused_attn() self.qkv = nn.Linear(dim, dim * 3, bias=qkv_bias) self.q_norm = norm_layer(self.head_dim) if qk_norm else nn.Identity() self.k_norm = norm_layer(self.head_dim) if qk_norm else nn.Identity() self.attn_drop = nn.Dropout(attn_drop) self.proj = nn.Linear(dim, dim) self.proj_drop = nn.Dropout(proj_drop) self.use_rel_pos = use_rel_pos if self.use_rel_pos: assert rope is None assert ( input_size is not None ), "Input size must be provided if using relative positional encoding." # initialize relative positional embeddings self.rel_pos_h = nn.Parameter(torch.zeros( 2 * input_size[0] - 1, self.head_dim)) self.rel_pos_w = nn.Parameter(torch.zeros( 2 * input_size[1] - 1, self.head_dim)) self.rope = rope def forward(self, x): B, H, W, _ = x.shape N = H * W x = x.reshape(B, N, -1) qkv = self.qkv(x).view(B, N, 3, self.num_heads, -1).permute(2, 0, 3, 1, 4) # qkv with shape (3, B, nHead, H * W, C) q, k, v = qkv.reshape(3, B * self.num_heads, N, -1).unbind(0) # q, k, v with shape (B * nHead, H * W, C) q, k = self.q_norm(q), self.k_norm(k) if self.use_rel_pos: attn_bias = get_decomposed_rel_pos_bias(q, self.rel_pos_h, self.rel_pos_w, (H, W), (H, W)) else: attn_bias = None if self.rope is not None: rope = self.rope.get_embed() q = apply_rot_embed_cat(q, rope).type_as(v) k = apply_rot_embed_cat(k, rope).type_as(v) if self.fused_attn: x = torch.nn.functional.scaled_dot_product_attention( q, k, v, attn_mask=attn_bias, dropout_p=self.attn_drop.p if self.training else 0., ) else: q = q * self.scale attn = q @ k.transpose(-2, -1) if attn_bias is not None: attn = attn + attn_bias attn = attn.softmax(dim=-1) attn = self.attn_drop(attn) x = attn @ v x = x.view(B, self.num_heads, N, -1).transpose(1, 2).reshape(B, N, -1) x = self.proj(x) x = self.proj_drop(x) x = x.view(B, H, W, -1) return x class LayerScale(nn.Module): def __init__(self, dim, init_values=1e-5, inplace=False): super().__init__() self.inplace = inplace self.gamma = nn.Parameter(init_values * torch.ones(dim)) def forward(self, x): return x.mul_(self.gamma) if self.inplace else x * self.gamma class Block(nn.Module): def __init__( self, dim, num_heads, mlp_ratio=4., qkv_bias=True, qk_norm=False, proj_drop=0., attn_drop=0., init_values=None, drop_path=0., act_layer=nn.GELU, norm_layer=nn.LayerNorm, mlp_layer=Mlp, use_rel_pos=False, window_size=0, input_size=None, rope=None, ): super().__init__() self.window_size = window_size self.norm1 = norm_layer(dim) self.attn = Attention( dim, num_heads=num_heads, qkv_bias=qkv_bias, qk_norm=qk_norm, attn_drop=attn_drop, proj_drop=proj_drop, norm_layer=norm_layer, use_rel_pos=use_rel_pos, 
input_size=input_size if window_size == 0 else (window_size, window_size), rope=rope, ) self.ls1 = LayerScale(dim, init_values=init_values) if init_values else nn.Identity() self.drop_path1 = DropPath(drop_path) if drop_path > 0. else nn.Identity() self.norm2 = norm_layer(dim) self.mlp = mlp_layer( in_features=dim, hidden_features=int(dim * mlp_ratio), act_layer=act_layer, drop=proj_drop, ) self.ls2 = LayerScale(dim, init_values=init_values) if init_values else nn.Identity() self.drop_path2 = DropPath(drop_path) if drop_path > 0. else nn.Identity() def forward(self, x): B, H, W, _ = x.shape shortcut = x x = self.norm1(x) # Window partition pad_hw: Optional[Tuple[int, int]] = None if self.window_size > 0: x, pad_hw = window_partition(x, self.window_size) x = self.drop_path1(self.ls1(self.attn(x))) # Reverse window partition if self.window_size > 0: x = window_unpartition(x, self.window_size, (H, W), pad_hw) x = shortcut + x x = x.reshape(B, H * W, -1) # MLP is faster for N, L, C tensor x = x + self.drop_path2(self.ls2(self.mlp(self.norm2(x)))) x = x.reshape(B, H, W, -1) return x def window_partition(x: torch.Tensor, window_size: int) -> Tuple[torch.Tensor, Tuple[int, int]]: """ Partition into non-overlapping windows with padding if needed. Args: x (tensor): input tokens with [B, H, W, C]. window_size (int): window size. Returns: windows: windows after partition with [B * num_windows, window_size, window_size, C]. (Hp, Wp): padded height and width before partition """ B, H, W, C = x.shape pad_h = (window_size - H % window_size) % window_size pad_w = (window_size - W % window_size) % window_size x = F.pad(x, (0, 0, 0, pad_w, 0, pad_h)) Hp, Wp = H + pad_h, W + pad_w x = x.view(B, Hp // window_size, window_size, Wp // window_size, window_size, C) windows = x.permute(0, 1, 3, 2, 4, 5).contiguous().view(-1, window_size, window_size, C) return windows, (Hp, Wp) def window_unpartition( windows: torch.Tensor, window_size: int, hw: Tuple[int, int], pad_hw: Optional[Tuple[int, int]] = None, ) -> torch.Tensor: """ Window unpartition into original sequences and removing padding. Args: windows (tensor): input tokens with [B * num_windows, window_size, window_size, C]. window_size (int): window size. pad_hw (Tuple): padded height and width (Hp, Wp). hw (Tuple): original height and width (H, W) before padding. Returns: x: unpartitioned sequences with [B, H, W, C]. 
""" Hp, Wp = pad_hw if pad_hw is not None else hw H, W = hw B = windows.shape[0] // (Hp * Wp // window_size // window_size) x = windows.view(B, Hp // window_size, Wp // window_size, window_size, window_size, -1) x = x.permute(0, 1, 3, 2, 4, 5).contiguous().view(B, Hp, Wp, -1) x = x[:, :H, :W, :].contiguous() return x class VisionTransformerSAM(nn.Module): """ Vision Transformer for Segment-Anything Model(SAM) A PyTorch impl of : `Exploring Plain Vision Transformer Backbones for Object Detection` or `Segment Anything Model (SAM)` - https://arxiv.org/abs/2010.11929 """ def __init__( self, img_size: int = 1024, patch_size: int = 16, in_chans: int = 3, num_classes: int = 768, embed_dim: int = 768, depth: int = 12, num_heads: int = 12, mlp_ratio: float = 4., qkv_bias: bool = True, qk_norm: bool = False, init_values: Optional[float] = None, pre_norm: bool = False, drop_rate: float = 0., pos_drop_rate: float = 0., patch_drop_rate: float = 0., proj_drop_rate: float = 0., attn_drop_rate: float = 0., drop_path_rate: float = 0., weight_init: str = '', embed_layer: Callable = partial(PatchEmbed, output_fmt=Format.NHWC, strict_img_size=False), norm_layer: Optional[Callable] = nn.LayerNorm, act_layer: Optional[Callable] = nn.GELU, block_fn: Callable = Block, mlp_layer: Callable = Mlp, use_abs_pos: bool = True, use_rel_pos: bool = False, use_rope: bool = False, window_size: int = 14, global_attn_indexes: Tuple[int, ...] = (), neck_chans: int = 256, global_pool: str = 'avg', head_hidden_size: Optional[int] = None, ref_feat_shape: Optional[Tuple[Tuple[int, int], Tuple[int, int]]] = None ): """ Args: img_size: Input image size. patch_size: Patch size. in_chans: Number of image input channels. num_classes: Mumber of classes for classification head. global_pool: Type of global pooling for final sequence (default: 'token'). embed_dim: Transformer embedding dimension. depth: Depth of transformer. num_heads: Number of attention heads. mlp_ratio: Ratio of mlp hidden dim to embedding dim. qkv_bias: Enable bias for qkv projections if True. init_values: Layer-scale init values (layer-scale enabled if not None). drop_rate: Head dropout rate. pos_drop_rate: Position embedding dropout rate. attn_drop_rate: Attention dropout rate. drop_path_rate: Stochastic depth rate. weight_init: Weight initialization scheme. embed_layer: Patch embedding layer. norm_layer: Normalization layer. act_layer: MLP activation layer. block_fn: Transformer block layer. use_abs_pos: If True, use absolute positional embeddings. use_rel_pos: If True, add relative positional embeddings to the attention map. use_rope: If True, add rotary position embeddings to q/k in attention block. window_size: Window size for window attention blocks. If 0, not use window attention. global_attn_indexes: Indexes for blocks using global attention. Used when window_size > 0. global_pool: Global pooling type. 
head_hidden_size: If set, use NormMlpHead ref_feat_shape: Tuple of reference feature shapes for ROPE, (global, local) """ super().__init__() norm_layer = norm_layer or partial(nn.LayerNorm, eps=1e-6) act_layer = act_layer or nn.GELU self.num_classes = num_classes self.global_pool = global_pool self.num_features = self.head_hidden_size = self.embed_dim = embed_dim # for consistency with other models self.grad_checkpointing = False self.patch_embed = embed_layer( img_size=img_size, patch_size=patch_size, in_chans=in_chans, embed_dim=embed_dim, bias=not pre_norm, # disable bias if pre-norm is used ) grid_size = self.patch_embed.grid_size r = self.patch_embed.feat_ratio() if hasattr(self.patch_embed, 'feat_ratio') else patch_size if use_abs_pos: # Initialize absolute positional embedding with pretrain image size. self.pos_embed = nn.Parameter(torch.zeros(1, grid_size[0], grid_size[1], embed_dim)) else: self.pos_embed = None self.pos_drop = nn.Dropout(p=pos_drop_rate) if patch_drop_rate > 0: self.patch_drop = PatchDropout( patch_drop_rate, num_prefix_tokens=0, ) else: self.patch_drop = nn.Identity() self.norm_pre = norm_layer(embed_dim) if pre_norm else nn.Identity() if use_rope: assert not use_rel_pos, "ROPE and relative pos embeddings should not be enabled at same time" if ref_feat_shape is not None: assert len(ref_feat_shape) == 2 ref_feat_shape_global = to_2tuple(ref_feat_shape[0]) ref_feat_shape_window = to_2tuple(ref_feat_shape[1]) else: ref_feat_shape_global = ref_feat_shape_window = None self.rope_global = RotaryEmbeddingCat( embed_dim // num_heads, in_pixels=False, feat_shape=grid_size, ref_feat_shape=ref_feat_shape_global, ) self.rope_window = RotaryEmbeddingCat( embed_dim // num_heads, in_pixels=False, feat_shape=to_2tuple(window_size), ref_feat_shape=ref_feat_shape_window, ) else: self.rope_global = None self.rope_window = None # stochastic depth decay rule dpr = [x.item() for x in torch.linspace(0, drop_path_rate, depth)] self.blocks = nn.Sequential(*[ block_fn( dim=embed_dim, num_heads=num_heads, mlp_ratio=mlp_ratio, qkv_bias=qkv_bias, qk_norm=qk_norm, init_values=init_values, proj_drop=proj_drop_rate, attn_drop=attn_drop_rate, drop_path=dpr[i], norm_layer=norm_layer, act_layer=act_layer, mlp_layer=mlp_layer, use_rel_pos=use_rel_pos, window_size=window_size if i not in global_attn_indexes else 0, input_size=grid_size, rope=self.rope_window if i not in global_attn_indexes else self.rope_global, ) for i in range(depth)]) self.feature_info = [ dict(module=f'blocks.{i}', num_chs=embed_dim, reduction=r) for i in range(depth)] if neck_chans: self.neck = nn.Sequential( nn.Conv2d( embed_dim, neck_chans, kernel_size=1, bias=False, ), LayerNorm2d(neck_chans), nn.Conv2d( neck_chans, neck_chans, kernel_size=3, padding=1, bias=False, ), LayerNorm2d(neck_chans), ) self.num_features = neck_chans else: if head_hidden_size: self.neck = nn.Identity() else: # should have a final norm with standard ClassifierHead self.neck = LayerNorm2d(embed_dim) neck_chans = embed_dim # Classifier Head if head_hidden_size: self.head = NormMlpClassifierHead( neck_chans, num_classes, hidden_size=head_hidden_size, pool_type=global_pool, drop_rate=drop_rate, ) else: self.head = ClassifierHead( neck_chans, num_classes, pool_type=global_pool, drop_rate=drop_rate, ) @torch.jit.ignore def no_weight_decay(self): return {'pos_embed', 'dist_token'} @torch.jit.ignore def group_matcher(self, coarse=False): return dict( stem=r'^pos_embed|patch_embed', # stem and embed blocks=[(r'^blocks\.(\d+)', None), (r'^norm', (99999,))] ) 
@torch.jit.ignore def set_grad_checkpointing(self, enable=True): self.grad_checkpointing = enable @torch.jit.ignore def get_classifier(self) -> nn.Module: return self.head def reset_classifier(self, num_classes: int, global_pool: Optional[str] = None): self.head.reset(num_classes, global_pool) def forward_intermediates( self, x: torch.Tensor, indices: Optional[Union[int, List[int]]] = None, norm: bool = False, stop_early: bool = False, output_fmt: str = 'NCHW', intermediates_only: bool = False, ) -> Union[List[torch.Tensor], Tuple[torch.Tensor, List[torch.Tensor]]]: """ Forward features that returns intermediates. Args: x: Input image tensor indices: Take last n blocks if int, all if None, select matching indices if sequence norm: Apply norm layer to all intermediates stop_early: Stop iterating over blocks when last desired intermediate hit output_fmt: Shape of intermediate feature outputs intermediates_only: Only return intermediate features Returns: """ assert output_fmt == 'NCHW', 'Output shape for ViT-SAM must be NCHW.' intermediates = [] take_indices, max_index = feature_take_indices(len(self.blocks), indices) # forward pass, collect intermediates x = self.patch_embed(x) if self.pos_embed is not None: # dynamically resize abs pos embedding if needed x = x + resample_abs_pos_embed_nhwc(self.pos_embed, x.shape[1:3]) x = self.pos_drop(x) x = self.patch_drop(x) x = self.norm_pre(x) if torch.jit.is_scripting() or not stop_early: # can't slice blocks in torchscript blocks = self.blocks else: blocks = self.blocks[:max_index + 1] for i, blk in enumerate(blocks): x = blk(x) if i in take_indices: # make output BCHW if norm: # norm is intertwined with neck convs so apply both, changes the dim # FIXME only apply to final? Need experiments intermediates.append(self.neck(x.permute(0, 3, 1, 2))) else: intermediates.append(x.permute(0, 3, 1, 2)) if intermediates_only: return intermediates x = self.neck(x.permute(0, 3, 1, 2)) return x, intermediates def prune_intermediate_layers( self, indices: Optional[Union[int, List[int]]] = None, prune_norm: bool = False, prune_head: bool = True, ): """ Prune layers not required for specified intermediates. 
""" take_indices, max_index = feature_take_indices(len(self.blocks), indices) self.blocks = self.blocks[:max_index + 1] # truncate blocks if prune_norm: # neck is being treated as equivalent to final norm here self.neck = nn.Identity() if prune_head: self.reset_classifier(0, '') return take_indices def forward_features(self, x): x = self.patch_embed(x) if self.pos_embed is not None: # dynamically resize abs pos embedding if needed x = x + resample_abs_pos_embed_nhwc(self.pos_embed, x.shape[1:3]) x = self.pos_drop(x) x = self.patch_drop(x) x = self.norm_pre(x) if self.grad_checkpointing and not torch.jit.is_scripting(): x = checkpoint_seq(self.blocks, x) else: x = self.blocks(x) x = self.neck(x.permute(0, 3, 1, 2)) return x def forward_head(self, x, pre_logits: bool = False): return self.head(x, pre_logits=True) if pre_logits else self.head(x) def forward(self, x): x = self.forward_features(x) x = self.forward_head(x) return x def checkpoint_filter_fn( state_dict, model, ): """ Remap SAM checkpoints -> timm """ sam_checkpoint = 'image_encoder.patch_embed.proj.weight' in state_dict out_dict = {} for k, v in state_dict.items(): if k.startswith('image_encoder.'): k = k[14:] k = k.replace('mlp.lin', 'mlp.fc') else: if sam_checkpoint: continue out_dict[k] = v return out_dict def _cfg(url='', **kwargs): return { 'url': url, 'num_classes': 1000, 'input_size': (3, 1024, 1024), 'pool_size': None, 'crop_pct': .9, 'interpolation': 'bicubic', 'fixed_input_size': True, 'mean': IMAGENET_INCEPTION_MEAN, 'std': IMAGENET_INCEPTION_STD, 'first_conv': 'patch_embed.proj', 'classifier': 'head.fc', **kwargs } default_cfgs = generate_default_cfgs({ # Segment-Anyhing Model (SAM) pretrained - https://github.com/facebookresearch/segment-anything (no classifier head, for fine-tune/features only) 'samvit_base_patch16.sa1b': _cfg( url='https://dl.fbaipublicfiles.com/segment_anything/sam_vit_b_01ec64.pth', hf_hub_id='timm/', license='apache-2.0', mean=IMAGENET_DEFAULT_MEAN, std=IMAGENET_DEFAULT_STD, num_classes=0, input_size=(3, 1024, 1024), crop_pct=1.0), 'samvit_large_patch16.sa1b': _cfg( url='https://dl.fbaipublicfiles.com/segment_anything/sam_vit_l_0b3195.pth', hf_hub_id='timm/', license='apache-2.0', mean=IMAGENET_DEFAULT_MEAN, std=IMAGENET_DEFAULT_STD, num_classes=0, input_size=(3, 1024, 1024), crop_pct=1.0), 'samvit_huge_patch16.sa1b': _cfg( url='https://dl.fbaipublicfiles.com/segment_anything/sam_vit_h_4b8939.pth', hf_hub_id='timm/', license='apache-2.0', mean=IMAGENET_DEFAULT_MEAN, std=IMAGENET_DEFAULT_STD, num_classes=0, input_size=(3, 1024, 1024), crop_pct=1.0), 'samvit_base_patch16_224': _cfg( mean=IMAGENET_DEFAULT_MEAN, std=IMAGENET_DEFAULT_STD, num_classes=1000, input_size=(3, 224, 224), crop_pct=0.9), }) def _create_vision_transformer(variant, pretrained=False, **kwargs): out_indices = kwargs.pop('out_indices', 3) return build_model_with_cfg( VisionTransformerSAM, variant, pretrained, pretrained_filter_fn=checkpoint_filter_fn, feature_cfg=dict(out_indices=out_indices, feature_cls='getter'), **kwargs, ) @register_model def samvit_base_patch16(pretrained=False, **kwargs) -> VisionTransformerSAM: """ ViT-B/16 for Segment-Anything """ model_args = dict( patch_size=16, embed_dim=768, depth=12, num_heads=12, global_attn_indexes=[2, 5, 8, 11], window_size=14, use_rel_pos=True, img_size=1024, ) model = _create_vision_transformer( 'samvit_base_patch16', pretrained=pretrained, **dict(model_args, **kwargs)) return model @register_model def samvit_large_patch16(pretrained=False, **kwargs) -> VisionTransformerSAM: """ 
ViT-L/16 for Segment-Anything """ model_args = dict( patch_size=16, embed_dim=1024, depth=24, num_heads=16, global_attn_indexes=[5, 11, 17, 23], window_size=14, use_rel_pos=True, img_size=1024, ) model = _create_vision_transformer( 'samvit_large_patch16', pretrained=pretrained, **dict(model_args, **kwargs)) return model @register_model def samvit_huge_patch16(pretrained=False, **kwargs) -> VisionTransformerSAM: """ ViT-H/16 for Segment-Anything """ model_args = dict( patch_size=16, embed_dim=1280, depth=32, num_heads=16, global_attn_indexes=[7, 15, 23, 31], window_size=14, use_rel_pos=True, img_size=1024, ) model = _create_vision_transformer( 'samvit_huge_patch16', pretrained=pretrained, **dict(model_args, **kwargs)) return model @register_model def samvit_base_patch16_224(pretrained=False, **kwargs) -> VisionTransformerSAM: """ ViT-B/16 based on samvit arch """ model_args = dict( patch_size=16, embed_dim=768, depth=12, num_heads=12, global_attn_indexes=[2, 5, 8, 11], window_size=14, use_rel_pos=True, use_abs_pos=False, img_size=224, neck_chans=None, ) model = _create_vision_transformer( 'samvit_base_patch16_224', pretrained=pretrained, **dict(model_args, **kwargs)) return model
pytorch-image-models/timm/models/vision_transformer_sam.py/0
{ "file_path": "pytorch-image-models/timm/models/vision_transformer_sam.py", "repo_id": "pytorch-image-models", "token_count": 13913 }
230
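A small sanity-check sketch for the `window_partition` / `window_unpartition` pair defined above (pure `torch`; the import path follows the file's location in `timm`):

```py
import torch
from timm.models.vision_transformer_sam import window_partition, window_unpartition

# H = W = 30 is not a multiple of the window size, so window_partition pads to (42, 42).
x = torch.randn(2, 30, 30, 8)  # NHWC, as used by the blocks above
windows, pad_hw = window_partition(x, 14)
print(windows.shape, pad_hw)  # torch.Size([18, 14, 14, 8]) (42, 42)

# window_unpartition strips the padding again, so the round trip is exact.
y = window_unpartition(windows, 14, (30, 30), pad_hw)
assert torch.equal(x, y)
```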
""" Lion Optimizer Paper: `Symbolic Discovery of Optimization Algorithms` - https://arxiv.org/abs/2302.06675 Original Impl: https://github.com/google/automl/tree/master/lion """ # Copyright 2023 Google Research. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== from typing import List import torch from torch.optim.optimizer import Optimizer class Lion(Optimizer): r"""Implements Lion algorithm.""" def __init__( self, params, lr=1e-4, betas=(0.9, 0.99), weight_decay=0.0, maximize=False, foreach=None, ): """Initialize the hyperparameters. Args: params (iterable): iterable of parameters to optimize or dicts defining parameter groups lr (float, optional): learning rate (default: 1e-4) betas (Tuple[float, float], optional): coefficients used for computing running averages of gradient and its square (default: (0.9, 0.99)) weight_decay (float, optional): weight decay coefficient (default: 0) """ if not 0.0 <= lr: raise ValueError('Invalid learning rate: {}'.format(lr)) if not 0.0 <= betas[0] < 1.0: raise ValueError('Invalid beta parameter at index 0: {}'.format(betas[0])) if not 0.0 <= betas[1] < 1.0: raise ValueError('Invalid beta parameter at index 1: {}'.format(betas[1])) defaults = dict( lr=lr, betas=betas, weight_decay=weight_decay, foreach=foreach, maximize=maximize, ) super().__init__(params, defaults) def __setstate__(self, state): super().__setstate__(state) for group in self.param_groups: group.setdefault('maximize', False) group.setdefault('foreach', None) @torch.no_grad() def step(self, closure=None): """Performs a single optimization step. Args: closure (callable, optional): A closure that reevaluates the model and returns the loss. Returns: the loss. """ loss = None if closure is not None: with torch.enable_grad(): loss = closure() for group in self.param_groups: params_with_grad = [] grads = [] exp_avgs = [] beta1, beta2 = group['betas'] for p in group['params']: if p.grad is None: continue params_with_grad.append(p) if p.grad.is_sparse: raise RuntimeError('Lion does not support sparse gradients') grads.append(p.grad) state = self.state[p] # State initialization if len(state) == 0: state['exp_avg'] = torch.zeros_like(p, memory_format=torch.preserve_format) exp_avgs.append(state['exp_avg']) lion( params_with_grad, grads, exp_avgs, beta1=beta1, beta2=beta2, lr=group['lr'], weight_decay=group['weight_decay'], maximize=group['maximize'], foreach=group['foreach'], ) return loss def lion( params: List[torch.Tensor], grads: List[torch.Tensor], exp_avgs: List[torch.Tensor], # kwonly args with defaults are not supported by functions compiled with torchscript issue #70627 # setting this as kwarg for now as functional API is compiled by torch/distributed/optim maximize: bool = False, foreach: bool = None, *, beta1: float, beta2: float, lr: float, weight_decay: float, ): r"""Functional API that performs Lion algorithm computation. 
""" if foreach is None: # Placeholder for more complex foreach logic to be added when value is not set foreach = False if foreach and torch.jit.is_scripting(): raise RuntimeError('torch.jit.script not supported with foreach optimizers') if foreach and not torch.jit.is_scripting(): func = _multi_tensor_lion else: func = _single_tensor_lion func( params, grads, exp_avgs, beta1=beta1, beta2=beta2, lr=lr, weight_decay=weight_decay, maximize=maximize, ) def _single_tensor_lion( params: List[torch.Tensor], grads: List[torch.Tensor], exp_avgs: List[torch.Tensor], *, beta1: float, beta2: float, lr: float, weight_decay: float, maximize: bool, ): for i, param in enumerate(params): grad = grads[i] if not maximize else -grads[i] exp_avg = exp_avgs[i] if torch.is_complex(param): grad = torch.view_as_real(grad) exp_avg = torch.view_as_real(exp_avg) param = torch.view_as_real(param) # Perform stepweight decay param.mul_(1 - lr * weight_decay) # Weight update update = exp_avg.mul(beta1).add_(grad, alpha=1 - beta1) param.add_(torch.sign(update), alpha=-lr) # Decay the momentum running average coefficient exp_avg.lerp_(grad, 1 - beta2) def _multi_tensor_lion( params: List[torch.Tensor], grads: List[torch.Tensor], exp_avgs: List[torch.Tensor], *, beta1: float, beta2: float, lr: float, weight_decay: float, maximize: bool, ): if len(params) == 0: return if maximize: grads = torch._foreach_neg(tuple(grads)) # type: ignore[assignment] grads = [torch.view_as_real(x) if torch.is_complex(x) else x for x in grads] exp_avgs = [torch.view_as_real(x) if torch.is_complex(x) else x for x in exp_avgs] params = [torch.view_as_real(x) if torch.is_complex(x) else x for x in params] # Perform stepweight decay torch._foreach_mul_(params, 1 - lr * weight_decay) # Weight update updates = torch._foreach_mul(exp_avgs, beta1) torch._foreach_add_(updates, grads, alpha=1 - beta1) updates = [u.sign() for u in updates] torch._foreach_add_(params, updates, alpha=-lr) # Decay the momentum running average coefficient torch._foreach_mul_(exp_avgs, beta2) torch._foreach_add_(exp_avgs, grads, alpha=1 - beta2)
pytorch-image-models/timm/optim/lion.py/0
{ "file_path": "pytorch-image-models/timm/optim/lion.py", "repo_id": "pytorch-image-models", "token_count": 3257 }
231
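A minimal sketch of driving the `Lion` optimizer above (standard `torch.optim`-style usage; the import path follows the file's location in `timm`):

```py
import torch
import torch.nn as nn
from timm.optim.lion import Lion

model = nn.Linear(10, 2)
optimizer = Lion(model.parameters(), lr=1e-4, betas=(0.9, 0.99), weight_decay=0.1)

# One optimization step: sign of the interpolated update, then EMA decay of the momentum.
loss = model(torch.randn(8, 10)).pow(2).mean()
loss.backward()
optimizer.step()
optimizer.zero_grad()
```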
import abc
from abc import ABC
from typing import Any, Dict, List, Optional

import torch


class Scheduler(ABC):
    """ Parameter Scheduler Base Class
    A scheduler base class that can be used to schedule any optimizer parameter groups.

    Unlike the builtin PyTorch schedulers, this is intended to be consistently called
    * At the END of each epoch, before incrementing the epoch count, to calculate next epoch's value
    * At the END of each optimizer update, after incrementing the update count, to calculate next update's value

    The schedulers built on this should try to remain as stateless as possible (for simplicity).

    This family of schedulers is attempting to avoid the confusion of the meaning of 'last_epoch'
    and -1 values for special behaviour. All epoch and update counts must be tracked in the training
    code and explicitly passed in to the schedulers on the corresponding step or step_update call.

    Based on ideas from:
     * https://github.com/pytorch/fairseq/tree/master/fairseq/optim/lr_scheduler
     * https://github.com/allenai/allennlp/tree/master/allennlp/training/learning_rate_schedulers
    """

    def __init__(
            self,
            optimizer: torch.optim.Optimizer,
            param_group_field: str,
            t_in_epochs: bool = True,
            noise_range_t=None,
            noise_type='normal',
            noise_pct=0.67,
            noise_std=1.0,
            noise_seed=None,
            initialize: bool = True,
    ) -> None:
        self.optimizer = optimizer
        self.param_group_field = param_group_field
        self._initial_param_group_field = f"initial_{param_group_field}"
        if initialize:
            for i, group in enumerate(self.optimizer.param_groups):
                if param_group_field not in group:
                    raise KeyError(f"{param_group_field} missing from param_groups[{i}]")
                group.setdefault(self._initial_param_group_field, group[param_group_field])
        else:
            for i, group in enumerate(self.optimizer.param_groups):
                if self._initial_param_group_field not in group:
                    raise KeyError(f"{self._initial_param_group_field} missing from param_groups[{i}]")
        self.base_values = [group[self._initial_param_group_field] for group in self.optimizer.param_groups]
        self.metric = None  # any point to having this for all?
        self.t_in_epochs = t_in_epochs
        self.noise_range_t = noise_range_t
        self.noise_pct = noise_pct
        self.noise_type = noise_type
        self.noise_std = noise_std
        self.noise_seed = noise_seed if noise_seed is not None else 42
        self.update_groups(self.base_values)

    def state_dict(self) -> Dict[str, Any]:
        return {key: value for key, value in self.__dict__.items() if key != 'optimizer'}

    def load_state_dict(self, state_dict: Dict[str, Any]) -> None:
        self.__dict__.update(state_dict)

    @abc.abstractmethod
    def _get_lr(self, t: int) -> List[float]:
        pass

    def _get_values(self, t: int, on_epoch: bool = True) -> Optional[List[float]]:
        proceed = (on_epoch and self.t_in_epochs) or (not on_epoch and not self.t_in_epochs)
        if not proceed:
            return None
        return self._get_lr(t)

    def step(self, epoch: int, metric: float = None) -> None:
        self.metric = metric
        values = self._get_values(epoch, on_epoch=True)
        if values is not None:
            values = self._add_noise(values, epoch)
            self.update_groups(values)

    def step_update(self, num_updates: int, metric: float = None):
        self.metric = metric
        values = self._get_values(num_updates, on_epoch=False)
        if values is not None:
            values = self._add_noise(values, num_updates)
            self.update_groups(values)

    def update_groups(self, values):
        if not isinstance(values, (list, tuple)):
            values = [values] * len(self.optimizer.param_groups)
        for param_group, value in zip(self.optimizer.param_groups, values):
            if 'lr_scale' in param_group:
                param_group[self.param_group_field] = value * param_group['lr_scale']
            else:
                param_group[self.param_group_field] = value

    def _add_noise(self, lrs, t):
        if self._is_apply_noise(t):
            noise = self._calculate_noise(t)
            lrs = [v + v * noise for v in lrs]
        return lrs

    def _is_apply_noise(self, t) -> bool:
        """Return True if scheduler in noise range."""
        apply_noise = False
        if self.noise_range_t is not None:
            if isinstance(self.noise_range_t, (list, tuple)):
                apply_noise = self.noise_range_t[0] <= t < self.noise_range_t[1]
            else:
                apply_noise = t >= self.noise_range_t
        return apply_noise

    def _calculate_noise(self, t) -> float:
        g = torch.Generator()
        g.manual_seed(self.noise_seed + t)
        if self.noise_type == 'normal':
            while True:
                # resample if noise out of percent limit, brute force but shouldn't spin much
                noise = torch.randn(1, generator=g).item()
                if abs(noise) < self.noise_pct:
                    return noise
        else:
            noise = 2 * (torch.rand(1, generator=g).item() - 0.5) * self.noise_pct
            return noise
pytorch-image-models/timm/scheduler/scheduler.py/0
{ "file_path": "pytorch-image-models/timm/scheduler/scheduler.py", "repo_id": "pytorch-image-models", "token_count": 2368 }
232
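Because `Scheduler` is abstract, a subclass only has to provide `_get_lr`. A hypothetical minimal subclass (linear decay of the `lr` field, stepped per epoch as the base class docstring above prescribes):

```py
from typing import List

import torch
from timm.scheduler.scheduler import Scheduler


class LinearDecayScheduler(Scheduler):
    """Hypothetical example: linearly decay lr to zero over t_max epochs."""

    def __init__(self, optimizer: torch.optim.Optimizer, t_max: int) -> None:
        super().__init__(optimizer, param_group_field='lr', t_in_epochs=True)
        self.t_max = t_max

    def _get_lr(self, t: int) -> List[float]:
        frac = max(0., 1. - t / self.t_max)
        return [v * frac for v in self.base_values]


model = torch.nn.Linear(4, 4)
optimizer = torch.optim.SGD(model.parameters(), lr=0.1)
scheduler = LinearDecayScheduler(optimizer, t_max=10)
for epoch in range(10):
    # ... train one epoch ...
    scheduler.step(epoch + 1)  # called at the END of the epoch, per the base class contract
```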
""" Model / state_dict utils Hacked together by / Copyright 2020 Ross Wightman """ import fnmatch from copy import deepcopy import torch from torchvision.ops.misc import FrozenBatchNorm2d from timm.layers import BatchNormAct2d, SyncBatchNormAct, FrozenBatchNormAct2d,\ freeze_batch_norm_2d, unfreeze_batch_norm_2d from .model_ema import ModelEma def unwrap_model(model): if isinstance(model, ModelEma): return unwrap_model(model.ema) else: if hasattr(model, 'module'): return unwrap_model(model.module) elif hasattr(model, '_orig_mod'): return unwrap_model(model._orig_mod) else: return model def get_state_dict(model, unwrap_fn=unwrap_model): return unwrap_fn(model).state_dict() def avg_sq_ch_mean(model, input, output): """ calculate average channel square mean of output activations """ return torch.mean(output.mean(axis=[0, 2, 3]) ** 2).item() def avg_ch_var(model, input, output): """ calculate average channel variance of output activations """ return torch.mean(output.var(axis=[0, 2, 3])).item() def avg_ch_var_residual(model, input, output): """ calculate average channel variance of output activations """ return torch.mean(output.var(axis=[0, 2, 3])).item() class ActivationStatsHook: """Iterates through each of `model`'s modules and matches modules using unix pattern matching based on `hook_fn_locs` and registers `hook_fn` to the module if there is a match. Arguments: model (nn.Module): model from which we will extract the activation stats hook_fn_locs (List[str]): List of `hook_fn` locations based on Unix type string matching with the name of model's modules. hook_fns (List[Callable]): List of hook functions to be registered at every module in `layer_names`. Inspiration from https://docs.fast.ai/callback.hook.html. Refer to https://gist.github.com/amaarora/6e56942fcb46e67ba203f3009b30d950 for an example on how to plot Signal Propogation Plots using `ActivationStatsHook`. """ def __init__(self, model, hook_fn_locs, hook_fns): self.model = model self.hook_fn_locs = hook_fn_locs self.hook_fns = hook_fns if len(hook_fn_locs) != len(hook_fns): raise ValueError("Please provide `hook_fns` for each `hook_fn_locs`, \ their lengths are different.") self.stats = dict((hook_fn.__name__, []) for hook_fn in hook_fns) for hook_fn_loc, hook_fn in zip(hook_fn_locs, hook_fns): self.register_hook(hook_fn_loc, hook_fn) def _create_hook(self, hook_fn): def append_activation_stats(module, input, output): out = hook_fn(module, input, output) self.stats[hook_fn.__name__].append(out) return append_activation_stats def register_hook(self, hook_fn_loc, hook_fn): for name, module in self.model.named_modules(): if not fnmatch.fnmatch(name, hook_fn_loc): continue module.register_forward_hook(self._create_hook(hook_fn)) def extract_spp_stats( model, hook_fn_locs, hook_fns, input_shape=[8, 3, 224, 224]): """Extract average square channel mean and variance of activations during forward pass to plot Signal Propogation Plots (SPP). Paper: https://arxiv.org/abs/2101.08692 Example Usage: https://gist.github.com/amaarora/6e56942fcb46e67ba203f3009b30d950 """ x = torch.normal(0., 1., input_shape) hook = ActivationStatsHook(model, hook_fn_locs=hook_fn_locs, hook_fns=hook_fns) _ = model(x) return hook.stats def _freeze_unfreeze(root_module, submodules=[], include_bn_running_stats=True, mode='freeze'): """ Freeze or unfreeze parameters of the specified modules and those of all their hierarchical descendants. This is done in place. Args: root_module (nn.Module, optional): Root module relative to which the `submodules` are referenced. 
submodules (list[str]): List of modules for which the parameters will be (un)frozen. They are to be provided as named modules relative to the root module (accessible via `root_module.named_modules()`). An empty list means that the whole root module will be (un)frozen. Defaults to [] include_bn_running_stats (bool): Whether to also (un)freeze the running statistics of batch norm 2d layers. Defaults to `True`. mode (bool): Whether to freeze ("freeze") or unfreeze ("unfreeze"). Defaults to `"freeze"`. """ assert mode in ["freeze", "unfreeze"], '`mode` must be one of "freeze" or "unfreeze"' if isinstance(root_module, ( torch.nn.modules.batchnorm.BatchNorm2d, torch.nn.modules.batchnorm.SyncBatchNorm, BatchNormAct2d, SyncBatchNormAct, )): # Raise assertion here because we can't convert it in place raise AssertionError( "You have provided a batch norm layer as the `root module`. Please use " "`timm.utils.model.freeze_batch_norm_2d` or `timm.utils.model.unfreeze_batch_norm_2d` instead.") if isinstance(submodules, str): submodules = [submodules] named_modules = submodules submodules = [root_module.get_submodule(m) for m in submodules] if not len(submodules): named_modules, submodules = list(zip(*root_module.named_children())) for n, m in zip(named_modules, submodules): # (Un)freeze parameters for p in m.parameters(): p.requires_grad = False if mode == 'freeze' else True if include_bn_running_stats: # Helper to add submodule specified as a named_module def _add_submodule(module, name, submodule): split = name.rsplit('.', 1) if len(split) > 1: module.get_submodule(split[0]).add_module(split[1], submodule) else: module.add_module(name, submodule) # Freeze batch norm if mode == 'freeze': res = freeze_batch_norm_2d(m) # It's possible that `m` is a type of BatchNorm in itself, in which case `unfreeze_batch_norm_2d` won't # convert it in place, but will return the converted result. In this case `res` holds the converted # result and we may try to re-assign the named module if isinstance(m, ( torch.nn.modules.batchnorm.BatchNorm2d, torch.nn.modules.batchnorm.SyncBatchNorm, BatchNormAct2d, SyncBatchNormAct, )): _add_submodule(root_module, n, res) # Unfreeze batch norm else: res = unfreeze_batch_norm_2d(m) # Ditto. See note above in mode == 'freeze' branch if isinstance(m, (FrozenBatchNorm2d, FrozenBatchNormAct2d)): _add_submodule(root_module, n, res) def freeze(root_module, submodules=[], include_bn_running_stats=True): """ Freeze parameters of the specified modules and those of all their hierarchical descendants. This is done in place. Args: root_module (nn.Module): Root module relative to which `submodules` are referenced. submodules (list[str]): List of modules for which the parameters will be frozen. They are to be provided as named modules relative to the root module (accessible via `root_module.named_modules()`). An empty list means that the whole root module will be frozen. Defaults to `[]`. include_bn_running_stats (bool): Whether to also freeze the running statistics of `BatchNorm2d` and `SyncBatchNorm` layers. These will be converted to `FrozenBatchNorm2d` in place. Hint: During fine tuning, it's good practice to freeze batch norm stats. And note that these are different to the affine parameters which are just normal PyTorch parameters. Defaults to `True`. Hint: If you want to freeze batch norm ONLY, use `timm.utils.model.freeze_batch_norm_2d`. 
Examples:: >>> model = timm.create_model('resnet18') >>> # Freeze up to and including layer2 >>> submodules = [n for n, _ in model.named_children()] >>> print(submodules) ['conv1', 'bn1', 'act1', 'maxpool', 'layer1', 'layer2', 'layer3', 'layer4', 'global_pool', 'fc'] >>> freeze(model, submodules[:submodules.index('layer2') + 1]) >>> # Check for yourself that it works as expected >>> print(model.layer2[0].conv1.weight.requires_grad) False >>> print(model.layer3[0].conv1.weight.requires_grad) True >>> # Unfreeze >>> unfreeze(model) """ _freeze_unfreeze(root_module, submodules, include_bn_running_stats=include_bn_running_stats, mode="freeze") def unfreeze(root_module, submodules=[], include_bn_running_stats=True): """ Unfreeze parameters of the specified modules and those of all their hierarchical descendants. This is done in place. Args: root_module (nn.Module): Root module relative to which `submodules` are referenced. submodules (list[str]): List of submodules for which the parameters will be (un)frozen. They are to be provided as named modules relative to the root module (accessible via `root_module.named_modules()`). An empty list means that the whole root module will be unfrozen. Defaults to `[]`. include_bn_running_stats (bool): Whether to also unfreeze the running statistics of `FrozenBatchNorm2d` layers. These will be converted to `BatchNorm2d` in place. Defaults to `True`. See example in docstring for `freeze`. """ _freeze_unfreeze(root_module, submodules, include_bn_running_stats=include_bn_running_stats, mode="unfreeze") def reparameterize_model(model: torch.nn.Module, inplace=False) -> torch.nn.Module: if not inplace: model = deepcopy(model) def _fuse(m): for child_name, child in m.named_children(): if hasattr(child, 'fuse'): setattr(m, child_name, child.fuse()) elif hasattr(child, "reparameterize"): child.reparameterize() elif hasattr(child, "switch_to_deploy"): child.switch_to_deploy() _fuse(child) _fuse(model) return model
pytorch-image-models/timm/utils/model.py/0
{ "file_path": "pytorch-image-models/timm/utils/model.py", "repo_id": "pytorch-image-models", "token_count": 4320 }
233
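A short sketch of collecting Signal Propagation Plot stats with the helpers above (`'layer?'` is an assumed glob that matches the four top-level stages of a `resnet18`; adjust `hook_fn_locs` to the module names of your own model):

```py
import timm
from timm.utils.model import avg_sq_ch_mean, avg_ch_var, extract_spp_stats

model = timm.create_model('resnet18')
stats = extract_spp_stats(
    model,
    hook_fn_locs=['layer?', 'layer?'],
    hook_fns=[avg_sq_ch_mean, avg_ch_var],
)
# One scalar per hooked module and forward pass, keyed by the hook fn name.
print({name: len(values) for name, values in stats.items()})
```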
ARG PLATFORM=xpu

FROM lukemathwalker/cargo-chef:latest-rust-1.80 AS chef
WORKDIR /usr/src

ARG CARGO_REGISTRIES_CRATES_IO_PROTOCOL=sparse

FROM chef AS planner
COPY Cargo.lock Cargo.lock
COPY Cargo.toml Cargo.toml
COPY rust-toolchain.toml rust-toolchain.toml
COPY proto proto
COPY benchmark benchmark
COPY router router
COPY backends backends
COPY launcher launcher
RUN cargo chef prepare --recipe-path recipe.json

FROM chef AS builder

RUN PROTOC_ZIP=protoc-21.12-linux-x86_64.zip && \
    curl -OL https://github.com/protocolbuffers/protobuf/releases/download/v21.12/$PROTOC_ZIP && \
    unzip -o $PROTOC_ZIP -d /usr/local bin/protoc && \
    unzip -o $PROTOC_ZIP -d /usr/local 'include/*' && \
    rm -f $PROTOC_ZIP

COPY --from=planner /usr/src/recipe.json recipe.json
RUN cargo chef cook --profile release-opt --recipe-path recipe.json

ARG GIT_SHA
ARG DOCKER_LABEL

COPY Cargo.toml Cargo.toml
COPY rust-toolchain.toml rust-toolchain.toml
COPY proto proto
COPY benchmark benchmark
COPY router router
COPY backends backends
COPY launcher launcher
RUN cargo build --profile release-opt

# Text Generation Inference base image for Intel
FROM intel/intel-extension-for-pytorch:2.1.30-xpu AS xpu

USER root
# libssl.so.1.1 is not installed on Ubuntu 22.04 by default, install it
RUN wget http://nz2.archive.ubuntu.com/ubuntu/pool/main/o/openssl/libssl1.1_1.1.1f-1ubuntu2_amd64.deb && \
    dpkg -i ./libssl1.1_1.1.1f-1ubuntu2_amd64.deb

RUN wget -qO - https://repositories.intel.com/gpu/intel-graphics.key | gpg --dearmor | tee /usr/share/keyrings/intel-graphics.gpg > /dev/null

RUN wget -O- https://apt.repos.intel.com/intel-gpg-keys/GPG-PUB-KEY-INTEL-SW-PRODUCTS.PUB \
    | gpg --dearmor | tee /usr/share/keyrings/oneapi-archive-keyring.gpg > /dev/null && echo "deb [signed-by=/usr/share/keyrings/oneapi-archive-keyring.gpg] https://apt.repos.intel.com/oneapi all main" | tee /etc/apt/sources.list.d/oneAPI.list

RUN apt-get update && apt install -y intel-basekit xpu-smi cmake python3-dev ninja-build pciutils

# Text Generation Inference base env
ENV HF_HOME=/data \
    HF_HUB_ENABLE_HF_TRANSFER=1 \
    PORT=80

WORKDIR /usr/src
RUN wget https://intel-extension-for-pytorch.s3.amazonaws.com/ipex_dev/xpu/torch-2.1.0.post1%2Bcxx11.abi-cp310-cp310-linux_x86_64.whl && pip install torch-2.1.0.post1+cxx11.abi-cp310-cp310-linux_x86_64.whl
RUN pip install https://github.com/intel/intel-xpu-backend-for-triton/releases/download/v2.1.0/triton-2.1.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl
RUN git clone https://github.com/intel/intel-extension-for-pytorch && cd intel-extension-for-pytorch && git checkout -b distributed origin/dev/distributed

# Install server
COPY proto proto
COPY server server
COPY server/Makefile server/Makefile
RUN cd server && \
    make gen-server && \
    pip install -r requirements_intel.txt && \
    pip install ".[accelerate, peft, outlines]" --no-cache-dir

ENV CCL_ROOT=/opt/intel/oneapi/ccl/latest
ENV I_MPI_ROOT=/opt/intel/oneapi/mpi/latest
ENV FI_PROVIDER_PATH=/opt/intel/oneapi/mpi/latest/opt/mpi/libfabric/lib/prov:/usr/lib/x86_64-linux-gnu/libfabric
ENV LIBRARY_PATH=/opt/intel/oneapi/mpi/latest/lib:/opt/intel/oneapi/ccl/latest/lib/:/opt/intel/oneapi/mkl/latest/lib/:/opt/intel/oneapi/compiler/latest/lib
ENV LD_LIBRARY_PATH=/opt/intel/oneapi/ccl/latest/lib/:/opt/intel/oneapi/mpi/latest/opt/mpi/libfabric/lib:/opt/intel/oneapi/mpi/latest/lib:/opt/intel/oneapi/mkl/latest/lib:/opt/intel/oneapi/compiler/latest/opt/compiler/lib:/opt/intel/oneapi/compiler/latest/lib:/opt/intel/oneapi/lib:/opt/intel/oneapi/lib/intel64:
ENV PATH=/opt/intel/oneapi/mpi/latest/opt/mpi/libfabric/bin:/opt/intel/oneapi/mpi/latest/bin:/opt/intel/oneapi/mpi/latest/opt/mpi/libfabric/bin:/opt/intel/oneapi/mkl/latest/bin/:/opt/intel/oneapi/compiler/latest/bin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin
ENV CCL_ZE_IPC_EXCHANGE=sockets
ENV CMAKE_PREFIX_PATH=/opt/intel/oneapi/mkl/latest/lib/cmake:/opt/intel/oneapi/compiler/latest
ENV CPATH=/opt/intel/oneapi/mpi/latest/include:/opt/intel/oneapi/ccl/latest/include:/opt/intel/oneapi/mkl/latest/include

RUN pip uninstall -y intel-extension-for-pytorch && cd intel-extension-for-pytorch && git submodule update --init --recursive && USE_AOT_DEVLIST='pvc' BUILD_SEPARATE_OPS=OFF BUILD_WITH_CPU=OFF USE_XETLA=ON python setup.py install && rm -rf /usr/src/intel-extension-for-pytorch

# Install benchmarker
COPY --from=builder /usr/src/target/release-opt/text-generation-benchmark /usr/local/bin/text-generation-benchmark
# Install router
COPY --from=builder /usr/src/target/release-opt/text-generation-router /usr/local/bin/text-generation-router
# Install launcher
COPY --from=builder /usr/src/target/release-opt/text-generation-launcher /usr/local/bin/text-generation-launcher

# Text Generation Inference base image for Intel-cpu
FROM ubuntu:22.04 AS cpu

RUN apt-get update && DEBIAN_FRONTEND=noninteractive apt-get install -y --no-install-recommends \
    curl \
    ca-certificates \
    make \
    g++ \
    git \
    wget \
    cmake \
    libnuma-dev

ENV HUGGINGFACE_HUB_CACHE=/data \
    HF_HUB_ENABLE_HF_TRANSFER=1 \
    PORT=80

ARG MAMBA_VERSION=23.1.0-1
ARG PYTHON_VERSION='3.10.10'
# Automatically set by buildx
ARG TARGETPLATFORM
ENV PATH /opt/conda/bin:$PATH

# TGI seem to require libssl.so.1.1 instead of libssl.so.3 so we can't use ubuntu 22.04. Ubuntu 20.04 has python==3.8, and TGI requires python>=3.9, hence the need for miniconda.
# Install mamba
# translating Docker's TARGETPLATFORM into mamba arches
RUN case ${TARGETPLATFORM} in \
    "linux/arm64") MAMBA_ARCH=aarch64 ;; \
    *) MAMBA_ARCH=x86_64 ;; \
    esac && \
    curl -fsSL -v -o ~/mambaforge.sh -O "https://github.com/conda-forge/miniforge/releases/download/${MAMBA_VERSION}/Mambaforge-${MAMBA_VERSION}-Linux-${MAMBA_ARCH}.sh"
RUN chmod +x ~/mambaforge.sh && \
    bash ~/mambaforge.sh -b -p /opt/conda && \
    rm ~/mambaforge.sh

RUN conda install -c conda-forge gperftools mkl

RUN pip install https://download.pytorch.org/whl/nightly/cpu/torch-2.4.0.dev20240612%2Bcpu-cp310-cp310-linux_x86_64.whl
RUN pip install https://download.pytorch.org/whl/nightly/cpu/torchvision-0.19.0.dev20240612%2Bcpu-cp310-cp310-linux_x86_64.whl
RUN pip install https://download.pytorch.org/whl/nightly/cpu/torchaudio-2.4.0.dev20240612%2Bcpu-cp310-cp310-linux_x86_64.whl
RUN pip install triton numa

WORKDIR /usr/src

RUN git clone https://github.com/intel/intel-extension-for-pytorch && cd intel-extension-for-pytorch && git checkout eda7a7c42df6f9a64e0de9c2b69304ee02f2c32a
RUN git clone https://github.com/intel/torch-ccl.git && cd torch-ccl && git checkout ccl_torch_dev_0131

RUN cd intel-extension-for-pytorch && git submodule sync && git submodule update --init --recursive && python setup.py install
RUN cd torch-ccl && git submodule sync && git submodule update --init --recursive && pip install .

ENV LD_PRELOAD=/opt/conda/lib/libtcmalloc.so
ENV CCL_ROOT=/opt/conda/lib/python3.10/site-packages/oneccl_bindings_for_pytorch
ENV I_MPI_ROOT=/opt/conda/lib/python3.10/site-packages/oneccl_bindings_for_pytorch
ENV FI_PROVIDER_PATH=/opt/conda/lib/python3.10/site-packages/oneccl_bindings_for_pytorch/opt/mpi/libfabric/lib/prov:/usr/lib64/libfabric
ENV LD_LIBRARY_PATH=/opt/conda/lib/python3.10/site-packages/oneccl_bindings_for_pytorch/opt/mpi/libfabric/lib:/opt/conda/lib/python3.10/site-packages/oneccl_bindings_for_pytorch/lib

# Install server
COPY proto proto
COPY server server
COPY server/Makefile server/Makefile
RUN cd server && \
    make gen-server && \
    pip install -r requirements_intel.txt && \
    pip install ".[accelerate, peft, outlines]" --no-cache-dir

# Install benchmarker
COPY --from=builder /usr/src/target/release-opt/text-generation-benchmark /usr/local/bin/text-generation-benchmark
# Install router
COPY --from=builder /usr/src/target/release-opt/text-generation-router /usr/local/bin/text-generation-router
# Install launcher
COPY --from=builder /usr/src/target/release-opt/text-generation-launcher /usr/local/bin/text-generation-launcher

FROM ${PLATFORM} AS final
ENTRYPOINT ["text-generation-launcher"]
CMD ["--json-output"]
text-generation-inference/Dockerfile_intel/0
{ "file_path": "text-generation-inference/Dockerfile_intel", "repo_id": "text-generation-inference", "token_count": 3313 }
234
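A hedged sketch of how this multi-stage Dockerfile might be built; the final stage is selected via the `PLATFORM` build-arg (`xpu` or `cpu`), and the image tags here are only illustrative:

```py
import subprocess

# Build one image per supported Intel platform; 'docker' must be on PATH and
# the build context is assumed to be the repository root.
for platform in ('xpu', 'cpu'):
    subprocess.run(
        [
            'docker', 'build',
            '-f', 'Dockerfile_intel',
            '--build-arg', f'PLATFORM={platform}',
            '-t', f'tgi-intel:{platform}',  # hypothetical tag
            '.',
        ],
        check=True,
    )
```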
#include <fstream>

#include <fmt/ranges.h>
#include <spdlog/spdlog.h>
#include <nvml.h>

#include "backend.h"
#include "hardware.h"

void huggingface::tgi::backends::InitializeBackend() {
    SPDLOG_INFO("Initializing Backend...");
    nvmlInit_v2();
    initTrtLlmPlugins();

    const auto numGpus = huggingface::hardware::cuda::GetNumDevices();
    if (numGpus.has_value()) {
        SPDLOG_INFO("Detected {:d} Nvidia GPU(s)", numGpus.value());
    } else {
        SPDLOG_WARN("Failed to detect Nvidia GPU(s) on the system");
    }
}

[[nodiscard]]
tle::ExecutorConfig huggingface::tgi::backends::GetExecutorConfig(const json &config, const std::string &workerPath) {
    tle::ExecutorConfig execConfig(1);

    // Retrieve the compute capabilities to enable some options at runtime
    const auto computeCapabilities = huggingface::hardware::cuda::GetCudaComputeCapabilities();

    // Single engine (TP = PP = 1) -> using leader mode (no MPI involved)
    if (config["/pretrained_config/mapping/world_size"_json_pointer].get<uint8_t>() == 1) {
        SPDLOG_INFO("Detected single engine deployment, using leader mode");
        execConfig.setParallelConfig(tle::ParallelConfig(
                tle::CommunicationType::kMPI,
                tle::CommunicationMode::kLEADER,
                std::nullopt,
                std::nullopt,
                std::nullopt
        ));
    } else { // Multiple engines -> using orchestrator mode (MPI involved)
        SPDLOG_INFO("Detected sharded engine deployment, using orchestrator mode");
        execConfig.setParallelConfig(tle::ParallelConfig(
                tle::CommunicationType::kMPI,
                tle::CommunicationMode::kORCHESTRATOR,
                std::nullopt,
                std::nullopt,
                tle::OrchestratorConfig(true, workerPath, nullptr, true)
        ));
    }

    // Define some configuration variables
    execConfig.setKvCacheConfig(tle::KvCacheConfig(true));
    execConfig.setEnableChunkedContext(computeCapabilities.isPostAmpere());
    return execConfig;
}

tle::SamplingConfig huggingface::tgi::backends::GetSamplingConfig(
        uint32_t topK,
        float_t topP,
        float_t temperature,
        float_t repetition_penalty,
        float_t frequency_penalty,
        uint64_t seed) {
    return tle::SamplingConfig(
            1,  // TGI only uses a single beam
            topK,
            topP,
            std::nullopt,
            std::nullopt,
            std::nullopt,
            seed,
            temperature,
            temperature,
            std::nullopt,
            repetition_penalty,
            std::nullopt,
            frequency_penalty
    );
}

huggingface::tgi::backends::TensorRtLlmBackend::TensorRtLlmBackend(
        const std::filesystem::path &enginesFolder,
        const std::filesystem::path &executorWorker
) :
        config(json::parse(std::ifstream(enginesFolder / "config.json"))),
        executor(
                enginesFolder,
                tensorrt_llm::executor::ModelType::kDECODER_ONLY,
                GetExecutorConfig(config, executorWorker.string()
                )) {
    SPDLOG_INFO(FMT_STRING("Engine (version={})"), config["/version"_json_pointer].get_ref<const std::string &>());
}

bool huggingface::tgi::backends::TensorRtLlmBackend::IsReady() const {
    return executor.canEnqueueRequests();
}

[[nodiscard("Returned number of requests needs to be consumed")]]
size_t huggingface::tgi::backends::TensorRtLlmBackend::NumResponsesReady() const {
    return executor.getNumResponsesReady();
}

[[nodiscard("Returned request id needs to be provided back to gather generated tokens")]]
tle::IdType huggingface::tgi::backends::TensorRtLlmBackend::Submit(
        const std::vector<tle::TokenIdType> &tokens,
        const int32_t topK,
        const float_t topP,
        const float_t temperature,
        const float_t repetition_penalty,
        const float_t frequency_penalty,
        const uint64_t seed
) {
#ifdef NDEBUG
    SPDLOG_DEBUG(
            FMT_STRING("Submitting inference over {:d} tokens to the executor ({:d} already in-flight)"),
            tokens.size(),
            executor.getLatestIterationStats().back().numActiveRequests
    );
#else
    SPDLOG_DEBUG(
            FMT_STRING("Submitting inference [{}] to the executor ({:d} already in-flight)"),
            fmt::join(tokens, ", "),
            executor.getLatestIterationStats().front().numActiveRequests
    );
#endif

    const auto maxNumTokens = config["/build_config/max_num_tokens"_json_pointer].get<size_t>();
    const auto maxNewTokens = static_cast<int32_t>(std::max(1ul, maxNumTokens - tokens.size()));

    const auto sampling = GetSamplingConfig(topK, topP, temperature, repetition_penalty, frequency_penalty, seed);
    const auto output = tle::OutputConfig(true, false, false, true, false);
    return executor.enqueueRequest(
            tle::Request{tokens, maxNewTokens, true, sampling, output});
}

[[nodiscard("Generated tokens result must be used")]]
std::vector<tle::Response> huggingface::tgi::backends::TensorRtLlmBackend::Poll(const tle::IdType requestId) {
    SPDLOG_DEBUG(FMT_STRING("Polling status for request {:d}"), requestId);
    return executor.awaitResponses(requestId);
}

void huggingface::tgi::backends::TensorRtLlmBackend::Shutdown() {
    SPDLOG_INFO("Shutting down executor");
    executor.shutdown();
}
text-generation-inference/backends/trtllm/lib/backend.cpp/0
{ "file_path": "text-generation-inference/backends/trtllm/lib/backend.cpp", "repo_id": "text-generation-inference", "token_count": 2273 }
235
mod backend;
pub mod block_allocator;
mod client;
mod queue;
pub mod radix;

use crate::client::{ClientError, ShardedClient};
pub(crate) use backend::BackendV3;
use serde::Serialize;
use thiserror::Error;
use utoipa::ToSchema;

#[derive(Clone, Debug, Serialize, ToSchema)]
pub struct BackendInfo {
    /// Mandatory
    #[schema(example = "cuda")]
    pub model_device_type: String,
    #[schema(example = "torch.float16")]
    pub model_dtype: String,

    /// Backend parameters
    #[schema(example = "1")]
    pub speculate: usize,
    #[schema(example = "1.2")]
    pub waiting_served_ratio: f32,
    #[schema(example = "32000")]
    pub max_batch_total_tokens: u32,
    #[schema(example = "20")]
    pub max_waiting_tokens: usize,
    #[schema(nullable = true, example = "null")]
    pub max_batch_size: Option<usize>,
}

#[allow(clippy::too_many_arguments)]
pub async fn connect_backend(
    max_input_tokens: usize,
    max_total_tokens: usize,
    master_shard_uds_path: String,
    waiting_served_ratio: f32,
    max_batch_prefill_tokens: u32,
    max_batch_total_tokens: Option<u32>,
    max_waiting_tokens: usize,
    max_batch_size: Option<usize>,
) -> Result<(BackendV3, BackendInfo), V3Error> {
    // Helper function
    let check_max_batch_total_tokens = |max_supported_batch_total_tokens: Option<u32>| {
        match max_supported_batch_total_tokens {
            // Older models do not support automatic max-batch-total-tokens
            None => {
                let max_batch_total_tokens = max_batch_total_tokens
                    .unwrap_or(16000.max((max_total_tokens as u32).max(max_batch_prefill_tokens)));
                tracing::warn!("Model does not support automatic max batch total tokens");
                Ok(max_batch_total_tokens)
            }
            // Flash attention models return their max supported total tokens
            Some(max_supported_batch_total_tokens) => {
                // Warn if user added his own max-batch-total-tokens as we will ignore it
                if max_batch_total_tokens.is_some() {
                    tracing::warn!(
                        "`--max-batch-total-tokens` is deprecated for Flash \
                        Attention models."
                    );
                    tracing::warn!(
                        "Inferred max batch total tokens: {max_supported_batch_total_tokens}"
                    );
                }
                if max_total_tokens as u32 > max_supported_batch_total_tokens {
                    return Err(V3Error::NotEnoughMemory(max_total_tokens));
                }

                Ok(max_supported_batch_total_tokens)
            }
        }
    };

    let mut sharded_client = ShardedClient::connect_uds(master_shard_uds_path)
        .await
        .map_err(V3Error::Connection)?;

    // server is running on v3
    // Clear the cache; useful if the webserver rebooted
    sharded_client
        .clear_cache(None)
        .await
        .map_err(V3Error::Cache)?;

    // Get info from the shard
    let shard_info = sharded_client.info().await.map_err(V3Error::Info)?;

    // Warmup model
    tracing::info!("Warming up model");
    let max_batch_total_tokens = check_max_batch_total_tokens(
        sharded_client
            .warmup(
                max_input_tokens as u32,
                max_batch_prefill_tokens,
                max_total_tokens as u32,
                max_batch_size,
            )
            .await
            .map_err(V3Error::Warmup)?,
    )?;
    tracing::info!("Setting max batch total tokens to {max_batch_total_tokens}");

    let backend_info = BackendInfo {
        waiting_served_ratio,
        max_batch_total_tokens,
        max_waiting_tokens,
        max_batch_size,
        model_device_type: shard_info.device_type.clone(),
        model_dtype: shard_info.dtype.clone(),
        speculate: shard_info.speculate as usize,
    };

    let backend = BackendV3::new(
        sharded_client,
        waiting_served_ratio,
        max_batch_prefill_tokens,
        max_batch_total_tokens,
        max_waiting_tokens,
        max_batch_size,
        shard_info.requires_padding,
        shard_info.window_size,
        shard_info.speculate,
    );

    tracing::info!("Using backend V3");

    Ok((backend, backend_info))
}

#[derive(Debug, Error)]
pub enum V3Error {
    #[error("Unable to clear the Python model shards cache: {0}")]
    Cache(ClientError),
    #[error("Unable to connect to the Python model shards: {0}")]
    Connection(ClientError),
    #[error("Unable to get the Python model shards info: {0}")]
    Info(ClientError),
    #[error("Unable to warmup the Python model shards: {0}")]
    Warmup(ClientError),
    #[error("Not enough memory to handle `max_total_tokens={0}`")]
    NotEnoughMemory(usize),
}
text-generation-inference/backends/v3/src/lib.rs/0
{ "file_path": "text-generation-inference/backends/v3/src/lib.rs", "repo_id": "text-generation-inference", "token_count": 2266 }
236
# Text Generation Inference Architecture

This document describes the architecture of Text Generation Inference (TGI) by walking through the call flow between its separate components.

A high-level architecture diagram can be seen here:

![TGI architecture](https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/TGI.png)

As the diagram shows, there are three separate components:

- **The router**, also named `webserver`, that receives the client requests, buffers them, creates some batches, and prepares gRPC calls to a model server.
- **The model server**, responsible for receiving the gRPC requests and processing the inference on the model. If the model is sharded across multiple accelerators (e.g. multiple GPUs), the model server shards might be synchronized via NCCL or equivalent.
- **The launcher**, a helper that launches one or several model servers (if the model is sharded) and launches the router with compatible arguments.

The router and the model server can run on two different machines; they do not need to be deployed together.

## The Router

This component is a Rust web server binary that accepts HTTP requests using the custom [HTTP API](https://huggingface.github.io/text-generation-inference/), as well as OpenAI's [Messages API](https://huggingface.co/docs/text-generation-inference/messages_api). The router receives the API calls and handles the "batches" logic (an introduction to batching can be found [here](https://github.com/huggingface/text-generation-inference/blob/main/router/README.md)). It uses different strategies to reduce latency between requests and responses, especially oriented toward decoding latency. It uses queues, schedulers, and block allocators to achieve this, producing batched requests that are then sent to the model server.
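From the client's point of view, the router is a plain HTTP endpoint. As a minimal illustration, the sketch below streams tokens from a running instance with Python's `requests` package; it assumes a TGI instance listening on `localhost:3000` (the router's default port) and uses the public `/generate_stream` schema.

```python
# Minimal streaming client for the router's HTTP API.
# Sketch only: assumes a TGI instance is listening on localhost:3000.
import json

import requests

payload = {
    "inputs": "What is Deep Learning?",
    "parameters": {"max_new_tokens": 20},
}

# /generate_stream answers with Server-Sent Events, one event per token.
with requests.post(
    "http://localhost:3000/generate_stream", json=payload, stream=True
) as response:
    response.raise_for_status()
    for line in response.iter_lines():
        if line.startswith(b"data:"):
            event = json.loads(line[len(b"data:") :])
            print(event["token"]["text"], end="", flush=True)
```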
### Router's command line

Parameters are passed to the router on its command line (it does not rely on a configuration file):

```
Text Generation Webserver

Usage: text-generation-router [OPTIONS]

Options:
      --max-concurrent-requests <MAX_CONCURRENT_REQUESTS>
          [env: MAX_CONCURRENT_REQUESTS=] [default: 128]
      --max-best-of <MAX_BEST_OF>
          [env: MAX_BEST_OF=] [default: 2]
      --max-stop-sequences <MAX_STOP_SEQUENCES>
          [env: MAX_STOP_SEQUENCES=] [default: 4]
      --max-top-n-tokens <MAX_TOP_N_TOKENS>
          [env: MAX_TOP_N_TOKENS=] [default: 5]
      --max-input-tokens <MAX_INPUT_TOKENS>
          [env: MAX_INPUT_TOKENS=] [default: 1024]
      --max-total-tokens <MAX_TOTAL_TOKENS>
          [env: MAX_TOTAL_TOKENS=] [default: 2048]
      --waiting-served-ratio <WAITING_SERVED_RATIO>
          [env: WAITING_SERVED_RATIO=] [default: 1.2]
      --max-batch-prefill-tokens <MAX_BATCH_PREFILL_TOKENS>
          [env: MAX_BATCH_PREFILL_TOKENS=] [default: 4096]
      --max-batch-total-tokens <MAX_BATCH_TOTAL_TOKENS>
          [env: MAX_BATCH_TOTAL_TOKENS=]
      --max-waiting-tokens <MAX_WAITING_TOKENS>
          [env: MAX_WAITING_TOKENS=] [default: 20]
      --max-batch-size <MAX_BATCH_SIZE>
          [env: MAX_BATCH_SIZE=]
      --hostname <HOSTNAME>
          [env: HOSTNAME=] [default: 0.0.0.0]
  -p, --port <PORT>
          [env: PORT=] [default: 3000]
      --master-shard-uds-path <MASTER_SHARD_UDS_PATH>
          [env: MASTER_SHARD_UDS_PATH=] [default: /tmp/text-generation-server-0]
      --tokenizer-name <TOKENIZER_NAME>
          [env: TOKENIZER_NAME=] [default: bigscience/bloom]
      --tokenizer-config-path <TOKENIZER_CONFIG_PATH>
          [env: TOKENIZER_CONFIG_PATH=]
      --revision <REVISION>
          [env: REVISION=]
      --validation-workers <VALIDATION_WORKERS>
          [env: VALIDATION_WORKERS=] [default: 2]
      --json-output
          [env: JSON_OUTPUT=]
      --otlp-endpoint <OTLP_ENDPOINT>
          [env: OTLP_ENDPOINT=]
      --otlp-service-name <OTLP_SERVICE_NAME>
          [env: OTLP_SERVICE_NAME=]
      --cors-allow-origin <CORS_ALLOW_ORIGIN>
          [env: CORS_ALLOW_ORIGIN=]
      --ngrok
          [env: NGROK=]
      --ngrok-authtoken <NGROK_AUTHTOKEN>
          [env: NGROK_AUTHTOKEN=]
      --ngrok-edge <NGROK_EDGE>
          [env: NGROK_EDGE=]
      --messages-api-enabled
          [env: MESSAGES_API_ENABLED=]
      --disable-grammar-support
          [env: DISABLE_GRAMMAR_SUPPORT=]
      --max-client-batch-size <MAX_CLIENT_BATCH_SIZE>
          [env: MAX_CLIENT_BATCH_SIZE=] [default: 4]
  -h, --help
          Print help
  -V, --version
          Print version
```

## The Model Server

The model server is a Python server that waits for gRPC requests, loads a given model, performs sharding to provide [tensor parallelism](https://huggingface.co/docs/text-generation-inference/conceptual/tensor_parallelism), and stays alive while waiting for new requests. The model server supports models instantiated using PyTorch and optimized for inference mainly on CUDA/ROCm.

### Model Server Variants

Several variants of the model server exist that are actively supported by Hugging Face:

- By default, the model server will attempt to build [a server optimized for Nvidia GPUs with CUDA](https://huggingface.co/docs/text-generation-inference/installation_nvidia). The code for this version is hosted in the [main TGI repository](https://github.com/huggingface/text-generation-inference).
- A [version optimized for AMD with ROCm](https://huggingface.co/docs/text-generation-inference/installation_amd) is hosted in the main TGI repository. Some model features differ.
- A [version optimized for Intel GPUs](https://huggingface.co/docs/text-generation-inference/installation_intel) is hosted in the main TGI repository. Some model features differ.
- The [version for Intel Gaudi](https://huggingface.co/docs/text-generation-inference/installation_gaudi) is maintained in a [forked repository](https://github.com/huggingface/tgi-gaudi), often resynchronized with the main TGI repository.
- A [version for Neuron (AWS Inferentia2)](https://huggingface.co/docs/text-generation-inference/installation_inferentia) is maintained as part of [Optimum Neuron](https://github.com/huggingface/optimum-neuron/tree/main/text-generation-inference).
- A version for Google TPUs is maintained as part of [Optimum TPU](https://github.com/huggingface/optimum-tpu/tree/main/text-generation-inference).

Not all variants provide the same features, as hardware and middleware capabilities do not provide the same optimizations.

### Command Line Interface

The official command line interface (CLI) for the server supports three subcommands, `download-weights`, `quantize` and `serve`:

- `download-weights` will download weights from the hub and, in some variants, convert them to a format adapted to the given implementation;
- `quantize` will allow quantizing a model using the `gptq` package. This feature is not available or supported on all variants;
- `serve` will start the server that loads a model (or a model shard), receives gRPC calls from the router, performs an inference and provides a formatted response to the given request.

The `serve` subcommand's command line parameters on the TGI repository are these:

```
 Usage: cli.py serve [OPTIONS] MODEL_ID

╭─ Arguments ──────────────────────────────────────────────────────────────────────────────────────────────╮
│ *    model_id      TEXT  [default: None] [required]                                                      │
╰──────────────────────────────────────────────────────────────────────────────────────────────────────────╯
╭─ Options ────────────────────────────────────────────────────────────────────────────────────────────────╮
│ --revision                                    TEXT                         [default: None]               │
│ --sharded              --no-sharded                                        [default: no-sharded]         │
│ --quantize                                    [bitsandbytes|bitsandbytes   [default: None]               │
│                                               -nf4|bitsandbytes-fp4|gptq                                 │
│                                               |awq|eetq|exl2|fp8]                                        │
│ --speculate                                   INTEGER                      [default: None]               │
│ --dtype                                       [float16|bfloat16]           [default: None]               │
│ --trust-remote-code    --no-trust-remote-code                              [default:                     │
│                                                                            no-trust-remote-code]         │
│ --uds-path                                    PATH                         [default:                     │
│                                                                            /tmp/text-generation-serve…   │
│ --logger-level                                TEXT                         [default: INFO]               │
│ --json-output          --no-json-output                                    [default: no-json-output]     │
│ --otlp-endpoint                               TEXT                         [default: None]               │
│ --otlp-service-name                           TEXT                         [default:                     │
│                                                                            text-generation-inference...  │
│ --help                                        Show this message and exit.                                │
โ”‚ โ•ฐโ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ•ฏ ``` Note that some variants might support different parameters, and they could possibly accept more options that can be passed on using environment variables. ## Call Flow Once both components are initialized, weights downloaded and model server is up and running, router and model server exchange data and info through the gRPC call. There are currently two supported schemas, [v2](https://github.com/huggingface/text-generation-inference/blob/main/proto/generate.proto) and [v3](https://github.com/huggingface/text-generation-inference/blob/main/proto/v3/generate.proto). These two versions are almost identical, except for: - input chunks support, for text and image data, - paged attention support Here's a diagram that displays the exchanges that follow the router and model server startup. ```mermaid sequenceDiagram Router->>Model Server: service discovery Model Server-->>Router: urls for other shards Router->>Model Server: get model info Model Server-->>Router: shard info Router->>Model Server: health check Model Server-->>Router: health OK Router->>Model Server: warmup(max_input_tokens, max_batch_prefill_tokens, max_total_tokens, max_batch_size) Model Server-->>Router: warmup result ``` After these are done, the router is ready to receive generate calls from multiple clients. Here's an example. ```mermaid sequenceDiagram participant Client 1 participant Client 2 participant Client 3 participant Router participant Model Server Client 1->>Router: generate_stream Router->>Model Server: prefill(batch1) Model Server-->>Router: generations, cached_batch1, timings Router-->>Client 1: token 1 Router->>Model Server: decode(cached_batch1) Model Server-->>Router: generations, cached_batch1, timings Router-->>Client 1: token 2 Router->>Model Server: decode(cached_batch1) Model Server-->>Router: generations, cached_batch1, timings Router-->>Client 1: token 3 Client 2->>Router: generate_stream Router->>Model Server: prefill(batch2) Note right of Model Server: This stops previous batch, that is restarted Model Server-->>Router: generations, cached_batch2, timings Router-->>Client 2: token 1' Router->>Model Server: decode(cached_batch1, cached_batch2) Model Server-->>Router: generations, cached_batch1, timings Router-->>Client 1: token 4 Router-->>Client 2: token 2' Note left of Client 1: Client 1 leaves Router->>Model Server: filter_batch(cached_batch1, request_ids_to_keep=batch2) Model Server-->>Router: filtered batch Router->>Model Server: decode(cached_batch2) Model Server-->>Router: generations, cached_batch2, timings Router-->>Client 2: token 3' Client 3->>Router: generate_stream Note right of Model Server: This stops previous batch, that is restarted Router->>Model Server: prefill(batch3) Note left of Client 1: Client 3 leaves without receiving any batch Router->>Model Server: clear_cache(batch3) Note right of Model Server: This stops previous batch, that is restarted Router->>Model Server: decode(cached_batch3) Note right of Model Server: Last token (stopping criteria) Model Server-->>Router: generations, cached_batch3, timings Router-->>Client 2: token 4' ```
text-generation-inference/docs/source/architecture.md/0
{ "file_path": "text-generation-inference/docs/source/architecture.md", "repo_id": "text-generation-inference", "token_count": 5181 }
237
# Safetensors

Safetensors is a model serialization format for deep learning models. It is [faster](https://huggingface.co/docs/safetensors/speed) and safer than other serialization formats, such as pickle (which is used under the hood in many deep learning libraries).

TGI depends on the safetensors format mainly to enable [tensor parallelism sharding](./tensor_parallelism). For a given model repository during serving, TGI looks for safetensors weights. If there are no safetensors weights, TGI converts the PyTorch weights to the safetensors format.

You can learn more about safetensors by reading the [safetensors documentation](https://huggingface.co/docs/safetensors/index).
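As a point of reference, the conversion amounts to something like the following sketch, using the `save_file`/`load_file` helpers from the `safetensors` Python package. The file name is arbitrary, and TGI's actual conversion additionally handles sharded checkpoints and shared tensors.

```python
# Sketch: round-tripping a PyTorch state dict through safetensors.
import torch
from safetensors.torch import load_file, save_file

state_dict = {"weight": torch.randn(16, 16), "bias": torch.zeros(16)}

# Tensors are written into a flat file with a small JSON header,
# which can later be memory-mapped and loaded lazily.
save_file(state_dict, "model.safetensors")  # arbitrary file name

# Unlike pickle, loading never executes arbitrary code.
reloaded = load_file("model.safetensors")
assert torch.equal(state_dict["weight"], reloaded["weight"])
```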
text-generation-inference/docs/source/conceptual/safetensors.md/0
{ "file_path": "text-generation-inference/docs/source/conceptual/safetensors.md", "repo_id": "text-generation-inference", "token_count": 184 }
238
# Collection of Usage Statistics

Text Generation Inference collects anonymous usage statistics to help us improve the service. The collected data is used to improve TGI and to understand what causes failures. The data is collected transparently and any sensitive information is omitted.

Data is sent twice, once on server startup and once when the server stops. Also, usage statistics are only enabled when TGI is running in docker, to avoid collecting data when TGI runs directly on the host machine.

## What data is collected

The code that collects the data is available [here](https://github.com/huggingface/text-generation-inference/blob/main/router/src/usage_stats.rs).
As of release 2.1.2, this is an example of the data collected:

- From the TGI configuration:

```json
{
    "event_type": "start",
    "disable_grammar_support": false,
    "max_batch_prefill_tokens": 4096,
    "max_batch_size": null,
    "max_batch_total_tokens": null,
    "max_best_of": 2,
    "max_client_batch_size": 4,
    "max_concurrent_requests": 128,
    "max_input_tokens": 1024,
    "max_stop_sequences": 4,
    "max_top_n_tokens": 5,
    "max_total_tokens": 2048,
    "max_waiting_tokens": 20,
    "messages_api_enabled": false,
    "model_config": {
        "model_type": "Bloom"
    },
    "revision": null,
    "tokenizer_class": "BloomTokenizerFast",
    "validation_workers": 2,
    "waiting_served_ratio": 1.2,
    "docker_label": "latest",
    "git_sha": "cfc118704880453d29bcbe4fbbd91dda501cf5fe",
    "nvidia_env": {
        "name": "NVIDIA A10G",
        "pci_bus_id": "00000000:00:1E.0",
        "driver_version": "535.183.01",
        "pstate": "P8",
        "pcie_link_gen_max": "4",
        "pcie_link_gen_current": "1",
        "temperature_gpu": "31",
        "utilization_gpu": "0 %",
        "utilization_memory": "0 %",
        "memory_total": "23028 MiB",
        "memory_free": "22515 MiB",
        "memory_used": "0 MiB",
        "reset_status_reset_required": "No",
        "reset_status_drain_and_reset_recommended": "No",
        "compute_cap": "8.6",
        "ecc_errors_corrected_volatile_total": "0",
        "mig_mode_current": "[N/A]",
        "power_draw_instant": "10.86 W",
        "power_limit": "300.00 W"
    },
    "system_env": {
        "cpu_count": 16,
        "cpu_type": "AMD EPYC 7R32",
        "total_memory": 66681196544,
        "architecture": "x86_64",
        "platform": "linux-unix-x86_64"
    }
}
```

## How to opt-out

By passing the `--usage-stats` flag to the text-generation-launcher, you can control how much usage data is collected.

- `--usage-stats=no-stack` will not emit the stack traces from errors and the error types, but will continue to send start and stop events.
- `--usage-stats=off` will completely disable everything.
text-generation-inference/docs/source/usage_statistics.md/0
{ "file_path": "text-generation-inference/docs/source/usage_statistics.md", "repo_id": "text-generation-inference", "token_count": 971 }
239
{ "details": { "best_of_sequences": null, "finish_reason": "length", "generated_tokens": 10, "prefill": [ { "id": 1, "logprob": null, "text": "<s>" }, { "id": 338, "logprob": -9.0859375, "text": "is" }, { "id": 21784, "logprob": -10.90625, "text": "Deep" }, { "id": 29257, "logprob": -2.65625, "text": "Learning" }, { "id": 29973, "logprob": -4.8085938, "text": "?" } ], "seed": 0, "tokens": [ { "id": 13, "logprob": -0.19958496, "special": false, "text": "\n" }, { "id": 4013, "logprob": -2.203125, "special": false, "text": "This" }, { "id": 1139, "logprob": -0.23693848, "special": false, "text": " question" }, { "id": 756, "logprob": 0.0, "special": false, "text": " has" }, { "id": 1063, "logprob": -0.076538086, "special": false, "text": " been" }, { "id": 4433, "logprob": 0.0, "special": false, "text": " asked" }, { "id": 1784, "logprob": -1.1367188, "special": false, "text": " many" }, { "id": 3064, "logprob": 0.0, "special": false, "text": " times" }, { "id": 322, "logprob": -1.7460938, "special": false, "text": " and" }, { "id": 306, "logprob": 0.0, "special": false, "text": " I" } ], "top_tokens": null }, "generated_text": "What is Deep Learning?\nThis question has been asked many times and I" }
text-generation-inference/integration-tests/models/__snapshots__/test_flash_awq/test_flash_llama_awq_all_params.json/0
{ "file_path": "text-generation-inference/integration-tests/models/__snapshots__/test_flash_awq/test_flash_llama_awq_all_params.json", "repo_id": "text-generation-inference", "token_count": 1165 }
240
{ "details": { "best_of_sequences": null, "finish_reason": "length", "generated_tokens": 10, "prefill": [ { "id": 2, "logprob": null, "text": "<bos>" }, { "id": 2015, "logprob": -9.640625, "text": "Test" }, { "id": 3853, "logprob": -10.375, "text": " request" } ], "seed": 0, "tokens": [ { "id": 604, "logprob": -0.2824707, "special": false, "text": " for" }, { "id": 573, "logprob": -0.19030762, "special": false, "text": " the" }, { "id": 16819, "logprob": -1.4892578, "special": false, "text": " detection" }, { "id": 576, "logprob": -0.7011719, "special": false, "text": " of" }, { "id": 573, "logprob": -2.0195312, "special": false, "text": " the" }, { "id": 8566, "logprob": 0.0, "special": false, "text": " presence" }, { "id": 689, "logprob": -0.16491699, "special": false, "text": " or" }, { "id": 14862, "logprob": 0.0, "special": false, "text": " absence" }, { "id": 576, "logprob": -0.9946289, "special": false, "text": " of" }, { "id": 671, "logprob": -0.5263672, "special": false, "text": " an" } ], "top_tokens": null }, "generated_text": "Test request for the detection of the presence or absence of an" }
text-generation-inference/integration-tests/models/__snapshots__/test_flash_gemma_gptq/test_flash_gemma_gptq_all_params.json/0
{ "file_path": "text-generation-inference/integration-tests/models/__snapshots__/test_flash_gemma_gptq/test_flash_gemma_gptq_all_params.json", "repo_id": "text-generation-inference", "token_count": 1046 }
241
{ "details": { "best_of_sequences": null, "finish_reason": "stop_sequence", "generated_tokens": 5, "prefill": [ { "id": 128000, "logprob": null, "text": "<|begin_of_text|>" }, { "id": 2323, "logprob": -9.5625, "text": "Test" }, { "id": 1715, "logprob": -10.4375, "text": " request" } ], "seed": 0, "tokens": [ { "id": 25, "logprob": -0.8984375, "special": false, "text": ":" }, { "id": 923, "logprob": -2.84375, "special": false, "text": " add" }, { "id": 264, "logprob": 0.0, "special": false, "text": " a" }, { "id": 330, "logprob": -0.31640625, "special": false, "text": " \"" }, { "id": 1985, "logprob": 0.0, "special": false, "text": "test" } ], "top_tokens": null }, "generated_text": "Test request: add a \"test" }
text-generation-inference/integration-tests/models/__snapshots__/test_flash_llama_fp8/test_flash_llama_fp8_all_params.json/0
{ "file_path": "text-generation-inference/integration-tests/models/__snapshots__/test_flash_llama_fp8/test_flash_llama_fp8_all_params.json", "repo_id": "text-generation-inference", "token_count": 671 }
242
[ { "details": { "best_of_sequences": null, "finish_reason": "length", "generated_tokens": 10, "prefill": [ { "id": 1, "logprob": null, "text": "<s>" }, { "id": 3735, "logprob": -12.9140625, "text": "Test" }, { "id": 2159, "logprob": -10.7578125, "text": "request" } ], "seed": null, "tokens": [ { "id": 28747, "logprob": -0.55078125, "special": false, "text": ":" }, { "id": 3169, "logprob": -1.4140625, "special": false, "text": " Let" }, { "id": 307, "logprob": -3.0273438, "special": false, "text": " n" }, { "id": 327, "logprob": -0.94140625, "special": false, "text": " =" }, { "id": 28705, "logprob": -0.8173828, "special": false, "text": " " }, { "id": 28740, "logprob": -1.2978516, "special": false, "text": "1" }, { "id": 28734, "logprob": -2.0664062, "special": false, "text": "0" }, { "id": 387, "logprob": -1.9560547, "special": false, "text": " -" }, { "id": 28705, "logprob": -0.5078125, "special": false, "text": " " }, { "id": 28740, "logprob": -1.1787109, "special": false, "text": "1" } ], "top_tokens": null }, "generated_text": ": Let n = 10 - 1" }, { "details": { "best_of_sequences": null, "finish_reason": "length", "generated_tokens": 10, "prefill": [ { "id": 1, "logprob": null, "text": "<s>" }, { "id": 3735, "logprob": -12.9140625, "text": "Test" }, { "id": 2159, "logprob": -10.7578125, "text": "request" } ], "seed": null, "tokens": [ { "id": 28747, "logprob": -0.54785156, "special": false, "text": ":" }, { "id": 3169, "logprob": -1.4111328, "special": false, "text": " Let" }, { "id": 307, "logprob": -3.0292969, "special": false, "text": " n" }, { "id": 327, "logprob": -0.94433594, "special": false, "text": " =" }, { "id": 28705, "logprob": -0.8178711, "special": false, "text": " " }, { "id": 28740, "logprob": -1.2939453, "special": false, "text": "1" }, { "id": 28734, "logprob": -2.0644531, "special": false, "text": "0" }, { "id": 387, "logprob": -1.9550781, "special": false, "text": " -" }, { "id": 28705, "logprob": -0.5078125, "special": false, "text": " " }, { "id": 28740, "logprob": -1.1796875, "special": false, "text": "1" } ], "top_tokens": null }, "generated_text": ": Let n = 10 - 1" }, { "details": { "best_of_sequences": null, "finish_reason": "length", "generated_tokens": 10, "prefill": [ { "id": 1, "logprob": null, "text": "<s>" }, { "id": 3735, "logprob": -12.9140625, "text": "Test" }, { "id": 2159, "logprob": -10.7578125, "text": "request" } ], "seed": null, "tokens": [ { "id": 28747, "logprob": -0.55078125, "special": false, "text": ":" }, { "id": 3169, "logprob": -1.4140625, "special": false, "text": " Let" }, { "id": 307, "logprob": -3.0273438, "special": false, "text": " n" }, { "id": 327, "logprob": -0.94140625, "special": false, "text": " =" }, { "id": 28705, "logprob": -0.8173828, "special": false, "text": " " }, { "id": 28740, "logprob": -1.2978516, "special": false, "text": "1" }, { "id": 28734, "logprob": -2.0664062, "special": false, "text": "0" }, { "id": 387, "logprob": -1.9560547, "special": false, "text": " -" }, { "id": 28705, "logprob": -0.5078125, "special": false, "text": " " }, { "id": 28740, "logprob": -1.1787109, "special": false, "text": "1" } ], "top_tokens": null }, "generated_text": ": Let n = 10 - 1" }, { "details": { "best_of_sequences": null, "finish_reason": "length", "generated_tokens": 10, "prefill": [ { "id": 1, "logprob": null, "text": "<s>" }, { "id": 3735, "logprob": -12.9140625, "text": "Test" }, { "id": 2159, "logprob": -10.7578125, "text": "request" } ], "seed": null, "tokens": [ { "id": 28747, "logprob": -0.55078125, "special": 
false, "text": ":" }, { "id": 3169, "logprob": -1.4140625, "special": false, "text": " Let" }, { "id": 307, "logprob": -3.0273438, "special": false, "text": " n" }, { "id": 327, "logprob": -0.94140625, "special": false, "text": " =" }, { "id": 28705, "logprob": -0.8173828, "special": false, "text": " " }, { "id": 28740, "logprob": -1.2978516, "special": false, "text": "1" }, { "id": 28734, "logprob": -2.0664062, "special": false, "text": "0" }, { "id": 387, "logprob": -1.9560547, "special": false, "text": " -" }, { "id": 28705, "logprob": -0.5078125, "special": false, "text": " " }, { "id": 28740, "logprob": -1.1787109, "special": false, "text": "1" } ], "top_tokens": null }, "generated_text": ": Let n = 10 - 1" } ]
text-generation-inference/integration-tests/models/__snapshots__/test_flash_mistral/test_flash_mistral_load.json/0
{ "file_path": "text-generation-inference/integration-tests/models/__snapshots__/test_flash_mistral/test_flash_mistral_load.json", "repo_id": "text-generation-inference", "token_count": 4897 }
243
{ "details": { "best_of_sequences": null, "finish_reason": "length", "generated_tokens": 60, "prefill": [ { "id": 589, "logprob": null, "text": "def" }, { "id": 1459, "logprob": -5.625, "text": " print" }, { "id": 81, "logprob": -1.6064453, "text": "_" }, { "id": 7656, "logprob": -5.9921875, "text": "hello" } ], "seed": 0, "tokens": [ { "id": 2262, "logprob": -0.045715332, "special": false, "text": "():" }, { "id": 284, "logprob": 0.0, "special": false, "text": "\n " }, { "id": 1459, "logprob": 0.0, "special": false, "text": " print" }, { "id": 440, "logprob": 0.0, "special": false, "text": "(\"" }, { "id": 8279, "logprob": 0.0, "special": false, "text": "Hello" }, { "id": 10896, "logprob": -0.3659668, "special": false, "text": " World" }, { "id": 657, "logprob": -0.5229492, "special": false, "text": "\")" }, { "id": 203, "logprob": -0.10632324, "special": false, "text": "\n" }, { "id": 203, "logprob": 0.0, "special": false, "text": "\n" }, { "id": 589, "logprob": -0.20141602, "special": false, "text": "def" }, { "id": 1459, "logprob": 0.0, "special": false, "text": " print" }, { "id": 81, "logprob": 0.0, "special": false, "text": "_" }, { "id": 7656, "logprob": 0.0, "special": false, "text": "hello" }, { "id": 81, "logprob": 0.0, "special": false, "text": "_" }, { "id": 426, "logprob": -0.051635742, "special": false, "text": "name" }, { "id": 26, "logprob": 0.0, "special": false, "text": "(" }, { "id": 426, "logprob": 0.0, "special": false, "text": "name" }, { "id": 711, "logprob": 0.0, "special": false, "text": "):" }, { "id": 284, "logprob": 0.0, "special": false, "text": "\n " }, { "id": 1459, "logprob": 0.0, "special": false, "text": " print" }, { "id": 440, "logprob": -0.16027832, "special": false, "text": "(\"" }, { "id": 8279, "logprob": 0.0, "special": false, "text": "Hello" }, { "id": 313, "logprob": 0.0, "special": false, "text": " \"" }, { "id": 474, "logprob": 0.0, "special": false, "text": " +" }, { "id": 636, "logprob": 0.0, "special": false, "text": " name" }, { "id": 27, "logprob": 0.0, "special": false, "text": ")" }, { "id": 203, "logprob": 0.0, "special": false, "text": "\n" }, { "id": 203, "logprob": 0.0, "special": false, "text": "\n" }, { "id": 589, "logprob": 0.0, "special": false, "text": "def" }, { "id": 1459, "logprob": 0.0, "special": false, "text": " print" }, { "id": 81, "logprob": 0.0, "special": false, "text": "_" }, { "id": 7656, "logprob": 0.0, "special": false, "text": "hello" }, { "id": 81, "logprob": 0.0, "special": false, "text": "_" }, { "id": 426, "logprob": 0.0, "special": false, "text": "name" }, { "id": 81, "logprob": 0.0, "special": false, "text": "_" }, { "id": 381, "logprob": 0.0, "special": false, "text": "age" }, { "id": 26, "logprob": 0.0, "special": false, "text": "(" }, { "id": 426, "logprob": 0.0, "special": false, "text": "name" }, { "id": 30, "logprob": 0.0, "special": false, "text": "," }, { "id": 11442, "logprob": 0.0, "special": false, "text": " age" }, { "id": 711, "logprob": 0.0, "special": false, "text": "):" }, { "id": 284, "logprob": 0.0, "special": false, "text": "\n " }, { "id": 1459, "logprob": 0.0, "special": false, "text": " print" }, { "id": 440, "logprob": 0.0, "special": false, "text": "(\"" }, { "id": 8279, "logprob": 0.0, "special": false, "text": "Hello" }, { "id": 313, "logprob": 0.0, "special": false, "text": " \"" }, { "id": 474, "logprob": 0.0, "special": false, "text": " +" }, { "id": 636, "logprob": 0.0, "special": false, "text": " name" }, { "id": 474, "logprob": 0.0, "special": false, "text": " +" }, { "id": 
313, "logprob": -0.6933594, "special": false, "text": " \"" }, { "id": 313, "logprob": -1.7011719, "special": false, "text": " \"" }, { "id": 474, "logprob": 0.0, "special": false, "text": " +" }, { "id": 596, "logprob": 0.0, "special": false, "text": " str" }, { "id": 26, "logprob": 0.0, "special": false, "text": "(" }, { "id": 381, "logprob": 0.0, "special": false, "text": "age" }, { "id": 490, "logprob": 0.0, "special": false, "text": "))" }, { "id": 203, "logprob": 0.0, "special": false, "text": "\n" }, { "id": 203, "logprob": 0.0, "special": false, "text": "\n" }, { "id": 589, "logprob": 0.0, "special": false, "text": "def" }, { "id": 1459, "logprob": 0.0, "special": false, "text": " print" } ], "top_tokens": null }, "generated_text": "():\n print(\"Hello World\")\n\ndef print_hello_name(name):\n print(\"Hello \" + name)\n\ndef print_hello_name_age(name, age):\n print(\"Hello \" + name + \" \" + str(age))\n\ndef print" }
text-generation-inference/integration-tests/models/__snapshots__/test_flash_starcoder/test_flash_starcoder_default_params.json/0
{ "file_path": "text-generation-inference/integration-tests/models/__snapshots__/test_flash_starcoder/test_flash_starcoder_default_params.json", "repo_id": "text-generation-inference", "token_count": 4747 }
244
{ "details": { "best_of_sequences": null, "finish_reason": "eos_token", "generated_tokens": 19, "prefill": [], "seed": null, "tokens": [ { "id": 415, "logprob": -0.03665161, "special": false, "text": " The" }, { "id": 12072, "logprob": -0.13549805, "special": false, "text": " cow" }, { "id": 349, "logprob": -0.05819702, "special": false, "text": " is" }, { "id": 6328, "logprob": -0.6826172, "special": false, "text": " standing" }, { "id": 356, "logprob": -0.1607666, "special": false, "text": " on" }, { "id": 272, "logprob": -0.5073242, "special": false, "text": " the" }, { "id": 10305, "logprob": -0.016418457, "special": false, "text": " beach" }, { "id": 304, "logprob": -1.3916016, "special": false, "text": " and" }, { "id": 272, "logprob": -0.020217896, "special": false, "text": " the" }, { "id": 13088, "logprob": -0.0028133392, "special": false, "text": " chicken" }, { "id": 349, "logprob": -0.003145218, "special": false, "text": " is" }, { "id": 6398, "logprob": -0.37060547, "special": false, "text": " sitting" }, { "id": 356, "logprob": -0.034851074, "special": false, "text": " on" }, { "id": 264, "logprob": -0.2878418, "special": false, "text": " a" }, { "id": 17972, "logprob": -0.046051025, "special": false, "text": " pile" }, { "id": 302, "logprob": -0.00028848648, "special": false, "text": " of" }, { "id": 2445, "logprob": -0.025772095, "special": false, "text": " money" }, { "id": 28723, "logprob": -0.018127441, "special": false, "text": "." }, { "id": 32002, "logprob": -0.00019824505, "special": true, "text": "<end_of_utterance>" } ], "top_tokens": null }, "generated_text": " The cow is standing on the beach and the chicken is sitting on a pile of money." }
text-generation-inference/integration-tests/models/__snapshots__/test_idefics2/test_flash_idefics2_two_images.json/0
{ "file_path": "text-generation-inference/integration-tests/models/__snapshots__/test_idefics2/test_flash_idefics2_two_images.json", "repo_id": "text-generation-inference", "token_count": 1559 }
245