from dataclasses import dataclass
from typing import List, Optional, Tuple, Union

import numpy as np
import torch
from PIL import Image
from tqdm.auto import tqdm
from transformers import CLIPTextModel, CLIPTokenizer

from diffusers import DiffusionPipeline
from diffusers.image_processor import PipelineImageInput
from diffusers.models import AutoencoderKL, UNet2DConditionModel
from diffusers.pipelines.marigold.marigold_image_processing import MarigoldImageProcessor
from diffusers.schedulers import DDIMScheduler
from diffusers.utils import BaseOutput, logging


def zeros_tensor(
    shape: Union[Tuple, List],
    device: Optional["torch.device"] = None,
    dtype: Optional["torch.dtype"] = None,
    layout: Optional["torch.layout"] = None,
):
    """
    A helper function to create tensors of zeros on the desired `device`. Mirrors `randn_tensor` from
    `diffusers.utils.torch_utils`.
    """
    layout = layout or torch.strided
    device = device or torch.device("cpu")
    latents = torch.zeros(list(shape), dtype=dtype, layout=layout).to(device)
    return latents


logger = logging.get_logger(__name__)


@dataclass
class E2EMarigoldNormalsOutput(BaseOutput):
    """
    Output class for the E2E FT Marigold monocular normals prediction pipeline.

    Args:
        prediction (`np.ndarray`, `torch.Tensor`):
            Predicted normals with values in the range [-1, 1]. The shape is always $numimages \times 3 \times height
            \times width$, regardless of whether the images were passed as a 4D array or a list.
        latent (`None`, `torch.Tensor`):
            Latent features corresponding to the predictions. The shape is $numimages \times 4 \times latentheight
            \times latentwidth$; E2E FT models are deterministic and use no ensembling, so there is exactly one
            latent per image.
    """

    prediction: Union[np.ndarray, torch.Tensor]
    latent: Union[None, torch.Tensor]


class E2EMarigoldNormalsPipeline(DiffusionPipeline):
    """
    Pipeline for monocular normals estimation using the end-to-end fine-tuned (E2E FT) Marigold and Stable Diffusion
    method: https://gonzalomartingarcia.github.io/diffusion-e2e-ft/
    Implementation is built upon Marigold: https://marigoldmonodepth.github.io

    This model inherits from [`DiffusionPipeline`]. Check the superclass documentation for the generic methods the
    library implements for all the pipelines (such as downloading or saving, running on a particular device, etc.)

    Args:
        unet (`UNet2DConditionModel`):
            Conditional U-Net to denoise the normals latent, conditioned on the image latent.
        vae (`AutoencoderKL`):
            Variational Auto-Encoder (VAE) Model to encode and decode images and predictions to and from latent
            representations.
        scheduler (`DDIMScheduler`):
            A scheduler to be used in combination with `unet` to denoise the encoded image latents.
        text_encoder (`CLIPTextModel`):
            Text-encoder, for empty text embedding.
        tokenizer (`CLIPTokenizer`):
            CLIP tokenizer.
        default_processing_resolution (`int`, *optional*):
            The recommended value of the `processing_resolution` parameter of the pipeline. This value must be set in
            the model config. When the pipeline is called without explicitly setting `processing_resolution`, the
            default value is used. This is required to ensure reasonable results with various model flavors trained
            with varying optimal processing resolution values.
    """

    model_cpu_offload_seq = "text_encoder->unet->vae"

    def __init__(
        self,
        unet: UNet2DConditionModel,
        vae: AutoencoderKL,
        scheduler: DDIMScheduler,
        text_encoder: CLIPTextModel,
        tokenizer: CLIPTokenizer,
        default_processing_resolution: Optional[int] = 768,
    ):
        super().__init__()

        self.register_modules(
            unet=unet,
            vae=vae,
            scheduler=scheduler,
            text_encoder=text_encoder,
            tokenizer=tokenizer,
        )
        self.register_to_config(
            default_processing_resolution=default_processing_resolution,
        )

        self.vae_scale_factor = 2 ** (len(self.vae.config.block_out_channels) - 1)
        self.default_processing_resolution = default_processing_resolution
        self.empty_text_embedding = None

        self.image_processor = MarigoldImageProcessor(vae_scale_factor=self.vae_scale_factor)

    def check_inputs(
        self,
        image: PipelineImageInput,
        processing_resolution: int,
        resample_method_input: str,
        resample_method_output: str,
        batch_size: int,
        output_type: str,
    ) -> int:
        if processing_resolution is None:
            raise ValueError(
                "`processing_resolution` is not specified and could not be resolved from the model config."
            )
        if processing_resolution < 0:
            raise ValueError(
                "`processing_resolution` must be non-negative: 0 for native resolution, or any positive value for "
                "downsampled processing."
            )
        if processing_resolution % self.vae_scale_factor != 0:
            raise ValueError(f"`processing_resolution` must be a multiple of {self.vae_scale_factor}.")
        if resample_method_input not in ("nearest", "nearest-exact", "bilinear", "bicubic", "area"):
            raise ValueError(
                "`resample_method_input` takes string values compatible with PIL library: "
                "nearest, nearest-exact, bilinear, bicubic, area."
            )
        if resample_method_output not in ("nearest", "nearest-exact", "bilinear", "bicubic", "area"):
            raise ValueError(
                "`resample_method_output` takes string values compatible with PIL library: "
                "nearest, nearest-exact, bilinear, bicubic, area."
            )
        if batch_size < 1:
            raise ValueError("`batch_size` must be positive.")
        if output_type not in ["pt", "np"]:
            raise ValueError("`output_type` must be one of `pt` or `np`.")

        # Count the input images and verify that they all share the same spatial dimensions.
        num_images = 0
        W, H = None, None
        if not isinstance(image, list):
            image = [image]
        for i, img in enumerate(image):
            if isinstance(img, np.ndarray) or torch.is_tensor(img):
                if img.ndim not in (2, 3, 4):
                    raise ValueError(f"`image[{i}]` has unsupported dimensions or shape: {img.shape}.")
                H_i, W_i = img.shape[-2:]
                N_i = 1
                if img.ndim == 4:
                    N_i = img.shape[0]
            elif isinstance(img, Image.Image):
                W_i, H_i = img.size
                N_i = 1
            else:
                raise ValueError(f"Unsupported `image[{i}]` type: {type(img)}.")
            if W is None:
                W, H = W_i, H_i
            elif (W, H) != (W_i, H_i):
                raise ValueError(
                    f"Input `image[{i}]` has incompatible dimensions {(W_i, H_i)} with the previous images {(W, H)}"
                )
            num_images += N_i

        # Verify that downsampling to `processing_resolution` does not collapse either spatial dimension to zero.
        if processing_resolution > 0:
            max_orig = max(H, W)
            new_H = H * processing_resolution // max_orig
            new_W = W * processing_resolution // max_orig
            if new_H == 0 or new_W == 0:
                raise ValueError(f"Extreme aspect ratio of the input image: [{W} x {H}]")

        return num_images

    def progress_bar(self, iterable=None, total=None, desc=None, leave=True):
        if not hasattr(self, "_progress_bar_config"):
            self._progress_bar_config = {}
        elif not isinstance(self._progress_bar_config, dict):
            raise ValueError(
                f"`self._progress_bar_config` should be of type `dict`, but is {type(self._progress_bar_config)}."
            )

        progress_bar_config = dict(**self._progress_bar_config)
        progress_bar_config["desc"] = progress_bar_config.get("desc", desc)
        progress_bar_config["leave"] = progress_bar_config.get("leave", leave)
        if iterable is not None:
            return tqdm(iterable, **progress_bar_config)
        elif total is not None:
            return tqdm(total=total, **progress_bar_config)
        else:
            raise ValueError("Either `total` or `iterable` has to be defined.")

    @torch.no_grad()
    def __call__(
        self,
        image: PipelineImageInput,
        processing_resolution: Optional[int] = None,
        match_input_resolution: bool = True,
        resample_method_input: str = "bilinear",
        resample_method_output: str = "bilinear",
        batch_size: int = 1,
        output_type: str = "np",
        output_latent: bool = False,
        return_dict: bool = True,
    ):
| """ |
| Function invoked when calling the pipeline. |
| |
| Args: |
| image (`PIL.Image.Image`, `np.ndarray`, `torch.Tensor`, `List[PIL.Image.Image]`, `List[np.ndarray]`), |
| `List[torch.Tensor]`: An input image or images used as an input for the normals estimation task. For |
| arrays and tensors, the expected value range is between `[0, 1]`. Passing a batch of images is possible |
| by providing a four-dimensional array or a tensor. Additionally, a list of images of two- or |
| three-dimensional arrays or tensors can be passed. In the latter case, all list elements must have the |
| same width and height. |
| processing_resolution (`int`, *optional*, defaults to `None`): |
| Effective processing resolution. When set to `0`, matches the larger input image dimension. This |
| produces crisper predictions, but may also lead to the overall loss of global context. The default |
| value `None` resolves to the optimal value from the model config. |
| match_input_resolution (`bool`, *optional*, defaults to `True`): |
| When enabled, the output prediction is resized to match the input dimensions. When disabled, the longer |
| side of the output will equal to `processing_resolution`. |
| resample_method_input (`str`, *optional*, defaults to `"bilinear"`): |
| Resampling method used to resize input images to `processing_resolution`. The accepted values are: |
| `"nearest"`, `"nearest-exact"`, `"bilinear"`, `"bicubic"`, or `"area"`. |
| resample_method_output (`str`, *optional*, defaults to `"bilinear"`): |
| Resampling method used to resize output predictions to match the input resolution. The accepted values |
| are `"nearest"`, `"nearest-exact"`, `"bilinear"`, `"bicubic"`, or `"area"`. |
| batch_size (`int`, *optional*, defaults to `1`): |
| Batch size; only matters when passing a tensor of images. |
| output_type (`str`, *optional*, defaults to `"np"`): |
| Preferred format of the output's `prediction`. The accepted values are: `"np"` (numpy array) or `"pt"` (torch tensor). |
| output_latent (`bool`, *optional*, defaults to `False`): |
| When enabled, the output's `latent` field contains the latent codes corresponding to the predictions |
| within the ensemble. These codes can be saved, modified, and used for subsequent calls with the |
| `latents` argument. |
| return_dict (`bool`, *optional*, defaults to `True`): |
| Whether or not to return a [`~pipelines.marigold.MarigoldDepthOutput`] instead of a plain tuple. |
| |
| # add |
| E2E FT models are deterministic single step models involving no ensembling, i.e. E=1. |
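
        Example (a minimal usage sketch; the checkpoint ID below is illustrative and not guaranteed to exist under
        this exact name):

        ```py
        >>> import torch
        >>> from PIL import Image

        >>> # Hypothetical E2E FT normals checkpoint; substitute the actual model ID.
        >>> pipe = E2EMarigoldNormalsPipeline.from_pretrained(
        ...     "GonzaloMG/marigold-e2e-ft-normals", torch_dtype=torch.float16
        ... ).to("cuda")

        >>> image = Image.open("input.jpg")
        >>> output = pipe(image)
        >>> normals = output.prediction  # shape [N, 3, H, W], values in [-1, 1]
        ```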
| """ |
|
|
| |
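
        # 0. Resolve commonly used variables.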
        device = self._execution_device
        dtype = self.dtype
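
        # 1. Resolve the effective processing resolution from the model config when not given explicitly.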
        if processing_resolution is None:
            processing_resolution = self.default_processing_resolution
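
        # 2. Check inputs and count the number of input images.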
        num_images = self.check_inputs(
            image,
            processing_resolution,
            resample_method_input,
            resample_method_output,
            batch_size,
            output_type,
        )
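
        # 3. Prepare the empty text conditioning; it is computed once and cached on the pipeline.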
        if self.empty_text_embedding is None:
            prompt = ""
            text_inputs = self.tokenizer(
                prompt,
                padding="do_not_pad",
                max_length=self.tokenizer.model_max_length,
                truncation=True,
                return_tensors="pt",
            )
            text_input_ids = text_inputs.input_ids.to(device)
            self.empty_text_embedding = self.text_encoder(text_input_ids)[0]
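
        # 4. Preprocess the input images: resize to `processing_resolution` and pad so that both spatial
        #    dimensions are multiples of the VAE scale factor; `padding` and `original_resolution` are kept
        #    to undo these steps after prediction.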
        image, padding, original_resolution = self.image_processor.preprocess(
            image, processing_resolution, resample_method_input, device, dtype
        )
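
        # 5. Encode the input images into latent space and initialize the prediction latents with zeros
        #    (the E2E FT model is deterministic, so no noise is sampled).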
        image_latent, pred_latent = self.prepare_latents(image, batch_size)

        del image
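
        # 6. Broadcast the cached empty text embedding to the batch size.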
        batch_empty_text_embedding = self.empty_text_embedding.to(device=device, dtype=dtype).repeat(
            batch_size, 1, 1
        )
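
        # 7. Denoising loop: each batch of images is predicted in a single scheduler step.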
        pred_latents = []

        for i in self.progress_bar(range(0, num_images, batch_size), leave=True, desc="E2E FT predictions..."):
            batch_image_latent = image_latent[i : i + batch_size]
            batch_pred_latent = pred_latent[i : i + batch_size]
            effective_batch_size = batch_image_latent.shape[0]
            text = batch_empty_text_embedding[:effective_batch_size]

            # Single deterministic step: concatenate image and prediction latents along the channel dimension,
            # run the U-Net once, and let the scheduler recover the clean latent via `pred_original_sample`.
            self.scheduler.set_timesteps(1, device=device)
            for t in self.progress_bar(self.scheduler.timesteps, leave=False, desc="Diffusion steps..."):
                batch_latent = torch.cat([batch_image_latent, batch_pred_latent], dim=1)
                noise = self.unet(batch_latent, t, encoder_hidden_states=text, return_dict=False)[0]
                batch_pred_latent = self.scheduler.step(noise, t, batch_pred_latent).pred_original_sample

            pred_latents.append(batch_pred_latent)

        pred_latent = torch.cat(pred_latents, dim=0)

        del (
            pred_latents,
            image_latent,
            batch_empty_text_embedding,
            batch_image_latent,
            batch_pred_latent,
            text,
            batch_latent,
            noise,
        )
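
        # 8. Decode the prediction latents into normals, batched to bound peak memory.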
        prediction = torch.cat(
            [
                self.decode_prediction(pred_latent[i : i + batch_size])
                for i in range(0, pred_latent.shape[0], batch_size)
            ],
            dim=0,
        )

        if not output_latent:
            pred_latent = None
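
        # 9. Remove padding and, if requested, resize the prediction back to the input resolution.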
        prediction = self.image_processor.unpad_image(prediction, padding)

        if match_input_resolution:
            prediction = self.image_processor.resize_antialias(
                prediction, original_resolution, resample_method_output, is_aa=False
            )
            # Interpolation does not preserve unit length, so renormalize after resizing.
            prediction = self.normalize_normals(prediction)
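
        # 10. Convert to the requested output type.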
        if output_type == "np":
            prediction = self.image_processor.pt_to_numpy(prediction)

        # Offload all models to free device memory if offloading was enabled.
        self.maybe_free_model_hooks()

        if not return_dict:
            return (prediction, pred_latent)

        return E2EMarigoldNormalsOutput(
            prediction=prediction,
            latent=pred_latent,
        )

    def prepare_latents(
        self,
        image: torch.Tensor,
        batch_size: int,
    ) -> Tuple[torch.Tensor, torch.Tensor]:
        def retrieve_latents(encoder_output):
            if hasattr(encoder_output, "latent_dist"):
                return encoder_output.latent_dist.mode()
            elif hasattr(encoder_output, "latents"):
                return encoder_output.latents
            else:
                raise AttributeError("Could not access latents of provided encoder_output")

        # Encode the input images in batches and scale by the VAE scaling factor.
        image_latent = torch.cat(
            [
                retrieve_latents(self.vae.encode(image[i : i + batch_size]))
                for i in range(0, image.shape[0], batch_size)
            ],
            dim=0,
        )
        image_latent = image_latent * self.vae.config.scaling_factor

        # E2E FT models are deterministic: the prediction latent starts from zeros rather than random noise.
        pred_latent = zeros_tensor(
            image_latent.shape,
            device=image_latent.device,
            dtype=image_latent.dtype,
        )

        return image_latent, pred_latent

    def decode_prediction(self, pred_latent: torch.Tensor) -> torch.Tensor:
        if pred_latent.dim() != 4 or pred_latent.shape[1] != self.vae.config.latent_channels:
            raise ValueError(
                f"Expecting 4D tensor of shape [B,{self.vae.config.latent_channels},H,W]; got {pred_latent.shape}."
            )

        prediction = self.vae.decode(pred_latent / self.vae.config.scaling_factor, return_dict=False)[0]

        prediction = self.normalize_normals(prediction)
        prediction = torch.clip(prediction, -1.0, 1.0)

        return prediction

    @staticmethod
    def normalize_normals(normals: torch.Tensor, eps: float = 1e-6) -> torch.Tensor:
        if normals.dim() != 4 or normals.shape[1] != 3:
            raise ValueError(f"Expecting 4D tensor of shape [B,3,H,W]; got {normals.shape}.")

        # Normalize each pixel's normal vector to unit length, guarding against division by zero.
        norm = torch.norm(normals, dim=1, keepdim=True)
        normals /= norm.clamp(min=eps)

        return normals