|
| 1 | +''' |
| 2 | +Base class for ldm.dream.generator.* |
| 3 | +including img2img, txt2img, and inpaint |
| 4 | +''' |
| 5 | +import torch |
| 6 | +import numpy as np |
| 7 | +import random |
| 8 | +from tqdm import tqdm, trange |
| 9 | +from PIL import Image |
| 10 | +from einops import rearrange, repeat |
| 11 | +from pytorch_lightning import seed_everything |
| 12 | +from ldm.dream.devices import choose_autocast_device |
| 13 | + |
# Default latent-space downsampling factor. Presumably the SD VAE's factor of 8
# (latent dims = pixel dims / 8) — confirm against the model config.
downsampling = 8
| 15 | + |
class Generator():
    '''
    Abstract base class for ldm.dream.generator.* (img2img, txt2img, inpaint).

    Subclasses implement get_make_image() and get_noise(); generate() drives
    the shared loop: seeding, variation blending, and per-iteration callbacks.
    '''
    def __init__(self, model):
        self.model = model
        self.seed = None
        self.latent_channels = model.channels
        # Prefer a factor supplied by the model itself; fall back to the
        # module-level default so models without the attribute behave as before.
        # (Addresses the long-standing hard-coded-constant bug noted here.)
        self.downsampling_factor = getattr(model, 'downsampling_factor', downsampling)
        self.variation_amount = 0
        self.with_variations = []

    # this is going to be overridden in img2img.py, txt2img.py and inpaint.py
    def get_make_image(self, prompt, **kwargs):
        """
        Returns a function returning an image derived from the prompt and the initial image.
        Return value depends on the seed at the time you call it.
        """
        raise NotImplementedError("get_make_image() must be implemented in a descendent class")

    def set_variation(self, seed, variation_amount, with_variations):
        '''
        Configure variation generation: base seed, blend amount, and a list of
        (seed, weight) pairs to slerp into the initial noise.
        '''
        self.seed = seed
        self.variation_amount = variation_amount
        self.with_variations = with_variations

    def generate(self, prompt, init_image, width, height, iterations=1, seed=None,
                 image_callback=None, step_callback=None,
                 **kwargs):
        '''
        Generate `iterations` images from `prompt` (and optionally `init_image`).

        Returns a list of [image, seed] pairs. `image_callback(image, seed)` is
        invoked after each image; `step_callback` is forwarded to the sampler.
        '''
        device_type, scope = choose_autocast_device(self.model.device)
        make_image = self.get_make_image(
            prompt,
            init_image   = init_image,
            width        = width,
            height       = height,
            step_callback= step_callback,
            **kwargs
        )

        results = []
        # `is not None`, not truthiness: an explicit seed of 0 must be honored,
        # not silently replaced by a random seed.
        seed = seed if seed is not None else self.new_seed()
        seed, initial_noise = self.generate_initial_noise(seed, width, height)
        with scope(device_type), self.model.ema_scope():
            for n in trange(iterations, desc='Generating'):
                x_T = None
                if self.variation_amount > 0:
                    seed_everything(seed)
                    target_noise = self.get_noise(width, height)
                    x_T = self.slerp(self.variation_amount, initial_noise, target_noise)
                elif initial_noise is not None:
                    # i.e. we specified particular variations
                    x_T = initial_noise
                else:
                    seed_everything(seed)
                    # NOTE(review): looks like MPS needs the noise generated
                    # up-front rather than inside the sampler — confirm.
                    if self.model.device.type == 'mps':
                        x_T = self.get_noise(width, height)

                # make_image will do the equivalent of get_noise itself
                image = make_image(x_T)
                results.append([image, seed])
                if image_callback is not None:
                    image_callback(image, seed)
                seed = self.new_seed()
        return results

    def sample_to_image(self, samples):
        """
        Decode a batch of exactly one latent sample into a PIL Image.

        Raises if the decoded batch contains more than one image.
        """
        x_samples = self.model.decode_first_stage(samples)
        # decode_first_stage output is in [-1, 1]; map to [0, 1] for display
        x_samples = torch.clamp((x_samples + 1.0) / 2.0, min=0.0, max=1.0)
        if len(x_samples) != 1:
            raise Exception(
                f'>> expected to get a single image, but got {len(x_samples)}')
        x_sample = 255.0 * rearrange(
            x_samples[0].cpu().numpy(), 'c h w -> h w c'
        )
        return Image.fromarray(x_sample.astype(np.uint8))

    def generate_initial_noise(self, seed, width, height):
        '''
        Build the fixed initial noise for variation mode.

        Returns (seed, initial_noise). When variations are requested, the
        returned seed is re-randomized so each iteration's extra noise differs;
        otherwise initial_noise is None and the seed is returned unchanged.
        '''
        initial_noise = None
        if self.variation_amount > 0 or len(self.with_variations) > 0:
            # use fixed initial noise plus random noise per iteration
            seed_everything(seed)
            initial_noise = self.get_noise(width, height)
            for v_seed, v_weight in self.with_variations:
                seed = v_seed
                seed_everything(seed)
                next_noise = self.get_noise(width, height)
                initial_noise = self.slerp(v_weight, initial_noise, next_noise)
            if self.variation_amount > 0:
                random.seed()  # reset RNG to an actually random state, so we can get a random seed for variations
                seed = random.randrange(0, np.iinfo(np.uint32).max)
            return (seed, initial_noise)
        else:
            return (seed, None)

    def get_noise(self, width, height):
        """
        Returns a tensor filled with random numbers, either from a normal distribution
        (txt2img) or from the latent image (img2img, inpaint).
        """
        raise NotImplementedError("get_noise() must be implemented in a descendent class")

    def new_seed(self):
        '''Pick a fresh random 32-bit seed, store it on self, and return it.'''
        self.seed = random.randrange(0, np.iinfo(np.uint32).max)
        return self.seed

    def slerp(self, t, v0, v1, DOT_THRESHOLD=0.9995):
        '''
        Spherical linear interpolation
        Args:
            t (float/np.ndarray): Float value between 0.0 and 1.0
            v0 (np.ndarray): Starting vector
            v1 (np.ndarray): Final vector
            DOT_THRESHOLD (float): Threshold for considering the two vectors as
                               colineal. Not recommended to alter this.
        Returns:
            v2 (np.ndarray): Interpolation vector between v0 and v1
        '''
        inputs_are_torch = False
        if not isinstance(v0, np.ndarray):
            inputs_are_torch = True
            v0 = v0.detach().cpu().numpy()
        if not isinstance(v1, np.ndarray):
            inputs_are_torch = True
            v1 = v1.detach().cpu().numpy()

        dot = np.sum(v0 * v1 / (np.linalg.norm(v0) * np.linalg.norm(v1)))
        if np.abs(dot) > DOT_THRESHOLD:
            # nearly colinear: fall back to plain linear interpolation
            v2 = (1 - t) * v0 + t * v1
        else:
            theta_0 = np.arccos(dot)
            sin_theta_0 = np.sin(theta_0)
            theta_t = theta_0 * t
            sin_theta_t = np.sin(theta_t)
            s0 = np.sin(theta_0 - theta_t) / sin_theta_0
            s1 = sin_theta_t / sin_theta_0
            v2 = s0 * v0 + s1 * v1

        if inputs_are_torch:
            v2 = torch.from_numpy(v2).to(self.model.device)

        return v2
| 158 | + |
0 commit comments