| import math |
| import random |
| from functools import partial |
| from typing import Optional, Tuple, Union |
| from itertools import chain |
|
|
| import torch |
| import torch.nn as nn |
| from torch.nn import functional as F |
|
|
| from models.basic_var import AdaLNBeforeHead, AdaLNSelfAttn |
| from models.helpers import gumbel_softmax_with_rng, sample_with_top_k_top_p_ |
| from models.vqvae import VQVAE, VectorQuantizer2 |
|
|
|
|
# Multi-scale patch grid sizes, ordered coarsest to finest.
patch_nums = (1, 2, 3)
# Total token count across all scales: each scale pn contributes pn*pn tokens.
L = sum(pn * pn for pn in patch_nums)
# Number of tokens at the first (coarsest) scale.
first_l = patch_nums[0] ** 2
# Index of the final stage (used for progress ratios and loop termination).
num_stages_minus_1 = len(patch_nums) - 1
|
|
|
|
# Batch size, transformer width, and VQ-VAE code dimensionality.
B = 8
C = 1024
Cvae = 32
# Truncated-normal init std, scaled down with model width.
init_std = math.sqrt(1 / C / 3)
num_classes = 1000
# One extra embedding row acts as the "unconditional" class token
# (index num_classes) for classifier-free guidance.
class_emb = nn.Embedding(num_classes + 1, C)
|
|
| |
# Build the local VQ-VAE and expose its vector quantizer.
# NOTE(review): if any of these module constructors consume the global RNG,
# statement order here affects reproducibility — do not reorder.
vae_local = VQVAE()
quant: VectorQuantizer2 = vae_local.quantize
# 1-tuples wrapping the VAE and its quantizer — presumably so later code can
# reference them without registering them as trainable submodules; confirm intent.
vae_proxy: Tuple[VQVAE] = (vae_local,)
vae_quant_proxy: Tuple[VectorQuantizer2] = (quant,)
# Projects Cvae-dim code vectors into the transformer embedding width C.
word_embed = nn.Linear(Cvae, C)
# Uniform categorical over the real classes only (excludes the extra CFG class).
uniform_prob = torch.full((1, num_classes), fill_value=1.0 / num_classes, dtype=torch.float32)
|
|
| |
# Dedicated CPU generator so label sampling is reproducible.
rng = torch.Generator()
rng.manual_seed(42)


# Draw B class labels uniformly at random (with replacement).
drawn = torch.multinomial(uniform_prob, num_samples=B, replacement=True, generator=rng)
label_B = drawn.reshape(B)


# Conditioning vectors: the B sampled classes followed by B copies of the
# "unconditional" class id (num_classes), giving 2B rows for CFG.
uncond_labels = torch.full_like(label_B, fill_value=num_classes)
sos = cond_BD = class_emb(torch.cat((label_B, uncond_labels), dim=0))
|
|
| |
# Learned absolute position embedding, one slice per scale, concatenated
# along the token axis into a single (1, L, C) parameter.
def _init_scale_pe(pn):
    # One (1, pn*pn, C) slice, truncated-normal initialized in place.
    pe = torch.empty(1, pn * pn, C)
    nn.init.trunc_normal_(pe, mean=0, std=init_std)
    return pe

pos_1LC = torch.cat([_init_scale_pe(pn) for pn in patch_nums], dim=1)
assert tuple(pos_1LC.shape) == (1, L, C)
pos_1LC = nn.Parameter(pos_1LC)

# One learned embedding per scale level.
lvl_embed = nn.Embedding(len(patch_nums), C)
nn.init.trunc_normal_(lvl_embed.weight.data, mean=0, std=init_std)


# Learned start-of-sequence position embedding for the first scale's tokens.
pos_start = nn.Parameter(torch.empty(1, first_l, C))


# Per-token level index: token j carries the index of the scale it belongs to.
d: torch.Tensor = torch.cat([torch.full((pn * pn,), i) for i, pn in enumerate(patch_nums)]).view(1, L, 1)
dT = d.transpose(1, 2)
lvl_1L = dT[:, 0].contiguous()
# Block-causal mask: token i may attend to token j iff level(i) >= level(j);
# disallowed pairs get -inf so softmax zeroes them out.
attn_bias_for_masking = torch.where(d >= dT, 0., -torch.inf).reshape(1, 1, L, L)
| |
|
|
# Combined level + absolute position embedding for the full token sequence.
lvl_pos = lvl_embed(lvl_1L) + pos_1LC

# Stage-0 input tokens: class embedding + learned start position + level/pos
# embedding, broadcast to 2B rows (conditional + unconditional halves).
cond_part = sos.unsqueeze(1).expand(2 * B, first_l, -1)
start_part = pos_start.expand(2 * B, first_l, -1)
next_token_map = cond_part + start_part + lvl_pos[:, :first_l]


# Running count of tokens consumed so far across stages.
cur_L = 0
# Accumulated latent feature map at the finest resolution.
f_hat = sos.new_zeros(B, Cvae, patch_nums[-1], patch_nums[-1])
|
|
|
|
# Coarse-to-fine autoregressive decode, one scale per iteration.
for si, pn in enumerate(patch_nums):
    print("si pn")
    print(si, pn)
    print()
    # Progress through the stages: 0 at the coarsest scale, 1 at the finest.
    ratio = si / num_stages_minus_1

    print("cur_L")
    cur_L += pn*pn
    print(cur_L)
    print()



    # Stand-in for the transformer output at this stage: one Cvae-dim code
    # vector per token of the current pn x pn grid.
    # BUGFIX: was torch.randn(B, L, C); B*L*C elements cannot be reshaped to
    # (B, Cvae, pn, pn). The per-stage tensor must be (B, pn*pn, Cvae) so the
    # transpose + reshape below is valid.
    h_BChw = torch.randn(B, pn * pn, Cvae)
    h_BChw = h_BChw.transpose_(1, 2).reshape(B, Cvae, pn, pn)
    # Fold this stage's codes into f_hat and get the next stage's raw input.
    f_hat, next_token_map = vae_quant_proxy[0].get_next_autoregressive_input(si, len(patch_nums), f_hat, h_BChw)

    if si != num_stages_minus_1:
        # Flatten to (B, l_next, Cvae), embed into width C, add the next
        # stage's level/positional embedding, then duplicate to 2B rows
        # for classifier-free guidance.
        next_token_map = next_token_map.view(B, Cvae, -1).transpose(1, 2)
        print(next_token_map)
        print()
        next_token_map = word_embed(next_token_map) + lvl_pos[:, cur_L:cur_L + patch_nums[si+1] ** 2]
        print(next_token_map)
        print()
        next_token_map = next_token_map.repeat(2, 1, 1)
        print(next_token_map)
        print()
|
|
| |