This article walks through a PyTorch implementation of VQ-VAE. Hopefully it serves as a useful reference for developers tackling this kind of problem; let's work through the code together!
Table of contents
- model
- main
model
import torch
import torch
import torch.nn as nn


class ResidualBlock(nn.Module):

    def __init__(self, dim):
        super().__init__()
        self.relu = nn.ReLU()
        self.conv1 = nn.Conv2d(dim, dim, 3, 1, 1)
        self.conv2 = nn.Conv2d(dim, dim, 1)

    def forward(self, x):
        tmp = self.relu(x)
        tmp = self.conv1(tmp)
        tmp = self.relu(tmp)
        tmp = self.conv2(tmp)
        return x + tmp


class VQVAE(nn.Module):

    def __init__(self, input_dim, dim, n_embedding):
        super().__init__()
        self.encoder = nn.Sequential(nn.Conv2d(input_dim, dim, 4, 2, 1),
                                     nn.ReLU(), nn.Conv2d(dim, dim, 4, 2, 1),
                                     nn.ReLU(), nn.Conv2d(dim, dim, 3, 1, 1),
                                     ResidualBlock(dim), ResidualBlock(dim))
        self.vq_embedding = nn.Embedding(n_embedding, dim)
        self.vq_embedding.weight.data.uniform_(-1.0 / n_embedding,
                                               1.0 / n_embedding)
        self.decoder = nn.Sequential(
            nn.Conv2d(dim, dim, 3, 1, 1),
            ResidualBlock(dim), ResidualBlock(dim),
            nn.ConvTranspose2d(dim, dim, 4, 2, 1), nn.ReLU(),
            nn.ConvTranspose2d(dim, input_dim, 4, 2, 1))
        self.n_downsample = 2

    def forward(self, x):
        # encode
        ze = self.encoder(x)

        # ze: [N, C, H, W]
        # embedding: [K, C]
        embedding = self.vq_embedding.weight.data
        N, C, H, W = ze.shape
        K, _ = embedding.shape
        embedding_broadcast = embedding.reshape(1, K, C, 1, 1)
        ze_broadcast = ze.reshape(N, 1, C, H, W)
        distance = torch.sum((embedding_broadcast - ze_broadcast)**2, 2)
        nearest_neighbor = torch.argmin(distance, 1)
        # make C the second dim
        zq = self.vq_embedding(nearest_neighbor).permute(0, 3, 1, 2)
        # stop gradient
        decoder_input = ze + (zq - ze).detach()

        # decode
        x_hat = self.decoder(decoder_input)
        return x_hat, ze, zq

    @torch.no_grad()
    def encode(self, x):
        ze = self.encoder(x)
        embedding = self.vq_embedding.weight.data

        # ze: [N, C, H, W]
        # embedding: [K, C]
        N, C, H, W = ze.shape
        K, _ = embedding.shape
        embedding_broadcast = embedding.reshape(1, K, C, 1, 1)
        ze_broadcast = ze.reshape(N, 1, C, H, W)
        distance = torch.sum((embedding_broadcast - ze_broadcast)**2, 2)
        nearest_neighbor = torch.argmin(distance, 1)
        return nearest_neighbor

    @torch.no_grad()
    def decode(self, discrete_latent):
        zq = self.vq_embedding(discrete_latent).permute(0, 3, 1, 2)
        x_hat = self.decoder(zq)
        return x_hat

    # input_shape: [C, H, W]
    def get_latent_HW(self, input_shape):
        C, H, W = input_shape
        return (H // 2**self.n_downsample, W // 2**self.n_downsample)
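As a quick sanity check, the model can be exercised on random tensors. The snippet below is a minimal sketch; the 1-channel 28x28 input size (as for MNIST) and the hyperparameters dim=32, n_embedding=32 are illustrative assumptions, not values from the original post:

# Illustrative sketch: sizes assume 1x28x28 inputs and hypothetical
# hyperparameters dim=32, n_embedding=32.
import torch

model = VQVAE(input_dim=1, dim=32, n_embedding=32)
x = torch.randn(4, 1, 28, 28)

x_hat, ze, zq = model(x)
print(x_hat.shape)  # torch.Size([4, 1, 28, 28]) - reconstruction
print(ze.shape)     # torch.Size([4, 32, 7, 7])  - encoder output
print(zq.shape)     # torch.Size([4, 32, 7, 7])  - quantized latent

codes = model.encode(x)      # integer code indices, shape [4, 7, 7]
recon = model.decode(codes)  # decode straight from the discrete latent, [4, 1, 28, 28]
print(model.get_latent_HW((1, 28, 28)))  # (7, 7)

The two downsampling convolutions halve the spatial size twice, so a 28x28 image maps to a 7x7 grid of discrete codes, which is exactly what get_latent_HW reports.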
main
# Imports needed by the training script; get_dataloader and USE_LMDB come from
# the author's project-specific data utilities and are not defined in this post.
import time

import torch
import torch.nn as nn


def train_vqvae(model: VQVAE,
                img_shape=None,
                device='cuda',
                ckpt_path='dldemos/VQVAE/model.pth',
                batch_size=64,
                dataset_type='MNIST',
                lr=1e-3,
                n_epochs=100,
                l_w_embedding=1,
                l_w_commitment=0.25):
    print('batch size:', batch_size)
    dataloader = get_dataloader(dataset_type,
                                batch_size,
                                img_shape=img_shape,
                                use_lmdb=USE_LMDB)
    model.to(device)
    model.train()
    optimizer = torch.optim.Adam(model.parameters(), lr)
    mse_loss = nn.MSELoss()
    tic = time.time()
    for e in range(n_epochs):
        total_loss = 0

        for x in dataloader:
            current_batch_size = x.shape[0]
            x = x.to(device)

            x_hat, ze, zq = model(x)
            l_reconstruct = mse_loss(x, x_hat)
            l_embedding = mse_loss(ze.detach(), zq)
            l_commitment = mse_loss(ze, zq.detach())
            loss = l_reconstruct + \
                l_w_embedding * l_embedding + l_w_commitment * l_commitment
            optimizer.zero_grad()
            loss.backward()
            optimizer.step()
            total_loss += loss.item() * current_batch_size
        total_loss /= len(dataloader.dataset)
        toc = time.time()
        torch.save(model.state_dict(), ckpt_path)
        print(f'epoch {e} loss: {total_loss} elapsed {(toc - tic):.2f}s')
    print('Done')
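The script above depends on a project-specific get_dataloader helper and a USE_LMDB flag that are not shown in this post. For the MNIST case, a minimal torchvision-based stand-in could look like the sketch below; the helper name, its signature, and its behaviour are assumptions made for illustration, not the author's original data code:

# Hypothetical stand-in for the project's data utilities, sketched for MNIST only.
import torchvision
import torchvision.transforms as transforms
from torch.utils.data import DataLoader, Dataset

USE_LMDB = False  # placeholder flag; LMDB loading is not implemented in this sketch


class _ImagesOnly(Dataset):
    """Wrap an (image, label) dataset and yield only the image tensor,
    matching how the training loop iterates over plain batches x."""

    def __init__(self, dataset):
        self.dataset = dataset

    def __len__(self):
        return len(self.dataset)

    def __getitem__(self, idx):
        return self.dataset[idx][0]


def get_dataloader(dataset_type, batch_size, img_shape=None, use_lmdb=False):
    assert dataset_type == 'MNIST', 'only the MNIST branch is sketched here'
    transform_list = []
    if img_shape is not None:
        # treat img_shape as the target (H, W) of the resized image
        transform_list.append(transforms.Resize(img_shape))
    transform_list.append(transforms.ToTensor())
    dataset = torchvision.datasets.MNIST(
        root='data',
        train=True,
        download=True,
        transform=transforms.Compose(transform_list))
    return DataLoader(_ImagesOnly(dataset), batch_size=batch_size, shuffle=True)

With a helper like this in place, training reduces to constructing the model and calling the function above, e.g. model = VQVAE(1, 32, 32) followed by train_vqvae(model, dataset_type='MNIST').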
That wraps up this article on the PyTorch implementation of VQ-VAE. I hope it proves helpful to fellow programmers!