Retinal Vessel Segmentation with UNet on the DRIVE Dataset

2024-01-21 09:36

This article walks through retinal vessel segmentation with UNet on the DRIVE dataset, and hopefully serves as a useful reference for developers working on similar problems.

1 Dataset Overview

DRIVE is a very small dataset, which makes it well suited for practicing image segmentation. Its folder layout is shown below:

Figure 1-1 Folder structure

The test folder holds the test images and the training folder holds the 20 training images. Inside each, the images folder contains the raw photographs, the mask folder contains the masks used to select the region of interest, and the 1st_manual folder contains the manually annotated ground truth.
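Since the screenshot is not reproduced here, the following sketch shows the layout implied by the path logic in the loader below and the official DRIVE release (file numbering abbreviated):

DRIVE/
├── training/
│   ├── images/        21_training.tif ... 40_training.tif
│   ├── 1st_manual/    21_manual1.gif ... 40_manual1.gif
│   └── mask/          21_training_mask.gif ... 40_training_mask.gif
└── test/
    ├── images/        01_test.tif ...
    ├── 1st_manual/    01_manual1.gif ...
    └── mask/          01_test_mask.gif ...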

Figure 1-2 Original image

Figure 1-3 Ground truth

2 Loading the Dataset

The dataset class below pairs each image in the dataset with its corresponding mask:

import os

import numpy as np
from PIL import Image
from torch.utils.data import Dataset


class DriveDataset(Dataset):
    def __init__(self, root: str, train: bool, transforms=None):
        super(DriveDataset, self).__init__()
        self.flag = "training" if train else "test"
        data_root = os.path.join(root, "DRIVE", self.flag)
        # assert that the directory exists, failing early otherwise
        assert os.path.exists(data_root), f"path '{data_root}' does not exist."
        # image preprocessing is handled by the transforms
        self.transforms = transforms
        # os.path.join(data_root, "images") builds the directory path,
        # os.listdir(...) lists the file names in it, and the list
        # comprehension keeps only the .tif images
        img_names = [i for i in os.listdir(os.path.join(data_root, "images")) if i.endswith(".tif")]
        # full paths of the images and of the ground truth (GT)
        self.img_list = [os.path.join(data_root, "images", i) for i in img_names]
        self.manual = [os.path.join(data_root, "1st_manual", i.split("_")[0] + "_manual1.gif")
                       for i in img_names]
        # check files
        for i in self.manual:
            if os.path.exists(i) is False:
                raise FileNotFoundError(f"file {i} does not exist.")

        self.roi_mask = [os.path.join(data_root, "mask", i.split("_")[0] + f"_{self.flag}_mask.gif")
                         for i in img_names]
        # check files
        for i in self.roi_mask:
            if os.path.exists(i) is False:
                raise FileNotFoundError(f"file {i} does not exist.")

    def __getitem__(self, idx):
        img = Image.open(self.img_list[idx]).convert('RGB')
        manual = Image.open(self.manual[idx]).convert('L')
        # 0: background, 1: foreground; the foreground pixels in the GT
        # are 255, so divide by 255 to map them to 1
        manual = np.array(manual) / 255
        roi_mask = Image.open(self.roi_mask[idx]).convert('L')
        # set the pixels outside the region of interest to 255 so that
        # they do not contribute to the loss
        roi_mask = 255 - np.array(roi_mask)
        # np.clip() bounds the pixel values after overlaying the GT
        # (manual) and the roi_mask
        mask = np.clip(manual + roi_mask, a_min=0, a_max=255)
        # convert back to PIL because the transforms operate on PIL images
        mask = Image.fromarray(mask)

        if self.transforms is not None:
            img, mask = self.transforms(img, mask)

        return img, mask

    def __len__(self):
        return len(self.img_list)

    @staticmethod
    def collate_fn(batch):
        images, targets = list(zip(*batch))
        batched_imgs = cat_list(images, fill_value=0)
        batched_targets = cat_list(targets, fill_value=255)
        return batched_imgs, batched_targets
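collate_fn calls a cat_list helper that is not shown in the article. A minimal sketch, assuming the same behavior as the helper in the torchvision segmentation reference code: it pads every tensor in the batch to the largest spatial size and stacks them, with fill_value=255 marking padded target pixels as ignored.

def cat_list(images, fill_value=0):
    # largest size along each dimension across the batch
    max_size = tuple(max(s) for s in zip(*[img.shape for img in images]))
    batch_shape = (len(images),) + max_size
    # allocate the batch tensor pre-filled with fill_value, then copy
    # each image into its top-left corner
    batched_imgs = images[0].new(*batch_shape).fill_(fill_value)
    for img, pad_img in zip(images, batched_imgs):
        pad_img[..., :img.shape[-2], :img.shape[-1]].copy_(img)
    return batched_imgs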

3 Building the UNet Model

from typing import Dict
import torch
import torch.nn as nn
import torch.nn.functional as F
import netron  # optional: netron visualizes an exported model; not needed for training


class DoubleConv(nn.Sequential):
    def __init__(self, in_channels, out_channels, mid_channels=None):
        if mid_channels is None:
            mid_channels = out_channels
        super(DoubleConv, self).__init__(
            nn.Conv2d(in_channels, mid_channels, kernel_size=3, padding=1, bias=False),
            nn.BatchNorm2d(mid_channels),
            nn.ReLU(inplace=True),
            nn.Conv2d(mid_channels, out_channels, kernel_size=3, padding=1, bias=False),
            nn.BatchNorm2d(out_channels),
            nn.ReLU(inplace=True)
        )


class Down(nn.Sequential):
    def __init__(self, in_channels, out_channels):
        super(Down, self).__init__(
            nn.MaxPool2d(2, stride=2),
            DoubleConv(in_channels, out_channels)
        )


class Up(nn.Module):
    def __init__(self, in_channels, out_channels, bilinear=True):
        super(Up, self).__init__()
        if bilinear:
            self.up = nn.Upsample(scale_factor=2, mode='bilinear', align_corners=True)
            self.conv = DoubleConv(in_channels, out_channels, in_channels // 2)
        else:
            self.up = nn.ConvTranspose2d(in_channels, in_channels // 2, kernel_size=2, stride=2)
            self.conv = DoubleConv(in_channels, out_channels)

    def forward(self, x1: torch.Tensor, x2: torch.Tensor) -> torch.Tensor:
        x1 = self.up(x1)
        # [N, C, H, W]
        diff_y = x2.size()[2] - x1.size()[2]
        diff_x = x2.size()[3] - x1.size()[3]

        # padding_left, padding_right, padding_top, padding_bottom
        x1 = F.pad(x1, [diff_x // 2, diff_x - diff_x // 2,
                        diff_y // 2, diff_y - diff_y // 2])

        x = torch.cat([x2, x1], dim=1)
        x = self.conv(x)
        return x


class OutConv(nn.Sequential):
    def __init__(self, in_channels, num_classes):
        super(OutConv, self).__init__(
            nn.Conv2d(in_channels, num_classes, kernel_size=1)
        )


class UNet(nn.Module):
    def __init__(self,
                 in_channels: int = 1,
                 num_classes: int = 2,
                 bilinear: bool = True,
                 base_c: int = 64):
        super(UNet, self).__init__()
        self.in_channels = in_channels
        self.num_classes = num_classes
        self.bilinear = bilinear

        self.in_conv = DoubleConv(in_channels, base_c)
        self.down1 = Down(base_c, base_c * 2)
        self.down2 = Down(base_c * 2, base_c * 4)
        self.down3 = Down(base_c * 4, base_c * 8)
        factor = 2 if bilinear else 1
        self.down4 = Down(base_c * 8, base_c * 16 // factor)
        self.up1 = Up(base_c * 16, base_c * 8 // factor, bilinear)
        self.up2 = Up(base_c * 8, base_c * 4 // factor, bilinear)
        self.up3 = Up(base_c * 4, base_c * 2 // factor, bilinear)
        self.up4 = Up(base_c * 2, base_c, bilinear)
        self.out_conv = OutConv(base_c, num_classes)

    def forward(self, x: torch.Tensor) -> Dict[str, torch.Tensor]:
        x1 = self.in_conv(x)
        x2 = self.down1(x1)
        x3 = self.down2(x2)
        x4 = self.down3(x3)
        x5 = self.down4(x4)
        x = self.up1(x5, x4)
        x = self.up2(x, x3)
        x = self.up3(x, x2)
        x = self.up4(x, x1)
        logits = self.out_conv(x)

        return {"out": logits}
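A quick shape check confirms that the encoder and decoder line up. A minimal sketch; the base_c=32, channel count and input size here are illustrative (the article's create_model helper, used in the training script below, is not shown):

if __name__ == "__main__":
    model = UNet(in_channels=3, num_classes=2, base_c=32)
    x = torch.randn(1, 3, 480, 480)  # dummy RGB input
    out = model(x)["out"]
    print(out.shape)                 # expected: torch.Size([1, 2, 480, 480])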

4 Loss and Evaluation Functions

import torch
import torch.nn as nn


def build_target(target: torch.Tensor, num_classes: int = 2, ignore_index: int = -100):
    """build target for dice coefficient"""
    dice_target = target.clone()
    if ignore_index >= 0:
        ignore_mask = torch.eq(target, ignore_index)
        dice_target[ignore_mask] = 0
        # [N, H, W] -> [N, H, W, C]
        dice_target = nn.functional.one_hot(dice_target, num_classes).float()
        dice_target[ignore_mask] = ignore_index
    else:
        dice_target = nn.functional.one_hot(dice_target, num_classes).float()

    return dice_target.permute(0, 3, 1, 2)


def dice_coeff(x: torch.Tensor, target: torch.Tensor, ignore_index: int = -100, epsilon=1e-6):
    # Average of Dice coefficient for all batches, or for a single mask
    # computes the Dice coefficient of one class over every image in a batch
    d = 0.
    batch_size = x.shape[0]
    for i in range(batch_size):
        x_i = x[i].reshape(-1)
        t_i = target[i].reshape(-1)
        if ignore_index >= 0:
            # keep only the region of the mask that is not ignore_index
            roi_mask = torch.ne(t_i, ignore_index)
            x_i = x_i[roi_mask]
            t_i = t_i[roi_mask]
        inter = torch.dot(x_i, t_i)
        sets_sum = torch.sum(x_i) + torch.sum(t_i)
        if sets_sum == 0:
            sets_sum = 2 * inter

        d += (2 * inter + epsilon) / (sets_sum + epsilon)

    return d / batch_size


def multiclass_dice_coeff(x: torch.Tensor, target: torch.Tensor, ignore_index: int = -100, epsilon=1e-6):
    """Average of Dice coefficient for all classes"""
    dice = 0.
    for channel in range(x.shape[1]):
        dice += dice_coeff(x[:, channel, ...], target[:, channel, ...], ignore_index, epsilon)

    return dice / x.shape[1]


def dice_loss(x: torch.Tensor, target: torch.Tensor, multiclass: bool = False, ignore_index: int = -100):
    # Dice loss (objective to minimize) between 0 and 1
    x = nn.functional.softmax(x, dim=1)
    fn = multiclass_dice_coeff if multiclass else dice_coeff
    return 1 - fn(x, target, ignore_index=ignore_index)
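A small sanity check ties the two pieces together: build_target one-hot encodes the labels while keeping ignored pixels marked, and dice_loss then skips them. ignore_index=255 mirrors the 255-filled padding produced by collate_fn; the tensors themselves are made up for illustration.

logits = torch.randn(2, 2, 8, 8)         # [N, C, H, W] raw network scores
target = torch.randint(0, 2, (2, 8, 8))  # [N, H, W] labels in {0, 1}
target[0, :2, :2] = 255                  # pretend these pixels lie outside the ROI

dice_target = build_target(target, num_classes=2, ignore_index=255)
print(dice_target.shape)                 # torch.Size([2, 2, 8, 8])

loss = dice_loss(logits, dice_target, multiclass=True, ignore_index=255)
print(loss.item())                       # scalar loss value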

5 Training

import datetime
import time

import torch

# get_transform, create_model, create_lr_scheduler, train_one_epoch and
# evaluate are project helpers that are not shown in this article


def main(args):
    device = torch.device(args.device if torch.cuda.is_available() else "cpu")
    batch_size = args.batch_size
    # segmentation num_classes + background
    num_classes = args.num_classes + 1

    # computed with compute_mean_std.py
    mean = (0.709, 0.381, 0.224)
    std = (0.127, 0.079, 0.043)

    # file used to log information during training and validation
    results_file = "results{}.txt".format(datetime.datetime.now().strftime("%Y%m%d-%H%M%S"))

    train_dataset = DriveDataset(args.data_path,
                                 train=True,
                                 transforms=get_transform(train=True, mean=mean, std=std))

    val_dataset = DriveDataset(args.data_path,
                               train=False,
                               transforms=get_transform(train=False, mean=mean, std=std))

    num_workers = 0
    train_loader = torch.utils.data.DataLoader(train_dataset,
                                               batch_size=batch_size,
                                               num_workers=num_workers,
                                               shuffle=True,
                                               pin_memory=True,
                                               collate_fn=train_dataset.collate_fn)

    val_loader = torch.utils.data.DataLoader(val_dataset,
                                             batch_size=1,
                                             num_workers=num_workers,
                                             pin_memory=True,
                                             collate_fn=val_dataset.collate_fn)

    # sanity check: print the shape of each batch
    for img, lab in train_loader:
        print(img.shape)
        print(lab.shape)

    model = create_model(num_classes=num_classes)
    model.to(device)

    params_to_optimize = [p for p in model.parameters() if p.requires_grad]

    optimizer = torch.optim.SGD(
        params_to_optimize,
        lr=args.lr, momentum=args.momentum, weight_decay=args.weight_decay
    )

    scaler = torch.cuda.amp.GradScaler() if args.amp else None

    # create the learning-rate schedule; it is updated every step, not every epoch
    lr_scheduler = create_lr_scheduler(optimizer, len(train_loader), args.epochs, warmup=True)

    if args.resume:
        checkpoint = torch.load(args.resume, map_location='cpu')  # load checkpoint
        model.load_state_dict(checkpoint['model'])
        optimizer.load_state_dict(checkpoint['optimizer'])
        lr_scheduler.load_state_dict(checkpoint['lr_scheduler'])
        args.start_epoch = checkpoint['epoch'] + 1
        if args.amp:
            scaler.load_state_dict(checkpoint["scaler"])

    best_dice = 0.0
    start_time = time.time()
    for epoch in range(args.start_epoch, args.epochs):
        mean_loss, lr = train_one_epoch(model, optimizer, train_loader, device, epoch, num_classes,
                                        lr_scheduler=lr_scheduler, print_freq=args.print_freq, scaler=scaler)

        confmat, dice = evaluate(model, val_loader, device=device, num_classes=num_classes)
        val_info = str(confmat)
        print(val_info)
        print(f"dice coefficient: {dice:.3f}")

        # record each epoch's train_loss, lr and validation metrics in the txt file
        with open(results_file, "a") as f:
            train_info = f"[epoch: {epoch}]\n" \
                         f"train_loss: {mean_loss:.4f}\n" \
                         f"lr: {lr:.6f}\n" \
                         f"dice coefficient: {dice:.3f}\n"
            f.write(train_info + val_info + "\n\n")

        if args.save_best is True:
            if best_dice < dice:
                best_dice = dice
            else:
                continue

        save_file = {"model": model.state_dict(),
                     "optimizer": optimizer.state_dict(),
                     "lr_scheduler": lr_scheduler.state_dict(),
                     "epoch": epoch,
                     "args": args}
        if args.amp:
            save_file["scaler"] = scaler.state_dict()

        if args.save_best is True:
            torch.save(save_file, "save_weights/best_model.pth")
        else:
            torch.save(save_file, "save_weights/model_{}.pth".format(epoch))

    total_time = time.time() - start_time
    total_time_str = str(datetime.timedelta(seconds=int(total_time)))
    print("training time {}".format(total_time_str))
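main() relies on a create_lr_scheduler helper whose definition is not shown. A minimal sketch, assuming the common "poly" decay with a linear warmup, which matches the per-step comment above (the warmup_epochs and warmup_factor defaults are assumptions):

def create_lr_scheduler(optimizer,
                        num_step: int,   # number of batches per epoch
                        epochs: int,
                        warmup: bool = True,
                        warmup_epochs: int = 1,
                        warmup_factor: float = 1e-3):
    assert num_step > 0 and epochs > 0

    def f(x):
        # x counts scheduler steps, i.e. one step per batch
        if warmup and x <= warmup_epochs * num_step:
            alpha = float(x) / (warmup_epochs * num_step)
            # ramp the multiplier linearly from warmup_factor up to 1
            return warmup_factor * (1 - alpha) + alpha
        # afterwards decay with the "poly" policy: (1 - progress) ** 0.9
        return (1 - (x - warmup_epochs * num_step) / ((epochs - warmup_epochs) * num_step)) ** 0.9

    return torch.optim.lr_scheduler.LambdaLR(optimizer, lr_lambda=f)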

That concludes this walkthrough of retinal vessel segmentation with UNet on the DRIVE dataset; hopefully it is a useful reference.


