aoxo committed on
Commit
560d099
·
verified ·
1 Parent(s): ad34158

Upload mri_autoencoder.ipynb

Browse files
Files changed (1) hide show
  1. mri_autoencoder.ipynb +719 -0
mri_autoencoder.ipynb ADDED
@@ -0,0 +1,719 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "cells": [
3
+ {
4
+ "cell_type": "code",
5
+ "execution_count": null,
6
+ "metadata": {},
7
+ "outputs": [],
8
+ "source": [
9
import os
import shutil


def deepcopy_and_rename_dicom(src_folder, dest_folder):
    """Recursively copy every .dcm file under *src_folder* into *dest_folder*.

    Copies are renamed to a flat sequential scheme (1.dcm, 2.dcm, ...).
    Files within each directory are visited in sorted order so the numbering
    is reproducible across runs (os.walk's file order is otherwise
    OS-dependent).

    :param src_folder: Root folder to search for DICOM files.
    :param dest_folder: Folder receiving the renamed copies (created if absent).
    """
    # exist_ok avoids the check-then-create race of an explicit exists() test.
    os.makedirs(dest_folder, exist_ok=True)

    # Sequential number used for the renamed files.
    file_counter = 1

    for root, _, files in os.walk(src_folder):
        # Sort so the 1..N numbering is deterministic.
        for dicom_file in sorted(files):
            # Case-insensitive match on the .dcm extension.
            if dicom_file.lower().endswith('.dcm'):
                src_file_path = os.path.join(root, dicom_file)
                dest_file_path = os.path.join(dest_folder, f"{file_counter}.dcm")
                shutil.copy(src_file_path, dest_file_path)
                print(f"Copied and renamed {dicom_file} to {file_counter}.dcm")
                file_counter += 1


if __name__ == "__main__":
    # Example usage: replace with your source and destination paths.
    src_folder = r"D:\Pancreatic Neuroendocrine"
    dest_folder = r"D:\PN_New"
    deepcopy_and_rename_dicom(src_folder, dest_folder)
42
+ ]
43
+ },
44
+ {
45
+ "cell_type": "code",
46
+ "execution_count": null,
47
+ "metadata": {},
48
+ "outputs": [],
49
+ "source": [
50
import os
import shutil
import random


def split_and_transfer_files(src_folder, dest_folder, split_factor):
    """Split the .dcm files in *src_folder* into train/val subfolders.

    Files are MOVED (not copied) out of *src_folder*, with a random shuffle
    deciding which files land in which split.

    :param src_folder: The source folder containing the files to split.
    :param dest_folder: The folder in which 'train' and 'val' subfolders
        will be created.
    :param split_factor: Fraction of files that go to 'train'
        (e.g. 0.8 -> 80% train, 20% val).
    """
    train_folder = os.path.join(dest_folder, 'train')
    val_folder = os.path.join(dest_folder, 'val')
    # exist_ok avoids the check-then-create race of an explicit exists() test.
    os.makedirs(train_folder, exist_ok=True)
    os.makedirs(val_folder, exist_ok=True)

    # All DICOM files directly inside the source folder (case-insensitive).
    dicom_files = [f for f in os.listdir(src_folder) if f.lower().endswith('.dcm')]

    # Shuffle so the split is random rather than listing-order dependent.
    random.shuffle(dicom_files)

    # First split_index files -> train, remainder -> val.
    split_index = int(len(dicom_files) * split_factor)
    train_files = dicom_files[:split_index]
    val_files = dicom_files[split_index:]

    for file in train_files:
        shutil.move(os.path.join(src_folder, file), os.path.join(train_folder, file))
        print(f"Moved {file} to train folder")

    for file in val_files:
        shutil.move(os.path.join(src_folder, file), os.path.join(val_folder, file))
        print(f"Moved {file} to val folder")


if __name__ == "__main__":
    # Example usage: replace with your source and destination paths.
    src_folder = r"D:\PN_New"
    dest_folder = r"D:\PN_Split"
    split_factor = 0.9  # 90% of files go to 'train', 10% go to 'val'
    split_and_transfer_files(src_folder, dest_folder, split_factor)
105
+ ]
106
+ },
107
+ {
108
+ "cell_type": "code",
109
+ "execution_count": null,
110
+ "metadata": {},
111
+ "outputs": [],
112
+ "source": [
113
import torch
import torch.nn as nn
import torch.nn.functional as F
import numpy as np
from torch.utils.data import Dataset, DataLoader
import os

device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')


class MedicalImageDataset(Dataset):
    """Dataset of single-channel DICOM slices, min-max normalised to [0, 1].

    Returns (image, image) pairs so the network trains as an autoencoder
    (the input is its own reconstruction target).
    """

    def __init__(self, dicom_dir):
        self.dicom_files = [os.path.join(dicom_dir, f)
                            for f in os.listdir(dicom_dir) if f.endswith('.dcm')]

    def __len__(self):
        return len(self.dicom_files)

    def __getitem__(self, idx):
        # Lazy import: keeps the model/training code importable on machines
        # without pydicom installed.
        import pydicom

        dcm = pydicom.dcmread(self.dicom_files[idx])
        image = dcm.pixel_array.astype(float)
        lo, hi = image.min(), image.max()
        # Guard against constant slices (hi == lo) to avoid division by zero.
        image = (image - lo) / (hi - lo) if hi > lo else np.zeros_like(image)

        # Add a leading channel dimension: (H, W) -> (1, H, W).
        image_tensor = torch.from_numpy(image).float().unsqueeze(0)
        return image_tensor, image_tensor


class UNetBlock(nn.Module):
    """Two 3x3 conv -> batch-norm -> ReLU layers; spatial size is preserved."""

    def __init__(self, in_channels, out_channels):
        super().__init__()
        self.conv1 = nn.Conv2d(in_channels, out_channels, 3, padding=1)
        self.bn1 = nn.BatchNorm2d(out_channels)
        self.conv2 = nn.Conv2d(out_channels, out_channels, 3, padding=1)
        self.bn2 = nn.BatchNorm2d(out_channels)

    def forward(self, x):
        x = F.relu(self.bn1(self.conv1(x)))
        x = F.relu(self.bn2(self.conv2(x)))
        return x


class UNet(nn.Module):
    """3-level U-Net autoencoder with skip connections.

    Input spatial dimensions must be divisible by 4 (two 2x2 poolings).
    """

    def __init__(self, in_channels=1, out_channels=1):
        super().__init__()
        # Encoder
        self.enc1 = UNetBlock(in_channels, 64)
        self.enc2 = UNetBlock(64, 128)
        self.enc3 = UNetBlock(128, 256)

        # Decoder (input channels account for the skip concatenations)
        self.dec3 = UNetBlock(256 + 128, 128)
        self.dec2 = UNetBlock(128 + 64, 64)
        self.dec1 = UNetBlock(64, out_channels)

        # Pooling and upsampling
        self.pool = nn.MaxPool2d(2, 2)
        self.upsample = nn.Upsample(scale_factor=2, mode='bilinear', align_corners=True)

    def forward(self, x):
        # Encoder path
        e1 = self.enc1(x)
        e2 = self.enc2(self.pool(e1))
        e3 = self.enc3(self.pool(e2))

        # Decoder path with skip connections (concatenate along channels)
        d3 = self.upsample(e3)
        d3 = self.dec3(torch.cat([d3, e2], dim=1))

        d2 = self.upsample(d3)
        d2 = self.dec2(torch.cat([d2, e1], dim=1))

        return self.dec1(d2)


def train_unet(dicom_dir, epochs=50, batch_size=4):
    """Train a UNet autoencoder on the .dcm files in *dicom_dir*.

    :param dicom_dir: folder containing training DICOM files
    :param epochs: number of full passes over the dataset
    :param batch_size: mini-batch size
    :return: the trained model
    """
    dataset = MedicalImageDataset(dicom_dir)
    dataloader = DataLoader(dataset, batch_size=batch_size, shuffle=True)

    model = UNet().to(device)
    criterion = nn.MSELoss()
    optimizer = torch.optim.Adam(model.parameters(), lr=0.0001)

    for epoch in range(epochs):
        total_loss = 0
        for images, targets in dataloader:
            images, targets = images.to(device), targets.to(device)
            optimizer.zero_grad()
            outputs = model(images)
            loss = criterion(outputs, targets)
            loss.backward()
            optimizer.step()
            total_loss += loss.item()

        print(f'Epoch [{epoch+1}/{epochs}], Loss: {total_loss/len(dataloader):.4f}')

    return model


if __name__ == "__main__":
    # Example usage: replace the path with your DICOM folder.
    model = train_unet(r"D:\PN_New", epochs=50, batch_size=1)
217
+ ]
218
+ },
219
+ {
220
+ "cell_type": "markdown",
221
+ "metadata": {},
222
+ "source": [
223
+ "### Pure U-Net"
224
+ ]
225
+ },
226
+ {
227
+ "cell_type": "code",
228
+ "execution_count": null,
229
+ "metadata": {},
230
+ "outputs": [],
231
+ "source": [
232
import torch
import torch.nn as nn
import torch.nn.functional as F
import numpy as np
from torch.utils.data import Dataset, DataLoader
import os
from torch.utils.checkpoint import checkpoint

device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')


class MedicalImageDataset(Dataset):
    """Dataset of single-channel DICOM slices, min-max normalised to [0, 1].

    Returns (image, image) pairs for autoencoder-style training.
    """

    def __init__(self, dicom_dir):
        self.dicom_files = [os.path.join(dicom_dir, f)
                            for f in os.listdir(dicom_dir) if f.endswith('.dcm')]

    def __len__(self):
        return len(self.dicom_files)

    def __getitem__(self, idx):
        # Lazy import: keeps the model/metric code importable without pydicom.
        import pydicom

        dcm = pydicom.dcmread(self.dicom_files[idx])
        image = dcm.pixel_array.astype(float)
        lo, hi = image.min(), image.max()
        # Guard against constant slices (hi == lo) to avoid division by zero.
        image = (image - lo) / (hi - lo) if hi > lo else np.zeros_like(image)
        image_tensor = torch.from_numpy(image).float().unsqueeze(0)
        return image_tensor, image_tensor


class UNetBlock(nn.Module):
    """Two 3x3 conv -> batch-norm -> ReLU layers; spatial size is preserved."""

    def __init__(self, in_channels, out_channels):
        super().__init__()
        self.conv1 = nn.Conv2d(in_channels, out_channels, 3, padding=1)
        self.bn1 = nn.BatchNorm2d(out_channels)
        self.conv2 = nn.Conv2d(out_channels, out_channels, 3, padding=1)
        self.bn2 = nn.BatchNorm2d(out_channels)

    def forward(self, x):
        x = F.relu(self.bn1(self.conv1(x)))
        x = F.relu(self.bn2(self.conv2(x)))
        return x


class UNet(nn.Module):
    """3-level U-Net with gradient checkpointing on the heavy blocks.

    Input spatial dimensions must be divisible by 4 (two 2x2 poolings).
    """

    def __init__(self, in_channels=1, out_channels=1):
        super().__init__()
        # Encoder
        self.enc1 = UNetBlock(in_channels, 64)
        self.enc2 = UNetBlock(64, 128)
        self.enc3 = UNetBlock(128, 256)

        # Decoder (input channels account for the skip concatenations)
        self.dec3 = UNetBlock(256 + 128, 128)
        self.dec2 = UNetBlock(128 + 64, 64)
        self.dec1 = UNetBlock(64, out_channels)

        # Pooling and upsampling
        self.pool = nn.MaxPool2d(2, 2)
        self.upsample = nn.Upsample(scale_factor=2, mode='bilinear', align_corners=True)

    def forward(self, x):
        # BUG FIX: use_reentrant=False. The reentrant (legacy default)
        # checkpoint implementation does not propagate gradients when none of
        # the checkpoint *inputs* require grad -- which is exactly the case
        # here (only parameters require grad), so training would silently
        # receive no gradients through the checkpointed blocks.
        e1 = checkpoint(self.enc1, x, use_reentrant=False)
        e2 = checkpoint(self.enc2, self.pool(e1), use_reentrant=False)
        e3 = checkpoint(self.enc3, self.pool(e2), use_reentrant=False)

        # Decoder path with skip connections (concatenate along channels)
        d3 = self.upsample(e3)
        d3 = checkpoint(self.dec3, torch.cat([d3, e2], dim=1), use_reentrant=False)

        d2 = self.upsample(d3)
        d2 = checkpoint(self.dec2, torch.cat([d2, e1], dim=1), use_reentrant=False)

        # Final block is cheap; no checkpointing needed.
        return self.dec1(d2)


def calculate_loss(model, dataloader, criterion):
    """Return the average *criterion* value of *model* over *dataloader*."""
    model.eval()
    total_loss = 0
    with torch.no_grad():
        for images, targets in dataloader:
            images, targets = images.to(device), targets.to(device)
            outputs = model(images)
            total_loss += criterion(outputs, targets).item()
    return total_loss / len(dataloader)


def calculate_psnr(output, target, max_pixel=1.0):
    """Peak signal-to-noise ratio (in dB) between *output* and *target*.

    Assumes pixel values lie in [0, max_pixel]. Returns +inf for identical
    inputs (mse == 0).
    """
    mse = F.mse_loss(output, target)
    psnr = 20 * torch.log10(max_pixel / torch.sqrt(mse))
    return psnr.item()


def calculate_loss_and_psnr(model, dataloader, criterion):
    """Return (average loss, average PSNR) of *model* over *dataloader*."""
    model.eval()
    total_loss = 0
    total_psnr = 0
    num_batches = len(dataloader)

    with torch.no_grad():
        for images, targets in dataloader:
            images, targets = images.to(device), targets.to(device)
            outputs = model(images)
            total_loss += criterion(outputs, targets).item()
            total_psnr += calculate_psnr(outputs, targets)

    return total_loss / num_batches, total_psnr / num_batches


def train_unet(dicom_dir, val_dicom_dir, epochs=50, batch_size=4, grad_accumulation_steps=2):
    """Train a checkpointed UNet autoencoder, saving the best model by val loss.

    :param dicom_dir: folder of training .dcm files
    :param val_dicom_dir: folder of validation .dcm files
    :param epochs: number of full passes over the training set
    :param batch_size: mini-batch size
    :param grad_accumulation_steps: optimizer steps once every N batches
    :return: the trained model
    """
    from tqdm import tqdm  # lazy import; only needed while training

    dataset = MedicalImageDataset(dicom_dir)
    train_dataloader = DataLoader(dataset, batch_size=batch_size, shuffle=True)
    val_dataset = MedicalImageDataset(val_dicom_dir)
    val_dataloader = DataLoader(val_dataset, batch_size=batch_size, shuffle=False)

    model = UNet().to(device)
    criterion = nn.MSELoss()
    optimizer = torch.optim.Adam(model.parameters(), lr=0.0001)

    # BUG FIX: these were module-level globals that this function also
    # assigned, which raises UnboundLocalError on the first comparison
    # (a name assigned anywhere in a function is local throughout it).
    best_val_loss = float('inf')
    best_model_path = 'best_model.pth'

    for epoch in range(epochs):
        model.train()
        total_loss = 0
        optimizer.zero_grad()

        with tqdm(train_dataloader, unit="batch", desc=f"Epoch {epoch+1}/{epochs}") as tepoch:
            for i, (images, targets) in enumerate(tepoch):
                images, targets = images.to(device), targets.to(device)

                outputs = model(images)
                loss = criterion(outputs, targets)
                loss.backward()

                # Step every grad_accumulation_steps batches, and on the final
                # (possibly short) batch of the epoch.
                if (i + 1) % grad_accumulation_steps == 0 or (i + 1) == len(tepoch):
                    optimizer.step()
                    optimizer.zero_grad()

                total_loss += loss.item()

                # BUG FIX: loss.item() is already a per-batch mean, so the
                # running average divides by the batch count only (the
                # original also divided by batch_size, under-reporting it).
                tepoch.set_postfix(loss=total_loss / (i + 1))

        avg_train_loss = total_loss / len(train_dataloader)
        avg_val_loss, avg_val_psnr = calculate_loss_and_psnr(model, val_dataloader, criterion)

        print(f"Epoch [{epoch+1}/{epochs}] - Train Loss: {avg_train_loss:.4f}, Validation Loss: {avg_val_loss:.4f}, Validation PSNR: {avg_val_psnr:.4f}")

        if avg_val_loss < best_val_loss:
            best_val_loss = avg_val_loss
            torch.save(model.state_dict(), best_model_path)
            print(f"Model saved with improved validation loss: {avg_val_loss:.4f}")

    return model


if __name__ == "__main__":
    # Example usage with train and validation directories.
    model = train_unet(r"D:\PN_Split\train", r"D:\PN_Split\val",
                       epochs=50, batch_size=4, grad_accumulation_steps=8)
405
+ ]
406
+ },
407
+ {
408
+ "cell_type": "markdown",
409
+ "metadata": {},
410
+ "source": [
411
+ "### Reconstructor and Denoiser U-Net"
412
+ ]
413
+ },
414
+ {
415
+ "cell_type": "code",
416
+ "execution_count": null,
417
+ "metadata": {},
418
+ "outputs": [],
419
+ "source": [
420
import torch
import torch.nn as nn
import torch.nn.functional as F
import numpy as np
from torch.utils.data import Dataset, DataLoader
import os
from torch.utils.checkpoint import checkpoint

device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')


class MedicalImageDataset(Dataset):
    """Dataset of single-channel DICOM slices, min-max normalised to [0, 1].

    Returns (image, image) pairs for autoencoder-style training.
    """

    def __init__(self, dicom_dir):
        self.dicom_files = [os.path.join(dicom_dir, f)
                            for f in os.listdir(dicom_dir) if f.endswith('.dcm')]

    def __len__(self):
        return len(self.dicom_files)

    def __getitem__(self, idx):
        # Lazy import: keeps the model/metric code importable without pydicom.
        import pydicom

        dcm = pydicom.dcmread(self.dicom_files[idx])
        image = dcm.pixel_array.astype(float)
        lo, hi = image.min(), image.max()
        # Guard against constant slices (hi == lo) to avoid division by zero.
        image = (image - lo) / (hi - lo) if hi > lo else np.zeros_like(image)
        image_tensor = torch.from_numpy(image).float().unsqueeze(0)
        return image_tensor, image_tensor


class UNetBlock(nn.Module):
    """Two 3x3 conv -> batch-norm -> ReLU layers; spatial size is preserved."""

    def __init__(self, in_channels, out_channels):
        super().__init__()
        self.conv1 = nn.Conv2d(in_channels, out_channels, 3, padding=1)
        self.bn1 = nn.BatchNorm2d(out_channels)
        self.conv2 = nn.Conv2d(out_channels, out_channels, 3, padding=1)
        self.bn2 = nn.BatchNorm2d(out_channels)

    def forward(self, x):
        x = F.relu(self.bn1(self.conv1(x)))
        x = F.relu(self.bn2(self.conv2(x)))
        return x


class UNet(nn.Module):
    """3-level U-Net with gradient checkpointing on the heavy blocks.

    NOTE: the original cell defined this class twice, identically; the
    duplicate has been removed. Input spatial dims must be divisible by 4.
    """

    def __init__(self, in_channels=1, out_channels=1):
        super().__init__()
        # Encoder
        self.enc1 = UNetBlock(in_channels, 64)
        self.enc2 = UNetBlock(64, 128)
        self.enc3 = UNetBlock(128, 256)

        # Decoder (input channels account for the skip concatenations)
        self.dec3 = UNetBlock(256 + 128, 128)
        self.dec2 = UNetBlock(128 + 64, 64)
        self.dec1 = UNetBlock(64, out_channels)

        # Pooling and upsampling
        self.pool = nn.MaxPool2d(2, 2)
        self.upsample = nn.Upsample(scale_factor=2, mode='bilinear', align_corners=True)

    def forward(self, x):
        # BUG FIX: use_reentrant=False. The reentrant (legacy default)
        # checkpoint does not propagate gradients when no checkpoint *input*
        # requires grad (only the parameters do here), so training would
        # silently receive no gradients through the checkpointed blocks.
        e1 = checkpoint(self.enc1, x, use_reentrant=False)
        e2 = checkpoint(self.enc2, self.pool(e1), use_reentrant=False)
        e3 = checkpoint(self.enc3, self.pool(e2), use_reentrant=False)

        # Decoder path with skip connections (concatenate along channels)
        d3 = self.upsample(e3)
        d3 = checkpoint(self.dec3, torch.cat([d3, e2], dim=1), use_reentrant=False)

        d2 = self.upsample(d3)
        d2 = checkpoint(self.dec2, torch.cat([d2, e1], dim=1), use_reentrant=False)

        # Final block is cheap; no checkpointing needed.
        return self.dec1(d2)


def calculate_loss(model, dataloader, criterion):
    """Return the average *criterion* value of *model* over *dataloader*."""
    model.eval()
    total_loss = 0
    with torch.no_grad():
        for images, targets in dataloader:
            images, targets = images.to(device), targets.to(device)
            outputs = model(images)
            total_loss += criterion(outputs, targets).item()
    return total_loss / len(dataloader)


def calculate_psnr(output, target, max_pixel=1.0):
    """Peak signal-to-noise ratio (in dB) between *output* and *target*.

    Assumes pixel values lie in [0, max_pixel].
    """
    mse = F.mse_loss(output, target)
    psnr = 20 * torch.log10(max_pixel / torch.sqrt(mse))
    return psnr.item()


def calculate_loss_and_psnr(model, dataloader, criterion):
    """Return (average loss, average PSNR) of *model* over *dataloader*."""
    model.eval()
    total_loss = 0
    total_psnr = 0
    num_batches = len(dataloader)

    with torch.no_grad():
        for images, targets in dataloader:
            images, targets = images.to(device), targets.to(device)
            outputs = model(images)
            total_loss += criterion(outputs, targets).item()
            total_psnr += calculate_psnr(outputs, targets)

    return total_loss / num_batches, total_psnr / num_batches


class Reconstructor(nn.Module):
    """First-stage network: reconstructs the input image (UNet wrapper)."""

    def __init__(self, in_channels=1, out_channels=1):
        super().__init__()
        self.unet = UNet(in_channels=in_channels, out_channels=out_channels)

    def forward(self, x):
        return self.unet(x)


class Denoiser(nn.Module):
    """Second-stage network: denoises the reconstructor's output (UNet wrapper)."""

    def __init__(self, in_channels=1, out_channels=1):
        super().__init__()
        self.unet = UNet(in_channels=in_channels, out_channels=out_channels)

    def forward(self, x):
        return self.unet(x)


def train_reconstructor_and_denoiser(dicom_dir, val_dicom_dir, epochs=50, batch_size=4, grad_accumulation_steps=2):
    """Jointly train a Reconstructor and a Denoiser on DICOM autoencoding.

    The reconstructor learns input -> input; the denoiser learns to map the
    reconstructor's (detached) output back to the clean target. The best of
    each model by validation loss is saved to disk.

    :param dicom_dir: folder of training .dcm files
    :param val_dicom_dir: folder of validation .dcm files
    :param grad_accumulation_steps: optimizer steps once every N batches
    :return: (reconstructor, denoiser) trained models
    """
    from tqdm import tqdm  # lazy import; only needed while training

    dataset = MedicalImageDataset(dicom_dir)
    train_dataloader = DataLoader(dataset, batch_size=batch_size, shuffle=True)
    val_dataset = MedicalImageDataset(val_dicom_dir)
    val_dataloader = DataLoader(val_dataset, batch_size=batch_size, shuffle=False)

    reconstructor = Reconstructor().to(device)
    denoiser = Denoiser().to(device)

    reconstructor_criterion = nn.MSELoss()
    denoiser_criterion = nn.MSELoss()

    reconstructor_optimizer = torch.optim.Adam(reconstructor.parameters(), lr=0.0001)
    denoiser_optimizer = torch.optim.Adam(denoiser.parameters(), lr=0.0001)

    best_reconstructor_val_loss = float('inf')
    best_denoiser_val_loss = float('inf')
    best_reconstructor_model_path = 'best_reconstructor_model.pth'
    best_denoiser_model_path = 'best_denoiser_model.pth'

    for epoch in range(epochs):
        reconstructor.train()
        denoiser.train()

        reconstructor_total_loss = 0
        denoiser_total_loss = 0

        reconstructor_optimizer.zero_grad()
        denoiser_optimizer.zero_grad()

        with tqdm(train_dataloader, unit="batch", desc=f"Epoch {epoch+1}/{epochs}") as tepoch:
            for i, (images, targets) in enumerate(tepoch):
                images, targets = images.to(device), targets.to(device)

                # --- Reconstructor step ---
                reconstructor_outputs = reconstructor(images)
                reconstructor_loss = reconstructor_criterion(reconstructor_outputs, targets)
                # BUG FIX: retain_graph=True was unnecessary -- the denoiser
                # consumes a detached copy of this output, so nothing else
                # backpropagates through the graph; retaining it only wasted
                # memory.
                reconstructor_loss.backward()

                if (i + 1) % grad_accumulation_steps == 0 or (i + 1) == len(tepoch):
                    reconstructor_optimizer.step()
                    reconstructor_optimizer.zero_grad()

                reconstructor_total_loss += reconstructor_loss.item()

                # --- Denoiser step (reconstructor output acts as noisy input) ---
                noisy_images = reconstructor_outputs.detach()  # cut graph link
                denoiser_outputs = denoiser(noisy_images)
                denoiser_loss = denoiser_criterion(denoiser_outputs, targets)
                denoiser_loss.backward()

                if (i + 1) % grad_accumulation_steps == 0 or (i + 1) == len(tepoch):
                    denoiser_optimizer.step()
                    denoiser_optimizer.zero_grad()

                denoiser_total_loss += denoiser_loss.item()

                # BUG FIX: losses are per-batch means already; average over
                # the batch count only (the original also divided by
                # batch_size, under-reporting both losses).
                tepoch.set_postfix(
                    reconstructor_loss=reconstructor_total_loss / (i + 1),
                    denoiser_loss=denoiser_total_loss / (i + 1)
                )

        avg_reconstructor_train_loss = reconstructor_total_loss / len(train_dataloader)
        avg_denoiser_train_loss = denoiser_total_loss / len(train_dataloader)

        avg_reconstructor_val_loss, _ = calculate_loss_and_psnr(reconstructor, val_dataloader, reconstructor_criterion)
        avg_denoiser_val_loss, _ = calculate_loss_and_psnr(denoiser, val_dataloader, denoiser_criterion)

        print(f"Epoch [{epoch+1}/{epochs}] - "
              f"Reconstructor Train Loss: {avg_reconstructor_train_loss:.4f}, "
              f"Denoiser Train Loss: {avg_denoiser_train_loss:.4f}, "
              f"Reconstructor Val Loss: {avg_reconstructor_val_loss:.4f}, "
              f"Denoiser Val Loss: {avg_denoiser_val_loss:.4f}")

        # Save each model whenever its validation loss improves.
        if avg_reconstructor_val_loss < best_reconstructor_val_loss:
            best_reconstructor_val_loss = avg_reconstructor_val_loss
            torch.save(reconstructor.state_dict(), best_reconstructor_model_path)
            print(f"Reconstructor model saved with improved validation loss: {avg_reconstructor_val_loss:.4f}")

        if avg_denoiser_val_loss < best_denoiser_val_loss:
            best_denoiser_val_loss = avg_denoiser_val_loss
            torch.save(denoiser.state_dict(), best_denoiser_model_path)
            print(f"Denoiser model saved with improved validation loss: {avg_denoiser_val_loss:.4f}")

    return reconstructor, denoiser


if __name__ == "__main__":
    # Example usage with train and validation directories.
    reconstructor_model, denoiser_model = train_reconstructor_and_denoiser(
        r"D:\PN_Split\train", r"D:\PN_Split\val", epochs=50, batch_size=1, grad_accumulation_steps=8
    )
695
+ ]
696
+ }
697
+ ],
698
+ "metadata": {
699
+ "kernelspec": {
700
+ "display_name": "tf",
701
+ "language": "python",
702
+ "name": "python3"
703
+ },
704
+ "language_info": {
705
+ "codemirror_mode": {
706
+ "name": "ipython",
707
+ "version": 3
708
+ },
709
+ "file_extension": ".py",
710
+ "mimetype": "text/x-python",
711
+ "name": "python",
712
+ "nbconvert_exporter": "python",
713
+ "pygments_lexer": "ipython3",
714
+ "version": "3.10.11"
715
+ }
716
+ },
717
+ "nbformat": 4,
718
+ "nbformat_minor": 2
719
+ }