aoxo committed on
Commit
97b31bf
·
verified ·
1 Parent(s): c7b7c49

Delete mri_autoencoder.ipynb

Browse files
Files changed (1) hide show
  1. mri_autoencoder.ipynb +0 -434
mri_autoencoder.ipynb DELETED
@@ -1,434 +0,0 @@
1
- {
2
- "cells": [
3
- {
4
- "cell_type": "code",
5
- "execution_count": null,
6
- "metadata": {},
7
- "outputs": [],
8
- "source": [
9
import os
import shutil


def deepcopy_and_rename_dicom(src_folder, dest_folder):
    """Recursively copy every .dcm file under *src_folder* into *dest_folder*.

    Each copied file is renamed to a sequential number ("1.dcm", "2.dcm", ...)
    so files from different patient sub-folders cannot collide in the flat
    destination folder. Originals are left untouched (copy, not move).

    :param src_folder: Root folder walked recursively for DICOM files.
    :param dest_folder: Flat output folder; created if it does not exist.
    """
    # exist_ok avoids the race between an existence check and makedirs.
    os.makedirs(dest_folder, exist_ok=True)

    # Sequential number used for the renamed files.
    file_counter = 1

    # Walk the source tree. NOTE: os.walk order is OS-dependent, so the
    # numbering is only stable within a single run.
    for root, dirs, files in os.walk(src_folder):
        for dicom_file in files:
            # Case-insensitive check for the .dcm extension.
            if dicom_file.lower().endswith('.dcm'):
                src_file_path = os.path.join(root, dicom_file)

                # Target name is the counter, not the original file name.
                dest_file_path = os.path.join(dest_folder, f"{file_counter}.dcm")

                # Copy the file to the destination folder and rename it.
                shutil.copy(src_file_path, dest_file_path)
                print(f"Copied and renamed {dicom_file} to {file_counter}.dcm")

                file_counter += 1


# Example usage — guarded so importing this module has no side effects.
if __name__ == "__main__":
    src_folder = r"D:\Pancreatic Neuroendocrine"  # Replace with your source main folder path
    dest_folder = r"D:\PN_New"  # Replace with your destination folder path
    deepcopy_and_rename_dicom(src_folder, dest_folder)
42
- ]
43
- },
44
- {
45
- "cell_type": "code",
46
- "execution_count": null,
47
- "metadata": {},
48
- "outputs": [],
49
- "source": [
50
import os
import shutil
import random


def split_and_transfer_files(src_folder, dest_folder, split_factor):
    """
    Splits the files in src_folder and moves them into train and val subfolders
    in dest_folder based on the provided split_factor.

    Files are MOVED (not copied), so src_folder is drained of .dcm files.

    :param src_folder: The source folder containing the files to split.
    :param dest_folder: The destination folder where the train and val subfolders will be created.
    :param split_factor: The ratio of files to go into the train subfolder (e.g., 0.8 for 80% train, 20% val).
    """
    train_folder = os.path.join(dest_folder, 'train')
    val_folder = os.path.join(dest_folder, 'val')

    # exist_ok avoids the check-then-create race.
    os.makedirs(train_folder, exist_ok=True)
    os.makedirs(val_folder, exist_ok=True)

    # Only .dcm files are split; anything else in the folder is left alone.
    dicom_files = [f for f in os.listdir(src_folder) if f.lower().endswith('.dcm')]

    # Shuffle so the split is random rather than ordered by filename.
    random.shuffle(dicom_files)

    # Everything before split_index goes to train, the rest to val.
    split_index = int(len(dicom_files) * split_factor)
    train_files = dicom_files[:split_index]
    val_files = dicom_files[split_index:]

    for file in train_files:
        shutil.move(os.path.join(src_folder, file), os.path.join(train_folder, file))
        print(f"Moved {file} to train folder")

    for file in val_files:
        shutil.move(os.path.join(src_folder, file), os.path.join(val_folder, file))
        print(f"Moved {file} to val folder")


# Example usage — guarded so importing this module has no side effects
# (the original ran at import time and crashed if src_folder was missing).
if __name__ == "__main__":
    src_folder = r"D:\PN_New"  # Replace with your source folder path
    dest_folder = r"D:\PN_Split"  # Replace with your destination folder path
    split_factor = 0.9  # 90% of files will go to 'train', 10% will go to 'val'

    split_and_transfer_files(src_folder, dest_folder, split_factor)
105
- ]
106
- },
107
- {
108
- "cell_type": "code",
109
- "execution_count": null,
110
- "metadata": {},
111
- "outputs": [],
112
- "source": [
113
- "import torch\n",
114
- "import torch.nn as nn\n",
115
- "import torch.nn.functional as F\n",
116
- "import pydicom\n",
117
- "import numpy as np\n",
118
- "from torch.utils.data import Dataset, DataLoader\n",
119
- "import os\n",
120
- "\n",
121
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')

class MedicalImageDataset(Dataset):
    """Dataset yielding (image, image) pairs from a flat folder of DICOM files.

    Target equals input because the model is trained as an autoencoder.
    """

    def __init__(self, dicom_dir):
        # Case-insensitive extension check, consistent with the copy script.
        self.dicom_files = [os.path.join(dicom_dir, f)
                            for f in os.listdir(dicom_dir)
                            if f.lower().endswith('.dcm')]

    def __len__(self):
        return len(self.dicom_files)

    def __getitem__(self, idx):
        # Read DICOM file and min-max normalize to [0, 1].
        dcm = pydicom.dcmread(self.dicom_files[idx])
        image = dcm.pixel_array.astype(float)
        lo, hi = image.min(), image.max()
        if hi > lo:
            image = (image - lo) / (hi - lo)
        else:
            # Constant-valued slice: the original divided by zero here.
            image = image - lo

        # Add a channel dimension: (H, W) -> (1, H, W).
        image_tensor = torch.from_numpy(image).float().unsqueeze(0)
        return image_tensor, image_tensor
139
- "\n",
140
class UNetBlock(nn.Module):
    """Two (3x3 Conv -> BatchNorm -> ReLU) stages; spatial size is preserved."""

    def __init__(self, in_channels, out_channels):
        super().__init__()
        self.conv1 = nn.Conv2d(in_channels, out_channels, 3, padding=1)
        self.bn1 = nn.BatchNorm2d(out_channels)
        self.conv2 = nn.Conv2d(out_channels, out_channels, 3, padding=1)
        self.bn2 = nn.BatchNorm2d(out_channels)

    def forward(self, x):
        out = self.conv1(x)
        out = F.relu(self.bn1(out))
        out = self.conv2(out)
        out = F.relu(self.bn2(out))
        return out
152
- "\n",
153
class UNet(nn.Module):
    """Three-level encoder/decoder with skip connections (image-to-image)."""

    def __init__(self, in_channels=1, out_channels=1):
        super().__init__()
        # Encoder levels.
        self.enc1 = UNetBlock(in_channels, 64)
        self.enc2 = UNetBlock(64, 128)
        self.enc3 = UNetBlock(128, 256)

        # Decoder levels; input widths account for the concatenated skips.
        self.dec3 = UNetBlock(256 + 128, 128)
        self.dec2 = UNetBlock(128 + 64, 64)
        self.dec1 = UNetBlock(64, out_channels)

        # Shared down/up-sampling operators.
        self.pool = nn.MaxPool2d(2, 2)
        self.upsample = nn.Upsample(scale_factor=2, mode='bilinear', align_corners=True)

    def forward(self, x):
        # Contracting path: resolution halves between levels.
        skip1 = self.enc1(x)
        skip2 = self.enc2(self.pool(skip1))
        bottom = self.enc3(self.pool(skip2))

        # Expanding path: upsample, concatenate the matching skip, refine.
        up = self.dec3(torch.cat([self.upsample(bottom), skip2], dim=1))
        up = self.dec2(torch.cat([self.upsample(up), skip1], dim=1))
        return self.dec1(up)
188
- "\n",
189
def train_unet(dicom_dir, epochs=50, batch_size=4):
    """Train the UNet as an autoencoder on every DICOM file in *dicom_dir*.

    :param dicom_dir: Folder containing .dcm files (flat layout).
    :param epochs: Number of passes over the dataset.
    :param batch_size: Mini-batch size for the DataLoader.
    :return: The trained model, resident on the module-level ``device``.
    """
    # Dataset and DataLoader
    dataset = MedicalImageDataset(dicom_dir)
    dataloader = DataLoader(dataset, batch_size=batch_size, shuffle=True)

    # Model, Loss, Optimizer
    model = UNet().to(device)
    criterion = nn.MSELoss()
    optimizer = torch.optim.Adam(model.parameters(), lr=0.0001)

    # Be explicit about training mode (BatchNorm behaves differently in eval).
    model.train()

    # Training loop
    for epoch in range(epochs):
        total_loss = 0.0
        for images, targets in dataloader:
            images, targets = images.to(device), targets.to(device)
            optimizer.zero_grad()
            outputs = model(images)
            loss = criterion(outputs, targets)
            loss.backward()
            optimizer.step()
            total_loss += loss.item()

        print(f'Epoch [{epoch+1}/{epochs}], Loss: {total_loss/len(dataloader):.4f}')

    return model


# Example usage — guarded so importing this module does not start training
# (the original kicked off a 50-epoch run at import time).
if __name__ == "__main__":
    model = train_unet(r"D:\PN_New", epochs=50, batch_size=1)
217
- ]
218
- },
219
- {
220
- "cell_type": "code",
221
- "execution_count": null,
222
- "metadata": {},
223
- "outputs": [
224
- {
225
- "name": "stderr",
226
- "output_type": "stream",
227
- "text": [
228
- "Epoch 1/50: 0%| | 0/5057 [00:00<?, ?batch/s]c:\\Users\\alosh\\anaconda3\\envs\\tf\\lib\\site-packages\\torch\\utils\\checkpoint.py:460: UserWarning: torch.utils.checkpoint: please pass in use_reentrant=True or use_reentrant=False explicitly. The default value of use_reentrant will be updated to be False in the future. To maintain current behavior, pass use_reentrant=True. It is recommended that you use use_reentrant=False. Refer to docs for more details on the differences between the two variants.\n",
229
- " warnings.warn(\n",
230
- "c:\\Users\\alosh\\anaconda3\\envs\\tf\\lib\\site-packages\\torch\\utils\\checkpoint.py:90: UserWarning: None of the inputs have requires_grad=True. Gradients will be None\n",
231
- " warnings.warn(\n"
232
- ]
233
- }
234
- ],
235
- "source": [
236
- "import torch\n",
237
- "import torch.nn as nn\n",
238
- "import torch.nn.functional as F\n",
239
- "import pydicom\n",
240
- "import numpy as np\n",
241
- "from torch.utils.data import Dataset, DataLoader\n",
242
- "import os\n",
243
- "from torch.utils.checkpoint import checkpoint\n",
244
- "from tqdm import tqdm # Import tqdm for progress bar\n",
245
- "\n",
246
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')

class MedicalImageDataset(Dataset):
    """Dataset yielding (image, image) pairs from a flat folder of DICOM files.

    Target equals input because the model is trained as an autoencoder.
    """

    def __init__(self, dicom_dir):
        # Case-insensitive extension check, consistent with the copy script.
        self.dicom_files = [os.path.join(dicom_dir, f)
                            for f in os.listdir(dicom_dir)
                            if f.lower().endswith('.dcm')]

    def __len__(self):
        return len(self.dicom_files)

    def __getitem__(self, idx):
        # Read DICOM file and min-max normalize to [0, 1].
        dcm = pydicom.dcmread(self.dicom_files[idx])
        image = dcm.pixel_array.astype(float)
        lo, hi = image.min(), image.max()
        if hi > lo:
            image = (image - lo) / (hi - lo)
        else:
            # Constant-valued slice: the original divided by zero here.
            image = image - lo

        # Add a channel dimension: (H, W) -> (1, H, W).
        image_tensor = torch.from_numpy(image).float().unsqueeze(0)
        return image_tensor, image_tensor
264
- "\n",
265
class UNetBlock(nn.Module):
    """Conv-BN-ReLU applied twice; 3x3 kernels with padding keep H and W."""

    def __init__(self, in_channels, out_channels):
        super().__init__()
        self.conv1 = nn.Conv2d(in_channels, out_channels, 3, padding=1)
        self.bn1 = nn.BatchNorm2d(out_channels)
        self.conv2 = nn.Conv2d(out_channels, out_channels, 3, padding=1)
        self.bn2 = nn.BatchNorm2d(out_channels)

    def forward(self, x):
        # First stage, then second; each is conv -> batch-norm -> ReLU.
        h = F.relu(self.bn1(self.conv1(x)))
        return F.relu(self.bn2(self.conv2(h)))
277
- "\n",
278
class UNet(nn.Module):
    """Three-level UNet whose blocks are gradient-checkpointed.

    Checkpointing trades compute for memory: block activations are recomputed
    during backward instead of being stored through the forward pass.
    """

    def __init__(self, in_channels=1, out_channels=1):
        super().__init__()
        # Encoder
        self.enc1 = UNetBlock(in_channels, 64)
        self.enc2 = UNetBlock(64, 128)
        self.enc3 = UNetBlock(128, 256)

        # Decoder
        self.dec3 = UNetBlock(256 + 128, 128)  # Adjust for concatenation with skip connection
        self.dec2 = UNetBlock(128 + 64, 64)  # Adjust for concatenation with skip connection
        self.dec1 = UNetBlock(64, out_channels)

        # Pooling and upsampling
        self.pool = nn.MaxPool2d(2, 2)
        self.upsample = nn.Upsample(scale_factor=2, mode='bilinear', align_corners=True)

    def forward(self, x):
        # BUGFIX: use_reentrant=False. The recorded run warned
        # "None of the inputs have requires_grad=True. Gradients will be None"
        # — with the reentrant variant, checkpointing enc1 on a no-grad input
        # breaks gradient flow to enc1's parameters. The non-reentrant variant
        # handles no-grad inputs correctly and silences the deprecation warning.
        e1 = checkpoint(self.enc1, x, use_reentrant=False)
        e2 = checkpoint(self.enc2, self.pool(e1), use_reentrant=False)
        e3 = checkpoint(self.enc3, self.pool(e2), use_reentrant=False)

        # Decoder path with skip connections
        d3 = self.upsample(e3)
        d3 = torch.cat([d3, e2], dim=1)  # Concatenate along channels
        d3 = checkpoint(self.dec3, d3, use_reentrant=False)

        d2 = self.upsample(d3)
        d2 = torch.cat([d2, e1], dim=1)  # Concatenate along channels
        d2 = checkpoint(self.dec2, d2, use_reentrant=False)

        # No checkpointing for the final layer: its activations are small.
        d1 = self.dec1(d2)

        return d1
313
- "\n",
314
def calculate_loss(model, dataloader, criterion):
    """Return the mean criterion value of *model* over *dataloader*, grad-free."""
    model.eval()
    running = 0.0
    with torch.no_grad():
        for batch_in, batch_target in dataloader:
            batch_in = batch_in.to(device)
            batch_target = batch_target.to(device)
            running += criterion(model(batch_in), batch_target).item()
    return running / len(dataloader)
324
- "\n",
325
def calculate_psnr(output, target, max_pixel=1.0):
    """Peak signal-to-noise ratio (dB) between *output* and *target*.

    :param max_pixel: Maximum possible pixel value (1.0 for [0, 1] images).
    """
    # PSNR = 20 * log10(MAX / RMSE); +inf when the tensors are identical.
    rmse = torch.sqrt(F.mse_loss(output, target))
    return (20 * torch.log10(max_pixel / rmse)).item()
330
- "\n",
331
def calculate_loss_and_psnr(model, dataloader, criterion):
    """Return (mean loss, mean PSNR) of *model* across *dataloader*, grad-free."""
    model.eval()
    loss_sum = 0.0
    psnr_sum = 0.0
    num_batches = len(dataloader)

    with torch.no_grad():
        for images, targets in dataloader:
            images = images.to(device)
            targets = targets.to(device)
            outputs = model(images)

            # Accumulate the per-batch criterion value.
            loss_sum += criterion(outputs, targets).item()
            # Accumulate the per-batch PSNR.
            psnr_sum += calculate_psnr(outputs, targets)

    return loss_sum / num_batches, psnr_sum / num_batches
354
- "\n",
355
# Kept for backward compatibility with code reading these module globals;
# train_unet tracks the best loss locally (see BUGFIX note inside).
best_val_loss = float('inf')
best_model_path = 'best_model.pth'


def train_unet(dicom_dir, val_dicom_dir, epochs=50, batch_size=4, grad_accumulation_steps=2):
    """Train the checkpointed UNet as an autoencoder with validation tracking.

    The best checkpoint (by validation loss) is written to ``best_model_path``.

    :param dicom_dir: Folder of training .dcm files.
    :param val_dicom_dir: Folder of validation .dcm files.
    :param epochs: Number of epochs.
    :param batch_size: DataLoader batch size.
    :param grad_accumulation_steps: Batches whose gradients are accumulated
        before each optimizer step (simulates a larger effective batch).
    :return: The trained model.
    """
    # Dataset and DataLoader
    train_dataloader = DataLoader(MedicalImageDataset(dicom_dir),
                                  batch_size=batch_size, shuffle=True)
    val_dataloader = DataLoader(MedicalImageDataset(val_dicom_dir),
                                batch_size=batch_size, shuffle=False)

    # Model, Loss, Optimizer
    model = UNet().to(device)
    criterion = nn.MSELoss()
    optimizer = torch.optim.Adam(model.parameters(), lr=0.0001)

    # BUGFIX: the original read the module-level best_val_loss while also
    # assigning it inside this function without `global`, which raises
    # UnboundLocalError on the first comparison. Track it locally instead.
    best_loss_so_far = float('inf')

    # Training loop with tqdm
    for epoch in range(epochs):
        model.train()
        total_loss = 0.0
        optimizer.zero_grad()

        with tqdm(train_dataloader, unit="batch", desc=f"Epoch {epoch+1}/{epochs}") as tepoch:
            for i, (images, targets) in enumerate(tepoch):
                images, targets = images.to(device), targets.to(device)

                # Forward pass
                outputs = model(images)
                loss = criterion(outputs, targets)
                # Scale so accumulated gradients average (not sum) over the
                # accumulation window — otherwise the effective learning rate
                # grows with grad_accumulation_steps.
                (loss / grad_accumulation_steps).backward()

                # Step once per accumulation window (and at the epoch's end).
                if (i + 1) % grad_accumulation_steps == 0 or (i + 1) == len(tepoch):
                    optimizer.step()
                    optimizer.zero_grad()

                total_loss += loss.item()

                # Show the running mean per-batch loss. The original divided
                # by (i+1)*batch_size, which disagreed with the epoch average.
                tepoch.set_postfix(loss=total_loss / (i + 1))

        avg_train_loss = total_loss / len(train_dataloader)
        avg_val_loss, avg_val_psnr = calculate_loss_and_psnr(model, val_dataloader, criterion)

        print(f"Epoch [{epoch+1}/{epochs}] - Train Loss: {avg_train_loss:.4f}, Validation Loss: {avg_val_loss:.4f}, Validation PSNR: {avg_val_psnr:.4f}")

        # Save model if validation loss is improved
        if avg_val_loss < best_loss_so_far:
            best_loss_so_far = avg_val_loss
            torch.save(model.state_dict(), best_model_path)
            print(f"Model saved with improved validation loss: {avg_val_loss:.4f}")

    return model


# Example usage — guarded so importing this module does not start training.
if __name__ == "__main__":
    model = train_unet(r"D:\PN_Split\train", r"D:\PN_Split\val", epochs=50, batch_size=4, grad_accumulation_steps=8)
410
- ]
411
- }
412
- ],
413
- "metadata": {
414
- "kernelspec": {
415
- "display_name": "tf",
416
- "language": "python",
417
- "name": "python3"
418
- },
419
- "language_info": {
420
- "codemirror_mode": {
421
- "name": "ipython",
422
- "version": 3
423
- },
424
- "file_extension": ".py",
425
- "mimetype": "text/x-python",
426
- "name": "python",
427
- "nbconvert_exporter": "python",
428
- "pygments_lexer": "ipython3",
429
- "version": "3.10.11"
430
- }
431
- },
432
- "nbformat": 4,
433
- "nbformat_minor": 2
434
- }