aoxo committed on
Commit
3ebecbd
·
verified ·
1 Parent(s): 3c70d2b

Delete mri_autoencoder.ipynb

Browse files
Files changed (1) hide show
  1. mri_autoencoder.ipynb +0 -393
mri_autoencoder.ipynb DELETED
@@ -1,393 +0,0 @@
1
- {
2
- "cells": [
3
- {
4
- "cell_type": "code",
5
- "execution_count": null,
6
- "metadata": {},
7
- "outputs": [],
8
- "source": [
9
- "import os\n",
10
- "import shutil\n",
11
- "\n",
12
def deepcopy_and_rename_dicom(src_folder, dest_folder):
    """Copy every DICOM file found under *src_folder* (recursively) into
    *dest_folder*, renaming the copies ``1.dcm``, ``2.dcm``, ... in order
    of discovery.

    :param src_folder: root folder scanned recursively for ``.dcm`` files
        (extension match is case-insensitive).
    :param dest_folder: flat destination folder, created if missing.
    :return: the number of files copied.
    """
    # exist_ok avoids the check-then-create race of os.path.exists + makedirs.
    os.makedirs(dest_folder, exist_ok=True)

    # Sequential number used for the next renamed file.
    file_counter = 1

    for root, dirs, files in os.walk(src_folder):
        # sorted() makes numbering deterministic; os.walk yields files in
        # arbitrary, filesystem-dependent order otherwise.
        for dicom_file in sorted(files):
            if not dicom_file.lower().endswith('.dcm'):
                continue
            src_file_path = os.path.join(root, dicom_file)
            dest_file_path = os.path.join(dest_folder, f"{file_counter}.dcm")
            shutil.copy(src_file_path, dest_file_path)
            print(f"Copied and renamed {dicom_file} to {file_counter}.dcm")
            file_counter += 1

    return file_counter - 1
37
- "\n",
38
- "# Example usage:\n",
39
- "src_folder = r\"D:\\Pancreatic Neuroendocrine\" # Replace with your source main folder path\n",
40
- "dest_folder = r\"D:\\PN_New\" # Replace with your destination folder path\n",
41
- "deepcopy_and_rename_dicom(src_folder, dest_folder)"
42
- ]
43
- },
44
- {
45
- "cell_type": "code",
46
- "execution_count": null,
47
- "metadata": {},
48
- "outputs": [],
49
- "source": [
50
- "import os\n",
51
- "import shutil\n",
52
- "import random\n",
53
- "\n",
54
def split_and_transfer_files(src_folder, dest_folder, split_factor):
    """
    Split the DICOM files in *src_folder* into train/val subsets and MOVE
    them into ``dest_folder/train`` and ``dest_folder/val``.

    :param src_folder: folder containing the ``.dcm`` files to split
        (files are moved out of it, not copied).
    :param dest_folder: folder in which the ``train``/``val`` subfolders
        are created.
    :param split_factor: fraction of files sent to ``train``
        (e.g. 0.8 for an 80/20 train/val split).
    """
    train_folder = os.path.join(dest_folder, 'train')
    val_folder = os.path.join(dest_folder, 'val')
    # exist_ok collapses the exists()/makedirs() pair and is race-free.
    os.makedirs(train_folder, exist_ok=True)
    os.makedirs(val_folder, exist_ok=True)

    # Case-insensitive extension match, consistent with the copy helper.
    dicom_files = [f for f in os.listdir(src_folder) if f.lower().endswith('.dcm')]

    # Shuffle so the split is random rather than directory-order dependent.
    random.shuffle(dicom_files)

    # First split_factor of the shuffled list goes to train, the rest to val.
    split_index = int(len(dicom_files) * split_factor)
    train_files = dicom_files[:split_index]
    val_files = dicom_files[split_index:]

    for file in train_files:
        shutil.move(os.path.join(src_folder, file), os.path.join(train_folder, file))
        print(f"Moved {file} to train folder")

    for file in val_files:
        shutil.move(os.path.join(src_folder, file), os.path.join(val_folder, file))
        print(f"Moved {file} to val folder")
98
- "\n",
99
- "# Example usage:\n",
100
- "src_folder = r\"D:\\PN_New\" # Replace with your source folder path\n",
101
- "dest_folder = r\"D:\\PN_Split\" # Replace with your destination folder path\n",
102
- "split_factor = 0.9 # 80% of files will go to 'train', 20% will go to 'val'\n",
103
- "\n",
104
- "split_and_transfer_files(src_folder, dest_folder, split_factor)"
105
- ]
106
- },
107
- {
108
- "cell_type": "code",
109
- "execution_count": null,
110
- "metadata": {},
111
- "outputs": [],
112
- "source": [
113
- "import torch\n",
114
- "import torch.nn as nn\n",
115
- "import torch.nn.functional as F\n",
116
- "import pydicom\n",
117
- "import numpy as np\n",
118
- "from torch.utils.data import Dataset, DataLoader\n",
119
- "import os\n",
120
- "\n",
121
- "device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')\n",
122
- "\n",
123
class MedicalImageDataset(Dataset):
    """Dataset of single-slice DICOM images for autoencoder training.

    Each item is an (input, target) pair of identical [0, 1]-normalised
    1xHxW float tensors, so a model trained on it learns to reconstruct
    its input.
    """

    def __init__(self, dicom_dir):
        # Case-insensitive match, consistent with the copy/split helpers
        # (the original plain .endswith('.dcm') silently skipped '.DCM').
        self.dicom_files = [os.path.join(dicom_dir, f)
                            for f in os.listdir(dicom_dir)
                            if f.lower().endswith('.dcm')]

    def __len__(self):
        return len(self.dicom_files)

    def __getitem__(self, idx):
        # Read the DICOM file and min-max normalise its pixel data to [0, 1].
        dcm = pydicom.dcmread(self.dicom_files[idx])
        image = dcm.pixel_array.astype(float)
        lo, hi = image.min(), image.max()
        if hi > lo:
            image = (image - lo) / (hi - lo)
        else:
            # Constant image: the original divided by zero and produced NaNs.
            image = np.zeros_like(image)

        # Add the channel dimension; target == input (autoencoder).
        image_tensor = torch.from_numpy(image).float().unsqueeze(0)
        return image_tensor, image_tensor
139
- "\n",
140
class UNetBlock(nn.Module):
    """Two 3x3 convolutions, each followed by batch-norm and ReLU."""

    def __init__(self, in_channels, out_channels):
        super().__init__()
        self.conv1 = nn.Conv2d(in_channels, out_channels, 3, padding=1)
        self.bn1 = nn.BatchNorm2d(out_channels)
        self.conv2 = nn.Conv2d(out_channels, out_channels, 3, padding=1)
        self.bn2 = nn.BatchNorm2d(out_channels)

    def forward(self, features):
        # conv -> BN -> ReLU, applied twice.
        hidden = F.relu(self.bn1(self.conv1(features)))
        return F.relu(self.bn2(self.conv2(hidden)))
152
- "\n",
153
class UNet(nn.Module):
    """Small 3-level U-Net autoencoder (64/128/256 channels).

    NOTE(review): the final UNetBlock ends in BatchNorm + ReLU, so outputs
    are non-negative and unbounded rather than squashed to [0, 1] — confirm
    this is intended for reconstructing normalised images.
    """

    def __init__(self, in_channels=1, out_channels=1):
        super().__init__()
        # Encoder
        self.enc1 = UNetBlock(in_channels, 64)
        self.enc2 = UNetBlock(64, 128)
        self.enc3 = UNetBlock(128, 256)

        # Decoder: input channels account for concatenation with skips.
        self.dec3 = UNetBlock(256 + 128, 128)
        self.dec2 = UNetBlock(128 + 64, 64)
        self.dec1 = UNetBlock(64, out_channels)

        # Pooling / upsampling. self.upsample is kept for backward
        # compatibility; forward() now resizes to the exact skip size.
        self.pool = nn.MaxPool2d(2, 2)
        self.upsample = nn.Upsample(scale_factor=2, mode='bilinear', align_corners=True)

    @staticmethod
    def _up_to(x, ref):
        # Upsample x to ref's spatial size. Identical to scale_factor=2 for
        # even inputs, but also handles odd sizes where a fixed factor would
        # make torch.cat fail on mismatched shapes.
        return F.interpolate(x, size=ref.shape[2:], mode='bilinear', align_corners=True)

    def forward(self, x):
        # Encoder path
        e1 = self.enc1(x)
        e2 = self.enc2(self.pool(e1))
        e3 = self.enc3(self.pool(e2))

        # Decoder path with skip connections (concatenate along channels)
        d3 = self.dec3(torch.cat([self._up_to(e3, e2), e2], dim=1))
        d2 = self.dec2(torch.cat([self._up_to(d3, e1), e1], dim=1))
        return self.dec1(d2)
188
- "\n",
189
def train_unet(dicom_dir, epochs=50, batch_size=4, lr=0.0001):
    """Train a UNet autoencoder on the DICOM slices in *dicom_dir*.

    :param dicom_dir: folder of ``.dcm`` files (inputs are their own targets).
    :param epochs: number of full passes over the dataset.
    :param batch_size: DataLoader batch size.
    :param lr: Adam learning rate (new parameter; default keeps the old value).
    :return: the trained model.
    """
    # Dataset and DataLoader
    dataset = MedicalImageDataset(dicom_dir)
    dataloader = DataLoader(dataset, batch_size=batch_size, shuffle=True)

    # Model, loss, optimizer
    model = UNet().to(device)
    criterion = nn.MSELoss()
    optimizer = torch.optim.Adam(model.parameters(), lr=lr)

    # Explicit train mode so BatchNorm uses batch statistics (the original
    # relied on the construction-time default).
    model.train()

    for epoch in range(epochs):
        total_loss = 0
        for images, targets in dataloader:
            images, targets = images.to(device), targets.to(device)
            optimizer.zero_grad()
            outputs = model(images)
            loss = criterion(outputs, targets)
            loss.backward()
            optimizer.step()
            total_loss += loss.item()

        # loss.item() is already a per-batch mean, so divide by batch count.
        print(f'Epoch [{epoch+1}/{epochs}], Loss: {total_loss/len(dataloader):.4f}')

    return model
214
- "\n",
215
- "# Example usage\n",
216
- "model = train_unet(r\"D:\\PN_New\", epochs=50, batch_size=1)"
217
- ]
218
- },
219
- {
220
- "cell_type": "code",
221
- "execution_count": null,
222
- "metadata": {},
223
- "outputs": [],
224
- "source": [
225
- "import torch\n",
226
- "import torch.nn as nn\n",
227
- "import torch.nn.functional as F\n",
228
- "import pydicom\n",
229
- "import numpy as np\n",
230
- "from torch.utils.data import Dataset, DataLoader\n",
231
- "import os\n",
232
- "from torch.utils.checkpoint import checkpoint\n",
233
- "from tqdm import tqdm # Import tqdm for progress bar\n",
234
- "\n",
235
- "device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')\n",
236
- "\n",
237
class MedicalImageDataset(Dataset):
    """Dataset of single-slice DICOM images for autoencoder training.

    Each item is an (input, target) pair of identical [0, 1]-normalised
    1xHxW float tensors, so a model trained on it learns to reconstruct
    its input.
    """

    def __init__(self, dicom_dir):
        # Case-insensitive match, consistent with the copy/split helpers
        # (the original plain .endswith('.dcm') silently skipped '.DCM').
        self.dicom_files = [os.path.join(dicom_dir, f)
                            for f in os.listdir(dicom_dir)
                            if f.lower().endswith('.dcm')]

    def __len__(self):
        return len(self.dicom_files)

    def __getitem__(self, idx):
        # Read the DICOM file and min-max normalise its pixel data to [0, 1].
        dcm = pydicom.dcmread(self.dicom_files[idx])
        image = dcm.pixel_array.astype(float)
        lo, hi = image.min(), image.max()
        if hi > lo:
            image = (image - lo) / (hi - lo)
        else:
            # Constant image: the original divided by zero and produced NaNs.
            image = np.zeros_like(image)

        # Add the channel dimension; target == input (autoencoder).
        image_tensor = torch.from_numpy(image).float().unsqueeze(0)
        return image_tensor, image_tensor
253
- "\n",
254
class UNetBlock(nn.Module):
    """Two 3x3 convolutions, each followed by batch-norm and ReLU."""

    def __init__(self, in_channels, out_channels):
        super().__init__()
        self.conv1 = nn.Conv2d(in_channels, out_channels, 3, padding=1)
        self.bn1 = nn.BatchNorm2d(out_channels)
        self.conv2 = nn.Conv2d(out_channels, out_channels, 3, padding=1)
        self.bn2 = nn.BatchNorm2d(out_channels)

    def forward(self, features):
        # conv -> BN -> ReLU, applied twice.
        hidden = F.relu(self.bn1(self.conv1(features)))
        return F.relu(self.bn2(self.conv2(hidden)))
266
- "\n",
267
class UNet(nn.Module):
    """3-level U-Net autoencoder with gradient checkpointing on the heavy
    encoder/decoder blocks to reduce peak activation memory.
    """

    def __init__(self, in_channels=1, out_channels=1):
        super().__init__()
        # Encoder
        self.enc1 = UNetBlock(in_channels, 64)
        self.enc2 = UNetBlock(64, 128)
        self.enc3 = UNetBlock(128, 256)

        # Decoder: input channels account for concatenation with skips.
        self.dec3 = UNetBlock(256 + 128, 128)
        self.dec2 = UNetBlock(128 + 64, 64)
        self.dec1 = UNetBlock(64, out_channels)

        # Pooling / upsampling. self.upsample is kept for backward
        # compatibility; forward() now resizes to the exact skip size.
        self.pool = nn.MaxPool2d(2, 2)
        self.upsample = nn.Upsample(scale_factor=2, mode='bilinear', align_corners=True)

    @staticmethod
    def _ckpt(block, x):
        # BUG FIX: use_reentrant=False. The reentrant (legacy default)
        # variant warns and computes no parameter gradients when the block
        # input itself does not require grad — which is the case for enc1,
        # whose input is the raw image batch.
        return checkpoint(block, x, use_reentrant=False)

    @staticmethod
    def _up_to(x, ref):
        # Upsample x to ref's spatial size. Identical to scale_factor=2 for
        # even inputs, but also handles odd sizes where a fixed factor would
        # make torch.cat fail on mismatched shapes.
        return F.interpolate(x, size=ref.shape[2:], mode='bilinear', align_corners=True)

    def forward(self, x):
        # Encoder path (checkpointed: activations recomputed on backward)
        e1 = self._ckpt(self.enc1, x)
        e2 = self._ckpt(self.enc2, self.pool(e1))
        e3 = self._ckpt(self.enc3, self.pool(e2))

        # Decoder path with skip connections (concatenate along channels)
        d3 = self._ckpt(self.dec3, torch.cat([self._up_to(e3, e2), e2], dim=1))
        d2 = self._ckpt(self.dec2, torch.cat([self._up_to(d3, e1), e1], dim=1))

        # Final projection is cheap — no checkpointing.
        return self.dec1(d2)
302
- "\n",
303
def calculate_loss(model, dataloader, criterion):
    """Return the mean per-batch *criterion* loss of *model* over *dataloader*.

    Leaves the model in eval mode; callers must switch back to train mode
    themselves (train_unet calls model.train() at the top of every epoch).
    Returns NaN for an empty dataloader instead of raising ZeroDivisionError.
    """
    model.eval()
    total_loss = 0
    with torch.no_grad():  # no autograd graph needed for evaluation
        for images, targets in dataloader:
            images, targets = images.to(device), targets.to(device)
            outputs = model(images)
            total_loss += criterion(outputs, targets).item()
    if len(dataloader) == 0:
        return float('nan')
    return total_loss / len(dataloader)
313
- "\n",
314
# Best-validation-loss tracking, shared across calls to train_unet.
best_val_loss = float('inf')
best_model_path = 'best_model.pth'

def train_unet(dicom_dir, val_dicom_dir, epochs=50, batch_size=4, grad_accumulation_steps=2):
    """Train a UNet autoencoder with validation, gradient accumulation and
    best-checkpoint saving.

    :param dicom_dir: folder of training ``.dcm`` files.
    :param val_dicom_dir: folder of validation ``.dcm`` files.
    :param epochs: number of epochs.
    :param batch_size: per-step batch size (effective batch is
        ``batch_size * grad_accumulation_steps``).
    :param grad_accumulation_steps: the optimizer steps once every N batches.
    :return: the model after the last epoch (not necessarily the best one;
        the best weights are saved to ``best_model_path``).
    """
    # BUG FIX: without this declaration, the assignment to best_val_loss
    # below makes the name function-local, and the first comparison raises
    # UnboundLocalError.
    global best_val_loss

    # Dataset and DataLoader
    train_dataloader = DataLoader(MedicalImageDataset(dicom_dir),
                                  batch_size=batch_size, shuffle=True)
    val_dataloader = DataLoader(MedicalImageDataset(val_dicom_dir),
                                batch_size=batch_size, shuffle=False)

    # Model, loss, optimizer
    model = UNet().to(device)
    criterion = nn.MSELoss()
    optimizer = torch.optim.Adam(model.parameters(), lr=0.0001)

    for epoch in range(epochs):
        model.train()  # calculate_loss leaves the model in eval mode
        total_loss = 0
        optimizer.zero_grad()

        with tqdm(train_dataloader, unit="batch", desc=f"Epoch {epoch+1}/{epochs}") as tepoch:
            for i, (images, targets) in enumerate(tepoch):
                images, targets = images.to(device), targets.to(device)

                outputs = model(images)
                loss = criterion(outputs, targets)
                # BUG FIX: scale by the accumulation count so the summed
                # gradient equals the gradient of the mean over the effective
                # batch (the original accumulated N-times-too-large grads).
                (loss / grad_accumulation_steps).backward()

                # Step once every grad_accumulation_steps batches, and on the
                # final (possibly short) group.
                if (i + 1) % grad_accumulation_steps == 0 or (i + 1) == len(tepoch):
                    optimizer.step()
                    optimizer.zero_grad()

                total_loss += loss.item()

                # BUG FIX: loss.item() is already a per-batch mean; the
                # original divided by batch_size again, understating the loss.
                tepoch.set_postfix(loss=total_loss / (i + 1))

        avg_train_loss = total_loss / len(train_dataloader)
        avg_val_loss = calculate_loss(model, val_dataloader, criterion)

        print(f"Epoch [{epoch+1}/{epochs}] - Train Loss: {avg_train_loss:.4f}, Validation Loss: {avg_val_loss:.4f}")

        # Persist the weights whenever validation improves.
        if avg_val_loss < best_val_loss:
            best_val_loss = avg_val_loss
            torch.save(model.state_dict(), best_model_path)
            print(f"Model saved with improved validation loss: {avg_val_loss:.4f}")

    return model
366
- "\n",
367
- "# Example usage with train and validation directories\n",
368
- "model = train_unet(r\"D:\\PN_Split\\train\", r\"D:\\PN_Split\\val\", epochs=50, batch_size=4, grad_accumulation_steps=8)"
369
- ]
370
- }
371
- ],
372
- "metadata": {
373
- "kernelspec": {
374
- "display_name": "tf",
375
- "language": "python",
376
- "name": "python3"
377
- },
378
- "language_info": {
379
- "codemirror_mode": {
380
- "name": "ipython",
381
- "version": 3
382
- },
383
- "file_extension": ".py",
384
- "mimetype": "text/x-python",
385
- "name": "python",
386
- "nbconvert_exporter": "python",
387
- "pygments_lexer": "ipython3",
388
- "version": "3.10.11"
389
- }
390
- },
391
- "nbformat": 4,
392
- "nbformat_minor": 2
393
- }