diff --git a/GAN_model.py b/GAN_model.py deleted file mode 100644 index b4eff11c8c86c2d3063de38e99875879cd63e05f..0000000000000000000000000000000000000000 --- a/GAN_model.py +++ /dev/null @@ -1,234 +0,0 @@ -import tensorflow as tf -import random -import os -import numpy as np -from tensorflow.keras import layers -from tqdm import tqdm -from tensorflow.keras.callbacks import ReduceLROnPlateau, EarlyStopping, ModelCheckpoint, TensorBoard - - -#You can change the id for each run so that all models and stats are saved separately. -name_id = "GAN_test" -prediction_path = './predicts_'+name_id+'/' -log_path = './logs_'+name_id+'/' -model_path = './models_'+name_id+'/' -save_model_path = './models_'+name_id+'/' - -# Create the folder if it does not exist -os.makedirs(model_path, exist_ok=True) -os.makedirs(prediction_path, exist_ok=True) - -# Hyperparameters -z_dim = 100 -input_shape = (256, 256, 6) - -def build_generator(): - input_image = layers.Input(shape=input_shape, name='input_image') - z = layers.Input(shape=(z_dim,), name='z') - - # Transform the noise - z_transformed = layers.Dense(256*256*6, activation='relu')(z) - z_transformed = layers.Reshape((256, 256, 6))(z_transformed) - - # Concatenate the transformed noise and input image - combined = layers.Concatenate(axis=-1)([input_image, z_transformed]) - - x = layers.Conv2D(64, (3, 3), strides=(1, 1), padding='same', activation='relu')(combined) - x = layers.BatchNormalization()(x) - - x = layers.Conv2D(32, (3, 3), strides=(1, 1), padding='same', activation='relu')(x) - x = layers.BatchNormalization()(x) - - # Generate a segmentation map - output = layers.Conv2D(1, (3, 3), activation='sigmoid', padding='same', name='output')(x) - - model = tf.keras.Model(inputs=[input_image, z], outputs=output) - return model - -# Discriminator -def build_discriminator(): - input_image = layers.Input(shape=input_shape, name='input_image') - segmentation_map = layers.Input(shape=(256, 256, 1), name='segmentation_map') - - combined = layers.Concatenate()([input_image, segmentation_map]) - - x = layers.Conv2D(32, (3, 3), strides=(2, 2), padding='same', activation='relu')(combined) - x = layers.BatchNormalization()(x) - - x = layers.Conv2D(64, (3, 3), strides=(2, 2), padding='same', activation='relu')(x) - x = layers.BatchNormalization()(x) - - x = layers.Flatten()(x) - output = layers.Dense(1, activation='sigmoid')(x) - - model = tf.keras.Model(inputs=[input_image, segmentation_map], outputs=output) - return model - -generator = build_generator() -discriminator = build_discriminator() - -# Losses and optimizers -bce_loss = tf.keras.losses.BinaryCrossentropy() -optimizer_g = tf.keras.optimizers.Adam(1e-4) -optimizer_d = tf.keras.optimizers.Adam(1e-4) - -# Define training steps -@tf.function -def train_step(images, masks, batch_size): - # Generate noise for generator - noise = tf.random.normal([tf.shape(images)[0], z_dim]) - - with tf.GradientTape() as gen_tape, tf.GradientTape() as disc_tape: - # Generate fake segmentation maps using generator - generated_masks = generator([images, noise], training=True) - - # Discriminator output for real and fake images - real_output = discriminator([images, masks], training=True) - fake_output = discriminator([images, generated_masks], training=True) - - # Generator loss: Adversarial loss + L1 loss for generated mask - gen_loss = bce_loss(tf.ones_like(fake_output), fake_output) - l1_loss = tf.reduce_mean(tf.abs(masks - generated_masks)) - total_gen_loss = gen_loss + (100.0 * l1_loss) - - # Discriminator loss - real_loss = 
bce_loss(tf.ones_like(real_output), real_output) - fake_loss = bce_loss(tf.zeros_like(fake_output), fake_output) - disc_loss = (real_loss + fake_loss) * 0.5 - - # Calculate gradients and apply updates - gradients_of_generator = gen_tape.gradient(total_gen_loss, generator.trainable_variables) - gradients_of_discriminator = disc_tape.gradient(disc_loss, discriminator.trainable_variables) - - optimizer_g.apply_gradients(zip(gradients_of_generator, generator.trainable_variables)) - optimizer_d.apply_gradients(zip(gradients_of_discriminator, discriminator.trainable_variables)) - - return total_gen_loss, disc_loss - -# Helper function to compute F1 score -def compute_f1_score(true_masks, pred_masks): - pred_masks_bin = tf.cast(pred_masks > 0.5, tf.float32) - - TP = tf.reduce_sum(true_masks * pred_masks_bin) - FP = tf.reduce_sum((1 - true_masks) * pred_masks_bin) - FN = tf.reduce_sum(true_masks * (1 - pred_masks_bin)) - F1 = (2 * TP) / (2 * TP + FP + FN + 1e-7) - return F1.numpy() - -def dice_coef(y_true, y_pred, smooth=1.0): - intersection = tf.reduce_sum(y_true * y_pred, axis=[1,2,3]) - union = tf.reduce_sum(y_true + y_pred, axis=[1,2,3]) - return tf.reduce_mean((2. * intersection + smooth) / (union + smooth), axis=0) - -def dice_coef_loss(y_true, y_pred): - return 1 - dice_coef(y_true, y_pred) - - -generator.compile(optimizer=optimizer_g, loss=dice_coef_loss) -discriminator.compile(optimizer=optimizer_d, loss=bce_loss) - -def train(dataset, val_dataset, epochs, batch_size): - history = {'train_loss': [], 'val_loss': [], 'f1_score': []} - - # Initialize the best_val_loss with a high value - best_val_loss = float('inf') - - for epoch in tqdm(range(epochs), desc="Training"): - train_losses = [] # store training losses for this epoch - - # Training - for image_batch, mask_batch in dataset: - gen_loss, disc_loss = train_step(image_batch, mask_batch, batch_size) # Assuming you already have the `train_step` function - train_losses.append(gen_loss) - - # Validation - total_f1_score = 0 - val_losses = [] # store validation losses for this epoch - - for val_image_batch, val_mask_batch in val_dataset: - # Generate segmentation masks using the generator - noise = tf.random.normal([val_image_batch.shape[0], z_dim]) - val_pred_masks = generator([val_image_batch, noise], training=False) - - # Compute validation loss and F1 score - val_loss = dice_coef_loss(val_mask_batch, val_pred_masks) # Implement your own loss_function or use a suitable one from TF - total_f1_score += compute_f1_score(val_mask_batch, val_pred_masks) - - val_losses.append(val_loss.numpy()) - - # Averaging metrics - avg_train_loss = np.mean(train_losses) - avg_val_loss = np.mean(val_losses) - avg_f1_score = total_f1_score / len(val_dataset) - - history['train_loss'].append(avg_train_loss) - history['val_loss'].append(avg_val_loss) - history['f1_score'].append(avg_f1_score) - - # Check if this epoch's val_loss is better than the best so far - if avg_val_loss < best_val_loss: - best_val_loss = avg_val_loss - generator.save(os.path.join(model_path, name_id + '_generator.h5')) - - return history - - -def load_img(filename, map_dir, legend_dir, seg_dir): - mapName = tf.strings.join([map_dir, filename[0]], separator='/') - legendName = tf.strings.join([legend_dir, filename[1]], separator='/') - - map_img = tf.io.read_file(mapName) - map_img = tf.cast(tf.io.decode_png(map_img), dtype=tf.float32) / 255.0 - - legend_img = tf.io.read_file(legendName) - legend_img = tf.cast(tf.io.decode_png(legend_img), dtype=tf.float32) / 255.0 - - map_img = 
tf.concat(axis=2, values=[map_img, legend_img]) - map_img = map_img*2.0 - 1.0 - map_img = tf.image.resize(map_img, [256, 256]) - - segName = tf.strings.join([seg_dir, filename[0]], separator='/') - seg_img = tf.io.read_file(segName) - seg_img = tf.io.decode_png(seg_img) - seg_img = tf.image.resize(seg_img, [256, 256]) - - return map_img, seg_img - -def load_train_img(filename): - return load_img(filename, - '/projects/bbym/shared/all_patched_data/training/poly/map_patches', - '/projects/bbym/shared/all_patched_data/training/poly/legend', - '/projects/bbym/shared/all_patched_data/training/poly/seg_patches') - -def load_validation_img(filename): - return load_img(filename, - '/projects/bbym/shared/all_patched_data/validation/poly/map_patches', - '/projects/bbym/shared/all_patched_data/validation/poly/legend', - '/projects/bbym/shared/all_patched_data/validation/poly/seg_patches') - -train_map_file = os.listdir('/projects/bbym/shared/all_patched_data/training/poly/map_patches') -random.shuffle(train_map_file) - -# Pre-filter map files based on existence of corresponding legend files -train_map_legend_names = [(x, '_'.join(x.split('_')[0:-2])+'.png') - for x in train_map_file - if os.path.exists(os.path.join('/projects/bbym/shared/all_patched_data/training/poly/legend', - '_'.join(x.split('_')[0:-2])+'.png'))] - -train_dataset = tf.data.Dataset.from_tensor_slices(train_map_legend_names) -train_dataset = train_dataset.map(load_train_img) -train_dataset = train_dataset.shuffle(5000, reshuffle_each_iteration=False).batch(128) - -validate_map_file = os.listdir('/projects/bbym/shared/all_patched_data/validation/poly/map_patches') - -# Pre-filter map files based on existence of corresponding legend files -validate_map_legend_names = [(x, '_'.join(x.split('_')[0:-2])+'.png') - for x in validate_map_file - if os.path.exists(os.path.join('/projects/bbym/shared/all_patched_data/validation/poly/legend', - '_'.join(x.split('_')[0:-2])+'.png'))] - -validate_dataset = tf.data.Dataset.from_tensor_slices(validate_map_legend_names) -validate_dataset = validate_dataset.map(load_validation_img) -validate_dataset = validate_dataset.batch(50) - -train(train_dataset, validate_dataset, epochs=100, batch_size=128) diff --git a/VAE-unet.py b/VAE-unet.py deleted file mode 100644 index 34fea008cf313e12d42b311f97ec4f65682af69d..0000000000000000000000000000000000000000 --- a/VAE-unet.py +++ /dev/null @@ -1,214 +0,0 @@ -import tensorflow as tf -import random -import os -import shutil -from keras import backend as K -from tensorflow.keras.utils import plot_model -from tensorflow.keras import layers, Model -from tensorflow.keras.optimizers import Adam -from tensorflow.keras.callbacks import ModelCheckpoint, EarlyStopping, ReduceLROnPlateau, TensorBoard - -def sampling(args): - """Reparameterization trick.""" - z_mean, z_log_var = args - batch, height, width, channels = tf.shape(z_mean)[0], tf.shape(z_mean)[1], tf.shape(z_mean)[2], tf.shape(z_mean)[3] - epsilon = tf.keras.backend.random_normal(shape=(batch, height, width, channels)) - return z_mean + tf.exp(0.5 * z_log_var) * epsilon - -def attention_block(x, g, inter_channel): - """Attention block. 
`x` is the local feature and `g` is the wider context.""" - theta_x = layers.Conv2D(inter_channel, (1, 1), strides=(1, 1))(x) - - phi_g = layers.Conv2D(inter_channel, (1, 1), strides=(1, 1))(g) - phi_g = layers.UpSampling2D(size=(2, 2))(phi_g) - - f = layers.Add()([theta_x, phi_g]) - f = layers.Activation('relu')(f) - - psi_f = layers.Conv2D(1, (1, 1), strides=(1, 1))(f) - psi_f = layers.Activation('sigmoid')(psi_f) - - return layers.Multiply()([x, psi_f]) - -def encoder_block(inputs, filters, attention=False, pool=True): - x = layers.Conv2D(filters, (3, 3), activation='relu', padding='same')(inputs) - x = layers.Conv2D(filters, (3, 3), activation='relu', padding='same')(x) - if pool: - if attention: - g = layers.MaxPooling2D(pool_size=(2, 2))(x) - x = attention_block(x, g, filters//2) - return x, layers.MaxPooling2D(pool_size=(2, 2))(x) - else: - return x, layers.MaxPooling2D(pool_size=(2, 2))(x) - else: - return x - -def decoder_block(inputs, skip_features, filters): - x = layers.Conv2DTranspose(filters, (2, 2), strides=(2, 2), padding='same')(inputs) - x = layers.Concatenate()([x, skip_features]) - x = layers.Conv2D(filters, (3, 3), activation='relu', padding='same')(x) - x = layers.Conv2D(filters, (3, 3), activation='relu', padding='same')(x) - return x - -def variational_unet(input_shape1, input_shape2): - # Encoder 1 - inputs1 = layers.Input(shape=input_shape1, name="legend_patch") - x1_1, p1_1 = encoder_block(inputs1, 64) - x1_2, p1_2 = encoder_block(p1_1, 128) - x1_3, p1_3 = encoder_block(p1_2, 256) - x1_4, p1_4 = encoder_block(p1_3, 512) - x1_5 = encoder_block(p1_4, 1024, pool=False) - - # Latent Space for Encoder 1 - z_mean = layers.Conv2D(1024, (1, 1))(x1_5) - z_log_var = layers.Conv2D(1024, (1, 1))(x1_5) - z = layers.Lambda(sampling)([z_mean, z_log_var]) - - # Encoder 2 with attention - inputs2 = layers.Input(shape=input_shape2, name="map_patch") - x2_1, p2_1 = encoder_block(inputs2, 64, attention=True) - x2_2, p2_2 = encoder_block(p2_1, 128, attention=True) - x2_3, p2_3 = encoder_block(p2_2, 256, attention=True) - x2_4, p2_4 = encoder_block(p2_3, 512, attention=True) - x2 = encoder_block(p2_4, 1024, attention=True, pool=False) - - # Concatenate at the bottleneck - bottleneck = layers.Concatenate()([z, x2]) - - print(bottleneck.shape, z.shape, x2.shape) - - # Decoder - x = decoder_block(bottleneck, x2_4, 512) - x = decoder_block(x, x2_3, 256) - x = decoder_block(x, x2_2, 128) - x = decoder_block(x, x2_1, 64) - - outputs = layers.Conv2D(1, (1, 1), activation='sigmoid')(x) - - return Model(inputs=[inputs1, inputs2], outputs=[outputs, [outputs, z_mean, z_log_var]]) - -def f1_score(y_true, y_pred): # Dice coefficient - smooth = 1. - y_true_f = K.flatten(y_true) - y_pred_f = K.flatten(y_pred) - intersection = K.sum(y_true_f * y_pred_f) - return (2. 
* intersection + smooth) / (K.sum(y_true_f) + K.sum(y_pred_f) + smooth) - - -def vae_loss(y_true, output, beta=1.0): - y_pred, z_mean, z_log_var = output[0], output[1], output[2] - # Reconstruction loss - recon_loss = f1_score(y_true, y_pred) - recon_loss = tf.reduce_mean(recon_loss) - - # KL divergence loss - kl_loss = 1 + z_log_var - tf.square(z_mean) - tf.exp(z_log_var) - kl_loss = -0.5 * tf.reduce_sum(kl_loss, axis=-1) - - # Total loss - return recon_loss + beta * kl_loss - -model = variational_unet((256, 256, 3), (256, 256, 3)) -model.summary() -model.compile(optimizer='adam', loss=[None, vae_loss]) - - -# Assuming 'model' is your instantiated model -plot_model(model, to_file='model_plot.png', show_shapes=True, show_layer_names=True) - -# def load_img(filename, map_dir, legend_dir, seg_dir): -# mapName = tf.strings.join([map_dir, filename[0]], separator='/') -# legendName = tf.strings.join([legend_dir, filename[1]], separator='/') - -# # Load and preprocess map_img -# map_img = tf.io.read_file(mapName) -# map_img = tf.cast(tf.io.decode_png(map_img), dtype=tf.float32) / 255.0 -# map_img = tf.image.resize(map_img, [256, 256]) - -# # Load and preprocess legend_img -# legend_img = tf.io.read_file(legendName) -# legend_img = tf.cast(tf.io.decode_png(legend_img), dtype=tf.float32) / 255.0 -# legend_img = tf.image.resize(legend_img, [256, 256]) - -# # Load and preprocess seg_img -# segName = tf.strings.join([seg_dir, filename[0]], separator='/') -# seg_img = tf.io.read_file(segName) -# seg_img = tf.io.decode_png(seg_img) -# seg_img = tf.image.resize(seg_img, [256, 256]) - -# return (legend_img, map_img), seg_img - -# def load_train_img(filename): -# return load_img(filename, -# '/projects/bbym/shared/all_patched_data/training/poly/map_patches', -# '/projects/bbym/shared/all_patched_data/training/poly/legend', -# '/projects/bbym/shared/all_patched_data/training/poly/seg_patches') - -# def load_validation_img(filename): -# return load_img(filename, -# '/projects/bbym/shared/all_patched_data/validation/poly/map_patches', -# '/projects/bbym/shared/all_patched_data/validation/poly/legend', -# '/projects/bbym/shared/all_patched_data/validation/poly/seg_patches') - - -# train_map_file = os.listdir('/projects/bbym/shared/all_patched_data/training/poly/map_patches') -# random.shuffle(train_map_file) - -# # Pre-filter map files based on existence of corresponding legend files -# train_map_legend_names = [(x, '_'.join(x.split('_')[0:-2])+'.png') -# for x in train_map_file -# if os.path.exists(os.path.join('/projects/bbym/shared/all_patched_data/training/poly/legend', -# '_'.join(x.split('_')[0:-2])+'.png'))] - -# train_dataset = tf.data.Dataset.from_tensor_slices(train_map_legend_names) -# train_dataset = train_dataset.map(load_train_img) -# train_dataset = train_dataset.shuffle(5000, reshuffle_each_iteration=False).batch(128) - -# validate_map_file = os.listdir('/projects/bbym/shared/all_patched_data/validation/poly/map_patches') - -# # Pre-filter map files based on existence of corresponding legend files -# validate_map_legend_names = [(x, '_'.join(x.split('_')[0:-2])+'.png') -# for x in validate_map_file -# if os.path.exists(os.path.join('/projects/bbym/shared/all_patched_data/validation/poly/legend', -# '_'.join(x.split('_')[0:-2])+'.png'))] - -# validate_dataset = tf.data.Dataset.from_tensor_slices(validate_map_legend_names) -# validate_dataset = validate_dataset.map(load_validation_img) -# validate_dataset = validate_dataset.batch(50) - -# 
################################################################ -# ##### Prepare the model configurations ######################### -# ################################################################ -# #You can change the id for each run so that all models and stats are saved separately. -# name_id = "VAE-unet" -# prediction_path = './predicts_'+name_id+'/' -# log_path = './logs_'+name_id+'/' -# model_path = './models_'+name_id+'/' -# save_model_path = './models_'+name_id+'/' - -# # Create the folder if it does not exist -# os.makedirs(model_path, exist_ok=True) -# os.makedirs(prediction_path, exist_ok=True) - -# name = 'VAE-unet' - -# logdir = log_path + name - -# if(os.path.isdir(logdir)): -# shutil.rmtree(logdir) - -# os.makedirs(logdir, exist_ok=True) -# tensorboard_callback = tf.keras.callbacks.TensorBoard(log_dir=logdir) - -# print('model location: '+ model_path+name+'.h5') -# # define hyperparameters and callback modules -# patience = 10 -# maxepoch = 500 -# callbacks = [ReduceLROnPlateau(monitor='val_loss', factor=0.7, patience=patience, min_lr=1e-9, verbose=1, mode='min'), -# EarlyStopping(monitor='val_loss', patience=patience, verbose=0), -# ModelCheckpoint(model_path+name+'.h5', monitor='val_loss', save_best_only=True, verbose=0), -# TensorBoard(log_dir=logdir)] - -# train_history = model.fit(train_dataset, validation_data = validate_dataset, -# batch_size = 16, epochs = maxepoch, verbose=1, -# callbacks = callbacks) \ No newline at end of file diff --git a/VAE-unet_updated.py b/VAE-unet_updated.py deleted file mode 100644 index 8370f3eb3abd25a940587a34bdbc110fa7070b2c..0000000000000000000000000000000000000000 --- a/VAE-unet_updated.py +++ /dev/null @@ -1,215 +0,0 @@ -import tensorflow as tf -import random -import os -import shutil -from keras import backend as K -from tensorflow.keras.utils import plot_model -from tensorflow.keras import layers, Model -from tensorflow.keras.optimizers import Adam -from tensorflow.keras.callbacks import ModelCheckpoint, EarlyStopping, ReduceLROnPlateau, TensorBoard - -def sampling(args): - """Reparameterization trick.""" - z_mean, z_log_var = args - batch, height, width, channels = tf.shape(z_mean)[0], tf.shape(z_mean)[1], tf.shape(z_mean)[2], tf.shape(z_mean)[3] - epsilon = tf.keras.backend.random_normal(shape=(batch, height, width, channels)) - return z_mean + tf.exp(0.5 * z_log_var) * epsilon - -def attention_block(x, g, inter_channel): - """Attention block. 
`x` is the local feature and `g` is the wider context.""" - theta_x = layers.Conv2D(inter_channel, (1, 1), strides=(1, 1))(x) - - phi_g = layers.Conv2D(inter_channel, (1, 1), strides=(1, 1))(g) - phi_g = layers.UpSampling2D(size=(2, 2))(phi_g) - - f = layers.Add()([theta_x, phi_g]) - f = layers.Activation('relu')(f) - - psi_f = layers.Conv2D(1, (1, 1), strides=(1, 1))(f) - psi_f = layers.Activation('sigmoid')(psi_f) - - return layers.Multiply()([x, psi_f]) - -def encoder_block(inputs, filters, attention=False, pool=True): - x = layers.Conv2D(filters, (3, 3), activation='relu', padding='same')(inputs) - x = layers.Conv2D(filters, (3, 3), activation='relu', padding='same')(x) - if pool: - if attention: - g = layers.MaxPooling2D(pool_size=(2, 2))(x) - x = attention_block(x, g, filters//2) - return x, layers.MaxPooling2D(pool_size=(2, 2))(x) - else: - return x, layers.MaxPooling2D(pool_size=(2, 2))(x) - else: - return x - -def decoder_block(inputs, skip_features, filters): - x = layers.Conv2DTranspose(filters, (2, 2), strides=(2, 2), padding='same')(inputs) - x = layers.Concatenate()([x, skip_features]) - x = layers.Conv2D(filters, (3, 3), activation='relu', padding='same')(x) - x = layers.Conv2D(filters, (3, 3), activation='relu', padding='same')(x) - return x - -def variational_unet(input_shape1, input_shape2): - # Encoder 1 - inputs1 = layers.Input(shape=input_shape1, name="legend_patch") - x1_1, p1_1 = encoder_block(inputs1, 64) - x1_2, p1_2 = encoder_block(p1_1, 128) - x1_3, p1_3 = encoder_block(p1_2, 256) - x1_4, p1_4 = encoder_block(p1_3, 512) - x1_5 = encoder_block(p1_4, 1024, pool=False) - - # Latent Space for Encoder 1 - # z_mean = layers.Conv2D(1024, (1, 1))(x1_5) - # z_log_var = layers.Conv2D(1024, (1, 1))(x1_5) - # z = layers.Lambda(sampling)([z_mean, z_log_var]) - - # Encoder 2 with attention - inputs2 = layers.Input(shape=input_shape2, name="map_patch") - x2_1, p2_1 = encoder_block(inputs2, 64, attention=True) - x2_2, p2_2 = encoder_block(p2_1, 128, attention=True) - x2_3, p2_3 = encoder_block(p2_2, 256, attention=True) - x2_4, p2_4 = encoder_block(p2_3, 512, attention=True) - x2_5 = encoder_block(p2_4, 1024, attention=True, pool=False) - - # Concatenate at the bottleneck - bottleneck = layers.Concatenate()([x1_5, x2_5]) - - print(bottleneck.shape, x1_5.shape, x2_5.shape) - - # Decoder - x = decoder_block(bottleneck, x2_4, 512) - x = decoder_block(x, x2_3, 256) - x = decoder_block(x, x2_2, 128) - x = decoder_block(x, x2_1, 64) - - outputs = layers.Conv2D(1, (1, 1), activation='sigmoid')(x) - - return Model(inputs=[inputs1, inputs2], outputs=[outputs]) - -# Use dice coefficient function as the loss function -def dice_coef(y_true, y_pred): - y_true_f = K.flatten(y_true) - y_pred_f = K.flatten(y_pred) - intersection = K.sum(y_true_f * y_pred_f) - return (2.0 * intersection + 1.0) / (K.sum(y_true_f) + K.sum(y_pred_f) + 1.0) - -# Jacard coefficient -def jacard_coef(y_true, y_pred): - y_true_f = K.flatten(y_true) - y_pred_f = K.flatten(y_pred) - intersection = K.sum(y_true_f * y_pred_f) - return (intersection + 1.0) / (K.sum(y_true_f) + K.sum(y_pred_f) - intersection + 1.0) - -# calculate loss value -def jacard_coef_loss(y_true, y_pred): - return -jacard_coef(y_true, y_pred) - -# calculate loss value -def dice_coef_loss(y_true, y_pred): - return -dice_coef(y_true, y_pred) - -model = variational_unet((256, 256, 3), (256, 256, 3)) -# model.summary() -model.compile(optimizer='adam', loss = dice_coef_loss, metrics=[dice_coef,'accuracy']) - - -# Assuming 'model' is your instantiated model 
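# plot_model below needs the optional pydot and graphviz packages; a guarded call
# (a minimal sketch, assuming those packages may be absent on the cluster) would be:
# try:
#     plot_model(model, to_file='model_plot.png', show_shapes=True, show_layer_names=True)
# except ImportError:
#     print("Skipping model plot: pydot/graphviz not installed.")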
-plot_model(model, to_file='model_plot.png', show_shapes=True, show_layer_names=True) - -def load_img(filename, map_dir, legend_dir, seg_dir): - mapName = tf.strings.join([map_dir, filename[0]], separator='/') - legendName = tf.strings.join([legend_dir, filename[1]], separator='/') - - # Load and preprocess map_img - map_img = tf.io.read_file(mapName) - map_img = tf.cast(tf.io.decode_png(map_img), dtype=tf.float32) / 255.0 - map_img = tf.image.resize(map_img, [256, 256]) - - # Load and preprocess legend_img - legend_img = tf.io.read_file(legendName) - legend_img = tf.cast(tf.io.decode_png(legend_img), dtype=tf.float32) / 255.0 - legend_img = tf.image.resize(legend_img, [256, 256]) - - # Load and preprocess seg_img - segName = tf.strings.join([seg_dir, filename[0]], separator='/') - seg_img = tf.io.read_file(segName) - seg_img = tf.io.decode_png(seg_img) - seg_img = tf.image.resize(seg_img, [256, 256]) - - return (legend_img, map_img), seg_img - -def load_train_img(filename): - return load_img(filename, - '/projects/bbym/shared/all_patched_data/training/poly/map_patches', - '/projects/bbym/shared/all_patched_data/training/poly/legend', - '/projects/bbym/shared/all_patched_data/training/poly/seg_patches') - -def load_validation_img(filename): - return load_img(filename, - '/projects/bbym/shared/all_patched_data/validation/poly/map_patches', - '/projects/bbym/shared/all_patched_data/validation/poly/legend', - '/projects/bbym/shared/all_patched_data/validation/poly/seg_patches') - - -train_map_file = os.listdir('/projects/bbym/shared/all_patched_data/training/poly/map_patches') -random.shuffle(train_map_file) - -# Pre-filter map files based on existence of corresponding legend files -train_map_legend_names = [(x, '_'.join(x.split('_')[0:-2])+'.png') - for x in train_map_file - if os.path.exists(os.path.join('/projects/bbym/shared/all_patched_data/training/poly/legend', - '_'.join(x.split('_')[0:-2])+'.png'))] - -train_dataset = tf.data.Dataset.from_tensor_slices(train_map_legend_names) -train_dataset = train_dataset.map(load_train_img) -train_dataset = train_dataset.shuffle(5000, reshuffle_each_iteration=False).batch(128) - -validate_map_file = os.listdir('/projects/bbym/shared/all_patched_data/validation/poly/map_patches') - -# Pre-filter map files based on existence of corresponding legend files -validate_map_legend_names = [(x, '_'.join(x.split('_')[0:-2])+'.png') - for x in validate_map_file - if os.path.exists(os.path.join('/projects/bbym/shared/all_patched_data/validation/poly/legend', - '_'.join(x.split('_')[0:-2])+'.png'))] - -validate_dataset = tf.data.Dataset.from_tensor_slices(validate_map_legend_names) -validate_dataset = validate_dataset.map(load_validation_img) -validate_dataset = validate_dataset.batch(50) - -################################################################ -##### Prepare the model configurations ######################### -################################################################ -#You can change the id for each run so that all models and stats are saved separately. 
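# A hedged alternative to a hand-edited id (not what this run used): derive it from a
# timestamp so repeated runs never overwrite each other, e.g.
#   name_id = "VAE-unet_" + datetime.now().strftime("%Y%m%d-%H%M%S")
# (this would also require `from datetime import datetime` at the top of the script).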
-name_id = "VAE-unet_dice_coeff" -prediction_path = './predicts_'+name_id+'/' -log_path = './logs_'+name_id+'/' -model_path = './models_'+name_id+'/' -save_model_path = './models_'+name_id+'/' - -# Create the folder if it does not exist -os.makedirs(model_path, exist_ok=True) -os.makedirs(prediction_path, exist_ok=True) - -name = name_id - -logdir = log_path + name_id - -if(os.path.isdir(logdir)): - shutil.rmtree(logdir) - -os.makedirs(logdir, exist_ok=True) -tensorboard_callback = tf.keras.callbacks.TensorBoard(log_dir=logdir) - -print('model location: '+ model_path+name+'.h5') -# define hyperparameters and callback modules -patience = 10 -maxepoch = 500 -callbacks = [ReduceLROnPlateau(monitor='val_loss', factor=0.7, patience=patience, min_lr=1e-9, verbose=1, mode='min'), - EarlyStopping(monitor='val_loss', patience=patience, verbose=0), - ModelCheckpoint(model_path+name+'.h5', monitor='val_loss', save_best_only=True, verbose=0), - TensorBoard(log_dir=logdir)] - -train_history = model.fit(train_dataset, validation_data = validate_dataset, - batch_size = 16, epochs = maxepoch, verbose=1, - callbacks = callbacks) \ No newline at end of file diff --git a/create_prediction_map.py b/create_prediction_map.py deleted file mode 100644 index c85d9c11871a5be343d596f1c2e5103246f30713..0000000000000000000000000000000000000000 --- a/create_prediction_map.py +++ /dev/null @@ -1,119 +0,0 @@ - -import os -import numpy as np -import rasterio -import tensorflow as tf -from PIL import Image -# import segmentation_models as sm -# from keras.models import load_model -from tensorflow.keras.models import load_model - -from data_util import DataLoader -from unet_util import (UNET_224, Residual_CNN_block, - attention_up_and_concatenate, - attention_up_and_concatenate2, dice_coef, - dice_coef_loss, evaluate_prediction_result, jacard_coef, - multiplication, multiplication2) - -# Set the limit to a larger value than the default -Image.MAX_IMAGE_PIXELS = 200000000 # For example, allow up to 200 million pixels - -def load_image_and_predict(map_file_name, prediction_path, model): - """ - Load map image, find corresponding legend images, create inputs, predict, and reconstruct images. - - Parameters: - map_file_name (str): Name of the map image file (e.g., 'AR_Maumee.tif'). - prediction_path (str): Path to save the predicted image. 
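        model (tf.keras.Model): Trained Keras model used to predict each patch.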
- """ - # Set the paths - map_dir = '/projects/bbym/shared/data/cma/validation/' - map_img_path = os.path.join(map_dir, map_file_name) - json_file_name = os.path.splitext(map_file_name)[0] + '.json' - json_file_path = os.path.join(map_dir, json_file_name) - - patch_size=(256, 256, 3) - overlap=30 - - # Instantiate DataLoader and get processed data - data_loader = DataLoader(map_img_path, json_file_path, patch_size, overlap) - processed_data = data_loader.get_processed_data() - - # 'poly_legends', 'pt_legends', 'line_legends' - for legend in ['poly_legends']: - for legend_img, legend_label in processed_data[legend]: - # Convert legend_img back to uint8 and scale values to 0-255 - legend_img_uint8 = tf.cast(legend_img * 255, dtype=tf.uint8).numpy() - legend_img_pil = Image.fromarray(legend_img_uint8) - - output_legend_img_path = os.path.join(prediction_path, f"{os.path.splitext(map_file_name)[0]}_{legend_label}.png") - legend_img_pil.save(output_legend_img_path, 'PNG') - - map_patches = processed_data['map_patches'] - total_row, total_col, _, _, _, _ = map_patches.shape - predicted_patches = np.zeros((total_row, total_col, patch_size[0], patch_size[1])) - - for i in range(total_row): - for j in range(total_col): - single_map_patch = map_patches[i, j, :, :][0] - - # Concatenate along the third axis and normalize - input_patch = tf.concat(axis=2, values=[single_map_patch, legend_img]) - input_patch = input_patch * 2.0 - 1.0 - - # Resize the input patch - input_patch_resized = tf.image.resize(input_patch, patch_size[:2]) - - # Expand dimensions for prediction - input_patch_expanded = tf.expand_dims(input_patch_resized, axis=0) - - # Make prediction and store it - predicted_patch = model.predict(input_patch_expanded, verbose = 0) - predicted_patches[i, j, :, :] = predicted_patch.squeeze() - - reconstructed_image = data_loader.reconstruct_data(predicted_patches) - reconstructed_image = (reconstructed_image * 255).astype(np.uint8) - - output_image_path = os.path.join(prediction_path, f"{os.path.splitext(map_file_name)[0]}_{legend_label}.tif") - - with rasterio.open(map_img_path) as src: - metadata = src.meta - - metadata.update({ - 'dtype': 'uint8', - 'count': 1, - 'height': reconstructed_image.shape[0], - 'width': reconstructed_image.shape[1], - 'compress': 'lzw', - }) - - with rasterio.open(output_image_path, 'w', **metadata) as dst: - dst.write(reconstructed_image, 1) - - print(f"Predicted image saved at: {output_image_path}") - -################################################################ -##### Prepare the model configurations ######################### -################################################################ -name_id = 'unproecessed_legends' #You can change the id for each run so that all models and stats are saved separately. 
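# Note: the id string 'unproecessed_legends' is kept verbatim, spelling included,
# because the model and prediction folders written at training time use this exact
# name; correcting it here would break the load_model call below.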
-prediction_path = './predicts_'+name_id+'/' -model_path = './models_'+name_id+'/' - -# Available backbones for the U-Net architecture: -# 'vgg16' 'vgg19' 'resnet18' 'resnet34' 'resnet50' 'resnet101' 'resnet152' 'inceptionv3' -# 'inceptionresnetv2' 'densenet121' 'densenet169' 'densenet201' 'seresnet18' 'seresnet34' -# 'seresnet50' 'seresnet101' 'seresnet152', and 'attentionUnet' -backend = 'attentionUnet' -name = 'Unet-'+ backend - -finetune = False -if (finetune): name += "_ft" - -model = load_model(model_path+name+'.h5', - custom_objects={'multiplication': multiplication, - 'multiplication2': multiplication2, - 'dice_coef_loss':dice_coef_loss, - 'dice_coef':dice_coef,}) - -# Example of how to use the function -load_image_and_predict('AR_Maumee.tif', prediction_path, model) diff --git a/eval_gan.py b/eval_gan.py deleted file mode 100644 index 371f60e2343534af71a598164600ff194d108e9f..0000000000000000000000000000000000000000 --- a/eval_gan.py +++ /dev/null @@ -1,161 +0,0 @@ -import os -import shutil -import random -import numpy as np -import tensorflow as tf -from datetime import datetime -from keras import backend as K -import matplotlib.pyplot as plt -import matplotlib.image as mpimg -import segmentation_models as sm -from tensorflow.keras import layers -from tensorflow.keras.models import Model, load_model -from tensorflow.keras.layers import Input, Conv2D, RandomFlip, RandomRotation -from tensorflow.keras.optimizers import Adam, SGD, RMSprop -from tensorflow.keras.callbacks import ModelCheckpoint, EarlyStopping, ReduceLROnPlateau, TensorBoard -from unet_util import dice_coef_loss, dice_coef, jacard_coef, Residual_CNN_block, multiplication, attention_up_and_concatenate, multiplication2, attention_up_and_concatenate2, UNET_224, evaluate_prediction_result - -def f1_score(y_true, y_pred): # Dice coefficient - smooth = 1. - y_true_f = K.flatten(y_true) - y_pred_f = K.flatten(y_pred) - intersection = K.sum(y_true_f * y_pred_f) - return (2. * intersection + smooth) / (K.sum(y_true_f) + K.sum(y_pred_f) + smooth) - -################################################################ -##### Prepare the model configurations ######################### -################################################################ -#You can change the id for each run so that all models and stats are saved separately. 
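# For reference: on binarized masks the f1_score above coincides with the Dice
# coefficient, since F1 = 2TP / (2TP + FP + FN) = 2|A∩B| / (|A| + |B|) for a
# predicted mask A and a ground-truth mask B.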
-name_id = "GAN_test" -prediction_path = './predicts_'+name_id+'/' -model_path = './models_'+name_id+'/' -save_model_path = './models_'+name_id+'/' - -os.makedirs(model_path, exist_ok=True) -os.makedirs(prediction_path, exist_ok=True) - -print('model location: '+ model_path + name_id + '_generator.h5') - -generator = load_model(os.path.join(model_path, name_id + '_generator.h5'), - custom_objects={'dice_coef_loss': dice_coef_loss}) - -generator.compile(optimizer = Adam(), - loss = dice_coef_loss, - metrics=[dice_coef,'accuracy', f1_score]) - -z_dim = 100 # global variable - -def load_img_with_noise(filename, map_dir, legend_dir): - mapName = tf.strings.join([map_dir, filename[0]], separator='/') - legendName = tf.strings.join([legend_dir, filename[1]], separator='/') - - map_img = tf.io.read_file(mapName) - map_img = tf.cast(tf.io.decode_png(map_img), dtype=tf.float32) / 255.0 - - legend_img = tf.io.read_file(legendName) - legend_img = tf.cast(tf.io.decode_png(legend_img), dtype=tf.float32) / 255.0 - - map_img = tf.concat(axis=2, values=[map_img, legend_img]) - map_img = map_img*2.0 - 1.0 - map_img = tf.image.resize(map_img, [256, 256]) - - noise = tf.random.normal([1, z_dim]) # generating noise for one image - - return map_img, noise - -def generate_image(filename, generator, map_dir, legend_dir): - map_img, noise = load_img_with_noise(filename, map_dir, legend_dir) - - # Ensure that map_img has batch dimension - map_img = tf.expand_dims(map_img, 0) - - generated_img = generator([map_img, noise], training=False) - - # Removing the batch dimension for visualization - generated_img = tf.squeeze(generated_img) - - return generated_img.numpy() - -# def load_validation_img(filename): -# return generate_image(filename, -# generator, # Add the generator here -# '/projects/bbym/shared/all_patched_data/validation/poly/map_patches', -# '/projects/bbym/shared/all_patched_data/validation/poly/legend') - -validate_map_file = os.listdir('/projects/bbym/shared/all_patched_data/validation/poly/map_patches') -validate_map_names = [(x, '_'.join(x.split('_')[0:-2])+'.png') for x in validate_map_file] -# validate_dataset = tf.data.Dataset.from_tensor_slices(validate_map_names) -# validate_dataset = validate_dataset.map(load_validation_img) -# validate_dataset = validate_dataset.batch(50) - -print("Load Data Done!") - - -print("Load Model Done!") - -def dice_coef(y_true, y_pred, smooth=1.0): - intersection = tf.reduce_sum(y_true * y_pred, axis=[1,2,3]) - union = tf.reduce_sum(y_true + y_pred, axis=[1,2,3]) - return tf.reduce_mean((2. 
* intersection + smooth) / (union + smooth), axis=0) - -def dice_coef_loss(y_true, y_pred): - return 1 - dice_coef(y_true, y_pred) - -# model.summary() -# eval_results = model.evaluate(validate_dataset, verbose=1) -# print(eval_results) -# print(f'Validation F1 score: {f1}') - -# If validate_dataset is a tf.data.Dataset instance -def plotResult(fileName, save_dir, generator): - map_img, noise = load_img_with_noise(fileName, - '/projects/bbym/shared/all_patched_data/validation/poly/map_patches', - '/projects/bbym/shared/all_patched_data/validation/poly/legend') - generated_img = generator([tf.expand_dims(map_img, 0), noise], training=False) - predicted_binary = (generated_img > 0.5).numpy().astype(np.uint8) # thresholding - - mapName = '/projects/bbym/shared/all_patched_data/validation/poly/map_patches/' + fileName[0] - segName = '/projects/bbym/shared/all_patched_data/validation/poly/seg_patches/' + fileName[0] - legendName = '/projects/bbym/shared/all_patched_data/validation/poly/legend/' + fileName[1] - - map_img = mpimg.imread(mapName) - seg_img = mpimg.imread(segName) - legend_img = mpimg.imread(legendName) - - # Visualization - plt.figure(figsize=(10, 2)) - - plt.subplot(1, 5, 1) - plt.title("map") - plt.imshow(map_img) - - plt.subplot(1, 5, 2) - plt.title("legend") - plt.imshow(legend_img) - - plt.subplot(1, 5, 3) - plt.title("true segmentation") - plt.imshow(seg_img, cmap='gray') - - plt.subplot(1, 5, 4) - plt.title("predicted segmentation") - plt.imshow(predicted_binary[0, :, :, 0]*255, cmap='gray') - - plt.subplot(1, 5, 5) - plt.title("error") - error_img = np.logical_xor(predicted_binary[0, :, :, 0], seg_img/255.0) # assuming seg_img is in [0, 255] - plt.imshow(error_img, cmap='gray') - - plt.savefig(save_dir + fileName[0] + '.png') - plt.close() - -n=20 - -for fileName in random.sample(validate_map_names, n): - print(fileName) - plotResult(fileName, prediction_path, generator) # Assuming generator is available in this context -print("Save Images Done!") - diff --git a/eval_imagenet.py b/eval_imagenet.py index b96426000f37a40599c670d66a4329afa43bf7c8..8f68e2e5168ad7661214b71a6358a6f1b9cf343b 100644 --- a/eval_imagenet.py +++ b/eval_imagenet.py @@ -214,9 +214,12 @@ def plotResult(fileName, save_dir): n=20 +filenames = [] for fileName in random.sample(validate_map_legend_names, n): - print(fileName) + filenames.append(fileName) + # print(fileName) plotResult(fileName, prediction_path) print("Save Images Done!") +print(filenames) diff --git a/eval_vauner.py b/eval_vauner.py deleted file mode 100644 index 11c9ee579e462d13b2965b939b4e0f646dc15a81..0000000000000000000000000000000000000000 --- a/eval_vauner.py +++ /dev/null @@ -1,205 +0,0 @@ -import tensorflow as tf -import random -import os -import shutil -import numpy as np -import matplotlib.pyplot as plt -import matplotlib.image as mpimg -from keras import backend as K -from tensorflow.keras import layers, Model -from tensorflow.keras.models import load_model -from tensorflow.keras.optimizers import Adam -from tensorflow.keras.callbacks import ModelCheckpoint, EarlyStopping, ReduceLROnPlateau, TensorBoard - -# Use dice coefficient function as the loss function -def dice_coef(y_true, y_pred): - y_true_f = K.flatten(y_true) - y_pred_f = K.flatten(y_pred) - intersection = K.sum(y_true_f * y_pred_f) - return (2.0 * intersection + 1.0) / (K.sum(y_true_f) + K.sum(y_pred_f) + 1.0) - -# Jacard coefficient -def jacard_coef(y_true, y_pred): - y_true_f = K.flatten(y_true) - y_pred_f = K.flatten(y_pred) - intersection = K.sum(y_true_f * 
y_pred_f) - return (intersection + 1.0) / (K.sum(y_true_f) + K.sum(y_pred_f) - intersection + 1.0) - -# calculate loss value -def jacard_coef_loss(y_true, y_pred): - return -jacard_coef(y_true, y_pred) - -# calculate loss value -def dice_coef_loss(y_true, y_pred): - return -dice_coef(y_true, y_pred) - -def vae_loss(y_true, output, beta=1.0): - y_pred, z_mean, z_log_var = output[0], output[1], output[2] - # Reconstruction loss - recon_loss = tf.keras.losses.binary_crossentropy(y_true, y_pred) - recon_loss = tf.reduce_mean(recon_loss) - - # KL divergence loss - kl_loss = 1 + z_log_var - tf.square(z_mean) - tf.exp(z_log_var) - kl_loss = -0.5 * tf.reduce_sum(kl_loss, axis=-1) - - # Total loss - return recon_loss + beta * kl_loss - -def f1_score(y_true, y_pred): # Dice coefficient - smooth = 1. - y_true_f = K.flatten(y_true) - y_pred_f = K.flatten(y_pred) - intersection = K.sum(y_true_f * y_pred_f) - return (2. * intersection + smooth) / (K.sum(y_true_f) + K.sum(y_pred_f) + smooth) - -def load_img(filename, map_dir, legend_dir, seg_dir): - mapName = tf.strings.join([map_dir, filename[0]], separator='/') - legendName = tf.strings.join([legend_dir, filename[1]], separator='/') - - # Load and preprocess map_img - map_img = tf.io.read_file(mapName) - map_img = tf.cast(tf.io.decode_png(map_img), dtype=tf.float32) / 255.0 - map_img = tf.image.resize(map_img, [256, 256]) - - # Load and preprocess legend_img - legend_img = tf.io.read_file(legendName) - legend_img = tf.cast(tf.io.decode_png(legend_img), dtype=tf.float32) / 255.0 - legend_img = tf.image.resize(legend_img, [256, 256]) - - # Load and preprocess seg_img - segName = tf.strings.join([seg_dir, filename[0]], separator='/') - seg_img = tf.io.read_file(segName) - seg_img = tf.io.decode_png(seg_img) - seg_img = tf.image.resize(seg_img, [256, 256]) - - return (legend_img, map_img), seg_img - -def load_validation_img(filename): - return load_img(filename, - '/projects/bbym/shared/all_patched_data/validation/poly/map_patches', - '/projects/bbym/shared/all_patched_data/validation/poly/legend', - '/projects/bbym/shared/all_patched_data/validation/poly/seg_patches') - -validate_map_file = os.listdir('/projects/bbym/shared/all_patched_data/validation/poly/map_patches') - -# Pre-filter map files based on existence of corresponding legend files -validate_map_legend_names = [(x, '_'.join(x.split('_')[0:-2])+'.png') - for x in validate_map_file - if os.path.exists(os.path.join('/projects/bbym/shared/all_patched_data/validation/poly/legend', - '_'.join(x.split('_')[0:-2])+'.png'))] - -validate_dataset = tf.data.Dataset.from_tensor_slices(validate_map_legend_names) -validate_dataset = validate_dataset.map(load_validation_img) -validate_dataset = validate_dataset.batch(50) - -################################################################ -##### Prepare the model configurations ######################### -################################################################ -#You can change the id for each run so that all models and stats are saved separately. 
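# load_model below must be given every custom object referenced by the saved graph;
# if deserialization still fails, a common fallback (an assumption, not something this
# script does) is load_model(path, compile=False) followed by a fresh model.compile(...).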
-name_id = "VAE-unet_dice_coeff" -prediction_path = './predicts_'+name_id+'/' -log_path = './logs_'+name_id+'/' -model_path = './models_'+name_id+'/' -save_model_path = './models_'+name_id+'/' - -# Create the folder if it does not exist -os.makedirs(model_path, exist_ok=True) -os.makedirs(prediction_path, exist_ok=True) - -print('model location: '+ model_path + name_id + '.h5') - -model = load_model(os.path.join(model_path, name_id + '.h5'), - custom_objects={'dice_coef': dice_coef,'dice_coef_loss': dice_coef_loss}) - -model.compile(optimizer = Adam(), - loss = dice_coef_loss, - metrics=[dice_coef_loss,'accuracy']) - - -# If validate_dataset is a tf.data.Dataset instance -def plotResult(fileName, save_dir): - - test_dataset = tf.data.Dataset.from_tensor_slices([fileName]) - test_dataset = test_dataset.map(load_validation_img) - test_dataset = test_dataset.batch(1) - - predicted = model.predict(test_dataset) - print(predicted[0].shape) - - # Thresholding the predicted result to get binary values - threshold = 0.5 # you can adjust this value based on your requirement - predicted_binary = (predicted[0] > threshold).astype(np.uint8) # convert boolean to integer (1 or 0) - - mapName = '/projects/bbym/shared/all_patched_data/validation/poly/map_patches/' + fileName[0] - segName = '/projects/bbym/shared/all_patched_data/validation/poly/seg_patches/' + fileName[0] - legendName = '/projects/bbym/shared/all_patched_data/validation/poly/legend/' + fileName[1] - # legendName = '/projects/bbym/nathanj/validation/legend/' + fileName[1] - - map_img = mpimg.imread(mapName) - seg_img = mpimg.imread(segName) - label_img = mpimg.imread(legendName) - - # Set the figure size - plt.figure(figsize=(10, 2)) - - # Plot map image - plt.subplot(1, 5, 1) - plt.title("map") - plt.imshow(map_img) - # Plot legend image - plt.subplot(1, 5, 2) - plt.title("legend") - plt.imshow(label_img) - - # Plot true segmentation image - plt.subplot(1, 5, 3) - plt.title("label") - plt.imshow(seg_img, cmap='gray') - - # Plot predicted segmentation image - plt.subplot(1, 5, 4) - plt.title("prediction") - # print(predicted_binary.shape) - plt.imshow(predicted_binary*255, cmap='gray') - - # Plot error image - plt.subplot(1, 5, 5) - plt.title("error") - - # Normalize both images to the range [0, 1] if they aren't already - seg_img_normalized = seg_img / 255.0 if seg_img.max() > 1 else seg_img - seg_img_normalized = np.expand_dims(seg_img_normalized, axis=-1) - predicted_normalized = predicted_binary if predicted_binary .max() <= 1 else predicted_binary / 255.0 - - # Calculate the error image - error_img = seg_img_normalized - predicted_normalized # simple difference - # error_img = np.logical_xor(predicted_binary, seg_img) - - # Alternatively, for absolute difference: - # error_img = np.abs(seg_img_normalized - predicted_normalized) - - # Display the error image - print(seg_img_normalized.shape, predicted_normalized.shape, error_img.shape) - cax = plt.imshow(error_img) - - # Set the color scale limits if necessary - # cax.set_clim(vmin=-1, vmax=1) # adjust as needed - - # # Add color bar to help interpret the error image - # cbar = plt.colorbar(cax, orientation='vertical', shrink=0.75) - # cbar.set_label('Error Magnitude', rotation=270, labelpad=15) - - # Save the entire figure - plt.savefig(prediction_path + fileName[0] + '.png') - - # Close the figure to release resources - plt.close() - - -n=20 - -for fileName in random.sample(validate_map_legend_names, n): - print(fileName) - plotResult(fileName, prediction_path) -print("Save Images 
Done!") \ No newline at end of file diff --git a/inference.py b/inference.py index a11e0a48434cd409971e60121462fcac1291a6da..d84132a028cf8ce6800c73437f2e8dcf4e2c405b 100644 --- a/inference.py +++ b/inference.py @@ -2,6 +2,7 @@ import argparse import math import os import numpy as np +import rasterio import tensorflow as tf from keras.models import load_model from data_util import DataLoader @@ -12,20 +13,62 @@ from unet_util import (UNET_224, Residual_CNN_block, dice_coef_loss, evaluate_prediction_result, jacard_coef, multiplication, multiplication2) +# Declare h5_image as a global variable +h5_image = None -def perform_inference(patch, model): - prediction = model.predict(np.expand_dims(patch, axis=0)) - return prediction[0] +def perform_inference(legend_patch, map_patch, model): -def save_results(prediction, outputPath): - # TODO: Implement the function to save the prediction results to the specified output path. - # This is a placeholder and may need to be adjusted based on the desired output format. - with open(outputPath, 'w') as f: + global h5_image + + # Concatenate along the third axis and normalize + input_patch = tf.concat(axis=2, values=[map_patch, legend_patch]) + input_patch = input_patch * 2.0 - 1.0 + + # Resize the input patch + input_patch_resized = tf.image.resize(input_patch, (h5_image.patch_size, h5_image.patch_size)) + + # Expand dimensions once for prediction (the tensor is already batched after this, + # so it is passed to predict() directly; a second expand_dims would add a spurious axis) + input_patch_expanded = tf.expand_dims(input_patch_resized, axis=0) + + prediction = model.predict(input_patch_expanded) + + return prediction.squeeze() + +def save_results(prediction, outputPath, map_name, legend): + global h5_image + + output_image_path = os.path.join(outputPath, f"{map_name}_{legend}.tif") + + #### Waiting for georeferencing data + # with rasterio.open(map_img_path) as src: + # metadata = src.meta + + # metadata.update({ + # 'dtype': 'uint8', + # 'count': 1, + # 'height': reconstructed_image.shape[0], + # 'width': reconstructed_image.shape[1], + # 'compress': 'lzw', + # }) + + # with rasterio.open(output_image_path, 'w', **metadata) as dst: + # dst.write(reconstructed_image, 1) + + # print(f"Predicted image saved at: {output_image_path}") + # output_image_path = os.path.join(outputPath, f"{os.path.splitext(map_file_name)[0]}_{legend_label}.tif") + + with open(output_image_path, 'w') as f: f.write(str(prediction)) def main(args): + + global h5_image + # Load the HDF5 file using the H5Image class h5_image = H5Image(args.mapPath, mode='r') + + map_name = h5_image.get_maps() + map_legends = h5_image.get_layers(map_name) # Get the size of the map map_width, map_height = h5_image.get_map_size('map') @@ -34,25 +77,34 @@ def main(args): num_rows = math.ceil(map_width / h5_image.patch_size) num_cols = math.ceil(map_height / h5_image.patch_size) - # Create an empty array to store the full prediction - full_prediction = np.zeros((map_width, map_height)) - # Load the trained model - model = load_model(args.modelPath, custom_objects={'dice_coef_loss': dice_coef_loss, 'dice_coef': dice_coef}) - + model = load_model(args.modelPath, custom_objects={'multiplication': multiplication, + 'multiplication2': multiplication2, + 'dice_coef_loss':dice_coef_loss, + 'dice_coef':dice_coef}) + # Loop through the patches and perform inference - for row in range(num_rows): - for col in range(num_cols): - patch = h5_image.get_patch(row, col, 'map') - prediction = perform_inference(patch, model) - - # Place the prediction in the corresponding position in the full_prediction array - x_start = row * 
h5_image.patch_size - y_start = col * h5_image.patch_size - full_prediction[x_start:x_start+h5_image.patch_size, y_start:y_start+h5_image.patch_size] = prediction - - # Save the results - save_results(full_prediction, args.outputPath) + for legend in map_legends: + + # Create an empty array to store the full prediction + full_prediction = np.zeros((map_width, map_height)) + + legend_patch = h5_image.get_layer(map_name, legend) + + for row in range(num_rows): + for col in range(num_cols): + + map_patch = h5_image.get_patch(row, col, 'map') + + prediction = perform_inference(legend_patch, map_patch, model) + + # Place the prediction in the corresponding position, clipping the final + # row/column of patches where they overshoot the map bounds + x_start = row * h5_image.patch_size + y_start = col * h5_image.patch_size + x_end = min(x_start + h5_image.patch_size, map_width) + y_end = min(y_start + h5_image.patch_size, map_height) + full_prediction[x_start:x_end, y_start:y_end] = prediction[:x_end-x_start, :y_end-y_start] + + # Save the results + save_results(full_prediction, args.outputPath, map_name, legend) # Close the HDF5 file h5_image.close() diff --git a/test/create_pathes.py b/test/create_pathes.py deleted file mode 100644 index e37d52ebf7d9f97df2b182a4ef0874d36b45b196..0000000000000000000000000000000000000000 --- a/test/create_pathes.py +++ /dev/null @@ -1,146 +0,0 @@ -## The code is copied from Github : https://github.com/priyammaz/DARPAMapExtraction.git -## https://github.com/priyammaz/DARPAMapExtraction/blob/main/data_preprocessing/create_train_patches.py - -import os -import numpy as np -import json -import glob -from itertools import chain -# import module -import traceback - -import matplotlib.pyplot as plt -from PIL import Image -import imageio - -import cv2 -from patchify import patchify, unpatchify - -def hisEqulColor(img): - """ - Histogram equalization on the luma (Y) channel of YCrCb. - img: 3 channel numpy array - """ - ycrcb=cv2.cvtColor(img,cv2.COLOR_BGR2YCR_CB) - channels=cv2.split(ycrcb) - cv2.equalizeHist(channels[0],channels[0]) - cv2.merge(channels,ycrcb) - cv2.cvtColor(ycrcb,cv2.COLOR_YCR_CB2BGR,img) - return img - -def create_patched_image(mapName, HE=False): - # mapName = 'CA_Dubakella.tif' - - mapPath = os.path.join(input_filePath, mapName) - print("Map path:", mapPath) - jsonPath = os.path.join(input_filePath, mapName[0:-4]+'.json') - # print(jsonPath) - - map_img = cv2.imread(mapPath) - - # histogram-equalize the image - if HE: - map_img = hisEqulColor(map_img) - - # this is for training, so there is no need to unpatchify afterwards - patch_dims = (256,256) - map_im_dims = map_img.shape - patch_overlap = 32 - patch_step = patch_dims[1]-patch_overlap - - map_patchs = patchify(map_img, (*patch_dims,3), patch_step) - - # crop every legend entry from the map sheet and save it as an image - # read-in json legend - with open(jsonPath, 'r') as f: - jsonData = json.load(f) - # print(jsonData) - - LegendList = [x['label'] for x in jsonData['shapes']] - # print(LegendList) - for label_dict in jsonData['shapes']: - - point_coord = label_dict['points'] - flatten_list = list(chain.from_iterable(point_coord)) - - if point_coord[0][0] >= point_coord[1][0] or point_coord[0][1] >= point_coord[1][1] or (len(flatten_list)!=4): - # print("Coordinate that has problem: ", mapPath, label_dict['label'], point_coord) - x_coord = [x[0] for x in point_coord] - y_coord = [x[1] for x in point_coord] - x_low, y_low, x_hi, y_hi = int(min(x_coord)), int(min(y_coord)), int(max(x_coord)), int(max(y_coord)) - - else: x_low, y_low, x_hi, y_hi = [int(x) for x in flatten_list] - - legend_coor = [(x_low, y_low), (x_hi, y_hi)] - shift_pixel = 4 - im_crop = map_img[y_low+shift_pixel:y_hi-shift_pixel, 
x_low+shift_pixel:x_hi-shift_pixel] # need to resize - - im_crop_resize = cv2.resize(im_crop, dsize=patch_dims, interpolation=cv2.INTER_CUBIC) - - writefile = mapName.split('.')[0]+'_'+label_dict['label']+'.png' - print(writefile) - if label_dict['label'].endswith('_poly'): - imageio.imwrite(os.path.join(write_filePath, 'poly', 'legend', writefile), im_crop_resize.astype(np.uint8)) - elif label_dict['label'].endswith('_line'): - imageio.imwrite(os.path.join(write_filePath, 'line', 'legend', writefile), im_crop_resize.astype(np.uint8)) - if label_dict['label'].endswith('_pt'): - imageio.imwrite(os.path.join(write_filePath, 'point', 'legend', writefile), im_crop_resize.astype(np.uint8)) - - # keep a patch only when its segmentation mask has enough positive pixels (np.sum thresholds below) - for Legend in LegendList: - - segTif = mapPath.split('.')[0]+'_'+Legend+'.tif' - print(segTif) - seg_img = cv2.imread(segTif) - seg_patchs = patchify(seg_img, (*patch_dims,3), patch_step) - - for i in range(seg_patchs.shape[0]): - for j in range(seg_patchs.shape[1]): - - filename = mapPath.split('.')[0].split('/')[-1] - writefile = '_'.join([filename, Legend, str(i), str(j)])+'.png' - - if Legend.endswith('_poly') and np.sum(seg_patchs[i][j][0]) > 100: - write_seg = os.path.join(write_filePath, 'poly', 'seg_patches', writefile) - write_map = os.path.join(write_filePath, 'poly', 'map_patches', writefile) - imageio.imwrite(write_seg, (seg_patchs[i][j][0][:,:,0]).astype(np.uint8)) - imageio.imwrite(write_map, (map_patchs[i][j][0]).astype(np.uint8)) - - elif Legend.endswith('_line') and np.sum(seg_patchs[i][j][0]) > 5: - write_seg = os.path.join(write_filePath, 'line', 'seg_patches', writefile) - write_map = os.path.join(write_filePath, 'line', 'map_patches', writefile) - imageio.imwrite(write_seg, (seg_patchs[i][j][0][:,:,0]).astype(np.uint8)) - imageio.imwrite(write_map, (map_patchs[i][j][0]).astype(np.uint8)) - - if Legend.endswith('_pt') and np.sum(seg_patchs[i][j][0]) > 0: - write_seg = os.path.join(write_filePath, 'point', 'seg_patches', writefile) - write_map = os.path.join(write_filePath, 'point', 'map_patches', writefile) - imageio.imwrite(write_seg, (seg_patchs[i][j][0][:,:,0]).astype(np.uint8)) - imageio.imwrite(write_map, (map_patchs[i][j][0]).astype(np.uint8)) - -if __name__ == "__main__": - ## define file path - # input_filePath = '/projects/bbym/shared/data/cma/training' - # write_filePath = '/projects/bbym/nathanj/attentionUnet/data/validation_samples' - input_filePath = '/projects/bbym/shared/data/cma/training' - write_filePath = '/projects/bbym/nathanj/attentionUnet/data/training_samples' - - jsonFiles = [x.split('/')[-1] for x in glob.glob(input_filePath+'/'+'*.json')] - # print(jsonFiles) - - for jsonFile in jsonFiles: - # print(jsonFile) - if os.path.exists(os.path.join(write_filePath, 'finished', jsonFile)): - continue - else: - with open(os.path.join(write_filePath, 'finished', jsonFile), 'w') as fp: - pass - try: - mapName = jsonFile[0:-5]+'.tif' - # print(mapName) - create_patched_image(mapName) - except Exception as e: - print(e) - traceback.print_exc() - print("A file has something wrong with its legend: ", jsonFile) - break diff --git a/test/create_pathes_224.py b/test/create_pathes_224.py deleted file mode 100644 index 3ef08561bfb6e86ea5616c7a1a7a327e9ffecbe7..0000000000000000000000000000000000000000 --- a/test/create_pathes_224.py +++ /dev/null @@ -1,151 +0,0 @@ -## The code is copied from Github : https://github.com/priyammaz/DARPAMapExtraction.git -## 
https://github.com/priyammaz/DARPAMapExtraction/blob/main/data_preprocessing/create_train_patches.py -import os -import numpy as np -import json -import glob -from itertools import chain -# import module -import traceback -import matplotlib.pyplot as plt -from PIL import Image -import imageio -import cv2 -from patchify import patchify, unpatchify - -def hisEqulColor(img): - """ - Histogram equalization on the luma (Y) channel of YCrCb. - img: 3 channel numpy array - """ - ycrcb=cv2.cvtColor(img,cv2.COLOR_BGR2YCR_CB) - channels=cv2.split(ycrcb) - cv2.equalizeHist(channels[0],channels[0]) - cv2.merge(channels,ycrcb) - cv2.cvtColor(ycrcb,cv2.COLOR_YCR_CB2BGR,img) - return img - -def create_patched_image(mapName, HE=False): - # mapName = 'CA_Dubakella.tif' - - mapPath = os.path.join(input_filePath, mapName) - print("Map path:", mapPath) - jsonPath = os.path.join(input_filePath, mapName[0:-4]+'.json') - # print(jsonPath) - - map_img = cv2.imread(mapPath) - - # histogram-equalize the image - if HE: - map_img = hisEqulColor(map_img) - - # this is for training, so there is no need to unpatchify afterwards - patch_dims = (224,224) - map_im_dims = map_img.shape - patch_overlap = 32 - patch_step = patch_dims[1]-patch_overlap - - map_patchs = patchify(map_img, (*patch_dims,3), patch_step) - - # crop every legend entry from the map sheet and save it as an image - # read-in json legend - with open(jsonPath, 'r') as f: - jsonData = json.load(f) - # print(jsonData) - - LegendList = [x['label'] for x in jsonData['shapes']] - # print(LegendList) - for label_dict in jsonData['shapes']: - - point_coord = label_dict['points'] - flatten_list = list(chain.from_iterable(point_coord)) - - if point_coord[0][0] >= point_coord[1][0] or point_coord[0][1] >= point_coord[1][1] or (len(flatten_list)!=4): - # print("Coordinate that has problem: ", mapPath, label_dict['label'], point_coord) - x_coord = [x[0] for x in point_coord] - y_coord = [x[1] for x in point_coord] - x_low, y_low, x_hi, y_hi = int(min(x_coord)), int(min(y_coord)), int(max(x_coord)), int(max(y_coord)) - - else: x_low, y_low, x_hi, y_hi = [int(x) for x in flatten_list] - - legend_coor = [(x_low, y_low), (x_hi, y_hi)] - shift_pixel = 4 - im_crop = map_img[y_low+shift_pixel:y_hi-shift_pixel, x_low+shift_pixel:x_hi-shift_pixel] # need to resize - - im_crop_resize = cv2.resize(im_crop, dsize=patch_dims, interpolation=cv2.INTER_CUBIC) - - writefile = mapName.split('.')[0]+'_'+label_dict['label']+'.png' - print(writefile) - if label_dict['label'].endswith('_poly'): - imageio.imwrite(os.path.join(write_filePath, 'poly', 'legend', writefile), im_crop_resize.astype(np.uint8)) - elif label_dict['label'].endswith('_line'): - imageio.imwrite(os.path.join(write_filePath, 'line', 'legend', writefile), im_crop_resize.astype(np.uint8)) - if label_dict['label'].endswith('_pt'): - imageio.imwrite(os.path.join(write_filePath, 'point', 'legend', writefile), im_crop_resize.astype(np.uint8)) - - # keep a patch only when its segmentation mask has enough positive pixels (np.sum thresholds below) - for Legend in LegendList: - - segTif = mapPath.split('.')[0]+'_'+Legend+'.tif' - print(segTif) - seg_img = cv2.imread(segTif) - seg_patchs = patchify(seg_img, (*patch_dims,3), patch_step) - - for i in range(seg_patchs.shape[0]): - for j in range(seg_patchs.shape[1]): - - filename = mapPath.split('.')[0].split('/')[-1] - writefile = '_'.join([filename, Legend, str(i), str(j)])+'.png' - - if Legend.endswith('_poly') and np.sum(seg_patchs[i][j][0]) > 100: - write_seg = os.path.join(write_filePath, 'poly', 'seg_patches', writefile) - write_map = os.path.join(write_filePath, 'poly', 'map_patches', 
writefile)
-                    imageio.imwrite(write_seg, (seg_patchs[i][j][0][:,:,0]).astype(np.uint8))
-                    imageio.imwrite(write_map, (map_patchs[i][j][0]).astype(np.uint8))
-
-                elif Legend.endswith('_line') and np.sum(seg_patchs[i][j][0]) > 5:
-                    write_seg = os.path.join(write_filePath, 'line', 'seg_patches', writefile)
-                    write_map = os.path.join(write_filePath, 'line', 'map_patches', writefile)
-                    imageio.imwrite(write_seg, (seg_patchs[i][j][0][:,:,0]).astype(np.uint8))
-                    imageio.imwrite(write_map, (map_patchs[i][j][0]).astype(np.uint8))
-
-                elif Legend.endswith('_pt') and np.sum(seg_patchs[i][j][0]) > 0:
-                    write_seg = os.path.join(write_filePath, 'point', 'seg_patches', writefile)
-                    write_map = os.path.join(write_filePath, 'point', 'map_patches', writefile)
-                    imageio.imwrite(write_seg, (seg_patchs[i][j][0][:,:,0]).astype(np.uint8))
-                    imageio.imwrite(write_map, (map_patchs[i][j][0]).astype(np.uint8))
-
-if __name__ == "__main__":
-    ## define file path
-    input_filePath = '/projects/bbym/shared/data/cma/training'
-    write_filePath = '/projects/bbym/nathanj/attentionUnet/data_224/validation_samples'
-    # write_filePath = '/projects/bbym/nathanj/attentionUnet/data_224/training_samples'
-
-    jsonFiles = [x.split('/')[-1] for x in glob.glob(input_filePath+'/'+'*.json')]
-    # print(jsonFiles)
-    i = 0
-    num_train_img = 20
-
-    for jsonFile in jsonFiles:
-        # print(jsonFile)
-        if os.path.exists(os.path.join(write_filePath, 'finished', jsonFile)):
-            continue
-        else:
-            with open(os.path.join(write_filePath, 'finished', jsonFile), 'w') as fp:
-                pass
-        try:
-            mapName = jsonFile[0:-5]+'.tif'
-            # print(mapName)
-            create_patched_image(mapName)
-        except Exception as e:
-            print(e)
-            traceback.print_exc()
-            print("A file has something wrong with its legend: ", jsonFile)
-
-        if(i > num_train_img):
-            break
-
-        i = i + 1
diff --git a/test/create_sample_output.py b/test/create_sample_output.py
deleted file mode 100644
index 187dbfb2efc3f564d03c764985b6863096d8b2c9..0000000000000000000000000000000000000000
--- a/test/create_sample_output.py
+++ /dev/null
@@ -1,203 +0,0 @@
-
-import os
-import shutil
-import random
-import rasterio
-import numpy as np
-from PIL import Image
-import tensorflow as tf
-from datetime import datetime
-from keras import backend as K
-import matplotlib.pyplot as plt
-import matplotlib.image as mpimg
-# import segmentation_models as sm
-# from keras.models import load_model
-from patchify import patchify, unpatchify
-from tensorflow.keras.models import Model, load_model
-from tensorflow.keras.layers import Input, Conv2D, RandomFlip, RandomRotation
-from tensorflow.keras.optimizers import Adam, SGD, RMSprop
-from tensorflow.keras.callbacks import ModelCheckpoint, EarlyStopping, ReduceLROnPlateau, TensorBoard
-from unet_util import dice_coef_loss, dice_coef, jacard_coef, Residual_CNN_block, multiplication, attention_up_and_concatenate, multiplication2, attention_up_and_concatenate2, UNET_224, evaluate_prediction_result
-
-# Set the limit to a larger value than the default
-Image.MAX_IMAGE_PIXELS = 200000000 # For example, allow up to 200 million pixels
-
-################################################################
-##### Prepare the model configurations #########################
-################################################################
-name_id = 'unproecessed_legends' #You can change the id for each run so that all models and stats are saved separately.
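Editor's note: a minimal, self-contained sketch of the overlap arithmetic used by the create_pathes*.py scripts above; the demo image size here is an illustrative stand-in, not from the original code.

import numpy as np
from patchify import patchify

patch_dims, patch_overlap = (224, 224), 32
patch_step = patch_dims[1] - patch_overlap  # 192-pixel stride between windows

demo_img = np.zeros((1000, 1400, 3), dtype=np.uint8)  # stand-in for a map image
patches = patchify(demo_img, (*patch_dims, 3), patch_step)
# patches.shape == (5, 7, 1, 224, 224, 3): grid rows, grid cols, 1, patch H, patch W, channels
assert patches.shape[0] == (demo_img.shape[0] - patch_dims[0]) // patch_step + 1
assert patches.shape[1] == (demo_img.shape[1] - patch_dims[1]) // patch_step + 1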
-prediction_path = './predicts_'+name_id+'/'
-model_path = './models_'+name_id+'/'
-
-# Available backbones for the U-Net architecture
-# 'vgg16' 'vgg19' 'resnet18' 'resnet34' 'resnet50' 'resnet101' 'resnet152' 'inceptionv3'
-# 'inceptionresnetv2' 'densenet121' 'densenet169' 'densenet201' 'seresnet18' 'seresnet34'
-# 'seresnet50' 'seresnet101' 'seresnet152', and 'attentionUnet'
-backend = 'attentionUnet'
-name = 'Unet-'+ backend
-
-finetune = False
-if (finetune): name += "_ft"
-
-model = load_model(model_path+name+'.h5',
-                   custom_objects={'multiplication': multiplication,
-                                   'multiplication2': multiplication2,
-                                   'dice_coef_loss':dice_coef_loss,
-                                   'dice_coef':dice_coef,})
-
-
-# NOTE: this local definition overrides the unpatchify imported from patchify;
-# it averages the predictions of overlapping patches.
-def unpatchify(patches, img_shape, overlap=0):
-    """
-    Reconstructs an image from overlapping patches.
-
-    Parameters:
-    patches (numpy array): Image patches.
-    img_shape (tuple): Shape of the original image.
-    overlap (int): Overlap of patches along rows and columns.
-
-    Returns:
-    numpy array: Reconstructed image.
-    """
-    assert overlap >= 0, "Overlap should be non-negative"
-
-    step = patches.shape[2] - overlap
-    img = np.zeros(img_shape) # Initialize the image with zeros
-    patch_count = np.zeros(img_shape, dtype=int) # Count of patches contributing to each pixel
-
-    # Iterate over the patches, adding them into the reconstructed image
-    for i in range(patches.shape[0]):
-        for j in range(patches.shape[1]):
-            x_start = i * step
-            y_start = j * step
-            x_end = min(x_start + patches.shape[2], img_shape[0])
-            y_end = min(y_start + patches.shape[3], img_shape[1])
-
-            img[x_start:x_end, y_start:y_end] += patches[i, j, :x_end-x_start, :y_end-y_start]
-            patch_count[x_start:x_end, y_start:y_end] += 1
-
-    # Average the pixel values by dividing by the count of patches contributing to each pixel
-    img /= patch_count
-
-    return img
-
-
-def load_image_and_predict(map_file_name, prediction_path):
-    """
-    Load map image, find corresponding legend images, create inputs, predict, and reconstruct images.
-
-    Parameters:
-    map_file_name (str): Name of the map image file (e.g., 'AR_Maumee.tif').
-
- """ - # Define directories - map_dir = '/projects/bbym/shared/data/cma/validation/' - # legend_dir = '/projects/bbym/nathanj/attentionUnet/example_data/processed_legend/' # processed legends - legend_dir = '/projects/bbym/shared/all_patched_data/validation/poly/legend/' - - # Load and normalize map image - print(f"Loading and normalizing map image: {map_file_name}") - map_img_path = os.path.join(map_dir, map_file_name) - map_img = Image.open(map_img_path) - orig_size = map_img.size - map_img = np.array(map_img) / 255.0 - - # Define patch size and overlap - patch_size = (256, 256, 3) - overlap = 30 - step_size = patch_size[0] - overlap - - # Add pixels to make the image size divisible by step_size - pad_x = (step_size - (map_img.shape[1] % step_size)) % step_size - pad_y = (step_size - (map_img.shape[0] % step_size)) % step_size - map_img = np.pad(map_img, ((0, pad_y), (0, pad_x), (0, 0)), mode='constant') # mode can be 'edge' to pad with the edge values - - # Patchify image - print(f"Patchifying map image with overlap...") - map_patches = patchify(map_img, patch_size, step=step_size) - - base_name = os.path.splitext(map_file_name)[0] - - legend_files = [f for f in os.listdir(legend_dir) if f.startswith(base_name)] - print(f"Found {len(legend_files)} legend files associated with {map_file_name}") - - for legend_file in legend_files: - - # Load and normalize legend image using TensorFlow - legend_img_path = os.path.join(legend_dir, legend_file) - print(f"Processing legend file: {legend_img_path}") - - legend_img_raw = tf.io.read_file(legend_img_path) - legend_img = tf.cast(tf.io.decode_png(legend_img_raw), dtype=tf.float32) / 255.0 - - # Convert legend_img back to uint8 and scale values to 0-255 - legend_img_uint8 = tf.cast(legend_img * 255, dtype=tf.uint8).numpy() - - # Create a PIL Image object from the numpy array - legend_img_pil = Image.fromarray(legend_img_uint8) - - # Specify the path where you want to save the image - output_legend_img_path = os.path.join(prediction_path, f"processed_{legend_file}") - - # Save the image - legend_img_pil.save(output_legend_img_path, 'PNG') - - print(f"Map patches shape: {map_patches.shape}") - total_row, total_col, _, _, _, _ = map_patches.shape - predicted_patches = np.zeros((total_row, total_col, patch_size[0], patch_size[1])) - - print("Making predictions on patches...") - for i in range(total_row): - for j in range(total_col): - single_map_patch = map_patches[i, j, :, :][0] - - # # Convert numpy array to TensorFlow tensor and normalize it - # single_map_patch_tensor = tf.convert_to_tensor(single_map_patch, dtype=tf.float32) - - # Concatenate along the third axis and normalize - input_patch = tf.concat(axis=2, values=[single_map_patch, legend_img]) - input_patch = input_patch * 2.0 - 1.0 - - # Resize the input patch - input_patch_resized = tf.image.resize(input_patch, [256, 256]) - - # Expand dimensions for prediction - input_patch_expanded = tf.expand_dims(input_patch_resized, axis=0) - - # Make prediction and store it - predicted_patch = model.predict(input_patch_expanded, verbose = 0) - predicted_patches[i, j, :, :] = predicted_patch.squeeze() - - print("Reconstructing and saving predicted image...") - reconstructed_image = unpatchify(predicted_patches, map_img.shape[:2], overlap) - - # print(reconstructed_image.shape) - - # Crop to original size - reconstructed_image = reconstructed_image[:orig_size[1], :orig_size[0]] - # reconstructed_image = (reconstructed_image >= 0.5).astype(np.bool_) - - output_image_path = os.path.join(prediction_path, 
f'{os.path.splitext(legend_file)[0]}.tif')
-
-        # Read metadata from original image
-        with rasterio.open(map_img_path) as src:
-            metadata = src.meta
-
-        # Update metadata with new shape (if necessary)
-        metadata.update({
-            'dtype': 'uint8',
-            'count': 1,
-            'height': reconstructed_image.shape[0],
-            'width': reconstructed_image.shape[1],
-            'compress': 'lzw', # You can also try 'packbits', 'deflate', etc.
-        })
-
-        # Write output image with metadata
-        with rasterio.open(output_image_path, 'w', **metadata) as dst:
-            dst.write((reconstructed_image.astype(np.bool_))*255, 1)
-
-        print(f"Predicted image saved at: {output_image_path}")
-
-        break  # sample output: only the first legend is processed
-
-
-# Example of how to use the function
-load_image_and_predict('AR_Maumee.tif', prediction_path)
\ No newline at end of file
diff --git a/test/create_sample_output_json.py b/test/create_sample_output_json.py
deleted file mode 100644
index 7f084a0e912f6bac1caf2f4e24f78c45123cd6a1..0000000000000000000000000000000000000000
--- a/test/create_sample_output_json.py
+++ /dev/null
@@ -1,202 +0,0 @@
-
-import os
-import cv2
-import json
-import shutil
-import random
-import rasterio
-import numpy as np
-from PIL import Image
-import tensorflow as tf
-from datetime import datetime
-from keras import backend as K
-import matplotlib.pyplot as plt
-import matplotlib.image as mpimg
-# import segmentation_models as sm
-# from keras.models import load_model
-from patchify import patchify, unpatchify
-from tensorflow.keras.models import Model, load_model
-from tensorflow.keras.layers import Input, Conv2D, RandomFlip, RandomRotation
-from tensorflow.keras.optimizers import Adam, SGD, RMSprop
-from tensorflow.keras.callbacks import ModelCheckpoint, EarlyStopping, ReduceLROnPlateau, TensorBoard
-from unet_util import dice_coef_loss, dice_coef, jacard_coef, Residual_CNN_block, multiplication, attention_up_and_concatenate, multiplication2, attention_up_and_concatenate2, UNET_224, evaluate_prediction_result
-
-# Set the limit to a larger value than the default
-Image.MAX_IMAGE_PIXELS = 200000000 # For example, allow up to 200 million pixels
-
-################################################################
-##### Prepare the model configurations #########################
-################################################################
-name_id = 'unproecessed_legends' #You can change the id for each run so that all models and stats are saved separately.
-prediction_path = './predicts_'+name_id+'/'
-model_path = './models_'+name_id+'/'
-
-# Available backbones for the U-Net architecture
-# 'vgg16' 'vgg19' 'resnet18' 'resnet34' 'resnet50' 'resnet101' 'resnet152' 'inceptionv3'
-# 'inceptionresnetv2' 'densenet121' 'densenet169' 'densenet201' 'seresnet18' 'seresnet34'
-# 'seresnet50' 'seresnet101' 'seresnet152', and 'attentionUnet'
-backend = 'attentionUnet'
-name = 'Unet-'+ backend
-
-finetune = False
-if (finetune): name += "_ft"
-
-model = load_model(model_path+name+'.h5',
-                   custom_objects={'multiplication': multiplication,
-                                   'multiplication2': multiplication2,
-                                   'dice_coef_loss':dice_coef_loss,
-                                   'dice_coef':dice_coef,})
-
-# NOTE: this local definition overrides the unpatchify imported from patchify;
-# it keeps the per-pixel maximum over overlapping patches.
-def unpatchify(patches, img_shape, overlap=0):
-    """
-    Reconstructs an image from overlapping patches, keeping the maximum value
-    for overlapping pixels.
-
-    Parameters:
-    patches (numpy array): Image patches.
-    img_shape (tuple): Shape of the original image.
-    overlap (int): Overlap of patches along rows and columns.
-
-    Returns:
-    numpy array: Reconstructed image.
-
- """ - assert overlap >= 0, "Overlap should be non-negative" - - step = patches.shape[2] - overlap - img = np.zeros(img_shape) # Initialize the image with zeros - - # Iterate over the patches - for i in range(patches.shape[0]): - for j in range(patches.shape[1]): - x_start = i * step - y_start = j * step - x_end = min(x_start + patches.shape[2], img_shape[0]) - y_end = min(y_start + patches.shape[3], img_shape[1]) - - # Instead of summing, keep the maximum value among all patches contributing to each pixel - img[x_start:x_end, y_start:y_end] = np.maximum( - img[x_start:x_end, y_start:y_end], - patches[i, j, :x_end-x_start, :y_end-y_start] - ) - - return img - - -def load_image_and_predict(map_file_name, prediction_path): - """ - Load map image, find corresponding legend images, create inputs, predict, and reconstruct images. - - Parameters: - map_file_name (str): Name of the map image file (e.g., 'AR_Maumee.tif'). - """ - # Define directories - map_dir = '/projects/bbym/shared/data/cma/validation/' - json_dir = map_dir - - # Load and normalize map image - print(f"Loading and normalizing map image: {map_file_name}") - map_img_path = os.path.join(map_dir, map_file_name) - map_img = Image.open(map_img_path) - orig_size = map_img.size - map_img = np.array(map_img) / 255.0 - - # Define patch size and overlap - patch_size = (256, 256, 3) - overlap = 30 - step_size = patch_size[0] - overlap - - # Add pixels to make the image size divisible by step_size - pad_x = (step_size - (map_img.shape[1] % step_size)) % step_size - pad_y = (step_size - (map_img.shape[0] % step_size)) % step_size - map_img = np.pad(map_img, ((0, pad_y), (0, pad_x), (0, 0)), mode='constant') # mode can be 'edge' to pad with the edge values - - # Patchify image - print(f"Patchifying map image with overlap...") - map_patches = patchify(map_img, patch_size, step=step_size) - - # Read JSON file - json_file_name = os.path.splitext(map_file_name)[0] + '.json' - json_file_path = os.path.join(json_dir, json_file_name) - - with open(json_file_path, 'r') as json_file: - json_data = json.load(json_file) - - # Extract legend rectangles from JSON data - legends = [shape for shape in json_data['shapes'] if 'poly' in shape['label']] - - for legend in legends: - # Extract and normalize the legend rectangle - points = np.array(legend['points']) - top_left = points.min(axis=0) - bottom_right = points.max(axis=0) - legend_img = map_img[int(top_left[1]):int(bottom_right[1]), int(top_left[0]):int(bottom_right[0]), :] - legend_img = tf.image.resize(legend_img, [256, 256]) - - # Convert legend_img back to uint8 and scale values to 0-255 - legend_img_uint8 = tf.cast(legend_img * 255, dtype=tf.uint8).numpy() - # Create a PIL Image object from the numpy array - legend_img_pil = Image.fromarray(legend_img_uint8) - # Specify the path where you want to save the image - output_legend_img_path = os.path.join(prediction_path, f"processed_{legend['label']}.png") - # Save the image - legend_img_pil.save(output_legend_img_path, 'PNG') - - print(f"Map patches shape: {map_patches.shape}") - total_row, total_col, _, _, _, _ = map_patches.shape - predicted_patches = np.zeros((total_row, total_col, patch_size[0], patch_size[1])) - - print("Making predictions on patches...") - for i in range(total_row): - for j in range(total_col): - single_map_patch = map_patches[i, j, :, :][0] - - # # Convert numpy array to TensorFlow tensor and normalize it - # single_map_patch_tensor = tf.convert_to_tensor(single_map_patch, dtype=tf.float32) - - # Concatenate along the third 
axis and normalize - input_patch = tf.concat(axis=2, values=[single_map_patch, legend_img]) - input_patch = input_patch * 2.0 - 1.0 - - # Resize the input patch - input_patch_resized = tf.image.resize(input_patch, [256, 256]) - - # Expand dimensions for prediction - input_patch_expanded = tf.expand_dims(input_patch_resized, axis=0) - - # Make prediction and store it - predicted_patch = model.predict(input_patch_expanded, verbose = 0) - predicted_patches[i, j, :, :] = predicted_patch.squeeze() - - print("Reconstructing and saving predicted image...") - reconstructed_image = unpatchify(predicted_patches, map_img.shape[:2], overlap) - - # print(reconstructed_image.shape) - - # Crop to original size - reconstructed_image = reconstructed_image[:orig_size[1], :orig_size[0]].astype(np.uint8) - print(np.unique(reconstructed_image)) - - output_image_path = os.path.join(prediction_path, f"{os.path.splitext(map_file_name)[0]}_{legend['label']}.tif") - - # Read metadata from original image - with rasterio.open(map_img_path) as src: - metadata = src.meta - - # Update metadata with new shape (if necessary) - metadata.update({ - 'dtype': 'uint8', - 'count': 1, - 'height': reconstructed_image.shape[0], - 'width': reconstructed_image.shape[1], - 'compress': 'lzw', # You can also try 'packbits', 'deflate', etc. - }) - - # Write output image with metadata - with rasterio.open(output_image_path, 'w', **metadata) as dst: - dst.write(reconstructed_image*255, 1) - - print(f"Predicted image saved at: {output_image_path}") - - -# Example of how to use the function -load_image_and_predict('AR_Maumee.tif', prediction_path) \ No newline at end of file diff --git a/test/eval.py b/test/eval.py deleted file mode 100644 index 5ea4fa2771bdae0dfcb9bc4a17cbb481e74b7048..0000000000000000000000000000000000000000 --- a/test/eval.py +++ /dev/null @@ -1,266 +0,0 @@ -import matplotlib.pyplot as plt -import matplotlib.image as mpimg - -import os -import shutil -import random -import numpy as np -import tensorflow as tf -from datetime import datetime -from keras import backend as K -import matplotlib.pyplot as plt -import matplotlib.image as mpimg -import segmentation_models as sm -from keras.models import load_model -from tensorflow.keras.models import Model, load_model -from tensorflow.keras.layers import Input, Conv2D, RandomFlip, RandomRotation -from tensorflow.keras.optimizers import Adam, SGD, RMSprop -from tensorflow.keras.callbacks import ModelCheckpoint, EarlyStopping, ReduceLROnPlateau, TensorBoard -from unet_util import dice_coef_loss, dice_coef, jacard_coef, dice_coef_loss, Residual_CNN_block, multiplication, attention_up_and_concatenate, multiplication2, attention_up_and_concatenate2, UNET_224, evaluate_prediction_result - - - -################################################################ -################# Load Validation Data ######################### -################################################################ - -# def load_img(filename, map_dir, legend_dir, seg_dir): -# mapName = tf.strings.join([map_dir, filename[0]], separator='/') -# legendName = tf.strings.join([legend_dir, filename[1]], separator='/') - -# map_img = tf.io.read_file(mapName) -# map_img = tf.cast(tf.io.decode_png(map_img), dtype=tf.float32) / 255.0 - -# legend_img = tf.io.read_file(legendName) -# legend_img = tf.cast(tf.io.decode_png(legend_img), dtype=tf.float32) / 255.0 - -# map_img = tf.concat(axis=2, values=[map_img, legend_img]) -# map_img = map_img*2.0 - 1.0 -# map_img = tf.image.resize(map_img, [256, 256]) - -# segName = 
tf.strings.join([seg_dir, filename[0]], separator='/') -# seg_img = tf.io.read_file(segName) -# seg_img = tf.io.decode_png(seg_img) -# seg_img = tf.image.resize(seg_img, [256, 256]) - -# return map_img, seg_img - -# def load_validation_img(filename): -# return load_img(filename, -# '/projects/bbym/shared/all_patched_data/validation/poly/map_patches', -# # '/projects/bbym/nathanj/validation/legend', # processed legends -# '/projects/bbym/shared/all_patched_data/validation/poly/legend', # unprocessed legends -# '/projects/bbym/shared/all_patched_data/validation/poly/seg_patches') - -# validate_map_file = os.listdir('/projects/bbym/shared/all_patched_data/validation/poly/map_patches') - -# # Pre-filter map files based on existence of corresponding legend files -# validate_map_legend_names = [(x, '_'.join(x.split('_')[0:-2])+'.png') -# for x in validate_map_file -# if os.path.exists(os.path.join( -# # '/projects/bbym/nathanj/validation/legend', # processed legends -# '/projects/bbym/shared/all_patched_data/validation/poly/legend' # unprocessed legends -# '_'.join(x.split('_')[0:-2])+'.png'))] - -# validate_dataset = tf.data.Dataset.from_tensor_slices(validate_map_legend_names) -# validate_dataset = validate_dataset.map(load_validation_img) -# validate_dataset = validate_dataset.batch(50) - -def load_img(filename, map_dir, legend_dir, seg_dir): - mapName = tf.strings.join([map_dir, filename[0]], separator='/') - legendName = tf.strings.join([legend_dir, filename[1]], separator='/') - - map_img = tf.io.read_file(mapName) - map_img = tf.cast(tf.io.decode_png(map_img), dtype=tf.float32) / 255.0 - - legend_img = tf.io.read_file(legendName) - legend_img = tf.cast(tf.io.decode_png(legend_img), dtype=tf.float32) / 255.0 - - map_img = tf.concat(axis=2, values=[map_img, legend_img]) - map_img = map_img*2.0 - 1.0 - map_img = tf.image.resize(map_img, [256, 256]) - - segName = tf.strings.join([seg_dir, filename[0]], separator='/') - seg_img = tf.io.read_file(segName) - seg_img = tf.io.decode_png(seg_img) - seg_img = tf.image.resize(seg_img, [256, 256]) - - return map_img, seg_img - -def load_validation_img(filename): - return load_img(filename, - '/projects/bbym/shared/all_patched_data/validation/poly/map_patches', - '/projects/bbym/shared/all_patched_data/validation/poly/legend', - '/projects/bbym/shared/all_patched_data/validation/poly/seg_patches') - - - -validate_map_file = os.listdir('/projects/bbym/shared/all_patched_data/validation/poly/map_patches') - -# Pre-filter map files based on existence of corresponding legend files -validate_map_legend_names = [(x, '_'.join(x.split('_')[0:-2])+'.png') - for x in validate_map_file - if os.path.exists(os.path.join('/projects/bbym/shared/all_patched_data/validation/poly/legend', - '_'.join(x.split('_')[0:-2])+'.png'))] - -validate_dataset = tf.data.Dataset.from_tensor_slices(validate_map_legend_names) -validate_dataset = validate_dataset.map(load_validation_img) -validate_dataset = validate_dataset.batch(50) - - -print("Load Data Done!") - - -################################################################ -##### Prepare the model configurations ######################### -################################################################ -name_id = 'unproecessed_legends' #You can change the id for each run so that all models and stats are saved separately. 
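Editor's note: the pre-filtering above relies on a filename convention in which a patch name is its legend name plus trailing row/column indices; a minimal check of that convention (file names here are illustrative).

patch_name = 'CA_Dubakella_Tv_poly_3_12.png'
legend_name = '_'.join(patch_name.split('_')[0:-2]) + '.png'
assert legend_name == 'CA_Dubakella_Tv_poly.png'  # the '_3_12' patch indices are dropped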
-input_data = './samples/'
-prediction_path = './predicts_'+name_id+'/'
-log_path = './logs_'+name_id+'/'
-model_path = './models_'+name_id+'/'
-save_model_path = './models_'+name_id+'/'
-
-# Create the folder if it does not exist
-os.makedirs(input_data, exist_ok=True)
-os.makedirs(model_path, exist_ok=True)
-os.makedirs(prediction_path, exist_ok=True)
-
-# Available backbones for the U-Net architecture
-# 'vgg16' 'vgg19' 'resnet18' 'resnet34' 'resnet50' 'resnet101' 'resnet152' 'inceptionv3'
-# 'inceptionresnetv2' 'densenet121' 'densenet169' 'densenet201' 'seresnet18' 'seresnet34'
-# 'seresnet50' 'seresnet101' 'seresnet152', and 'attentionUnet'
-backend = 'attentionUnet'
-
-name = 'Unet-'+ backend
-
-finetune = False
-if (finetune): name += "_ft"
-
-logdir = log_path + name
-if(os.path.isdir(logdir)):
-    shutil.rmtree(logdir)
-
-os.makedirs(logdir, exist_ok=True)
-tensorboard_callback = tf.keras.callbacks.TensorBoard(log_dir=logdir)
-
-print('model location: '+ model_path+name+'.h5')
-
-# Load the best model saved by the callback module (load_model is already imported above)
-if(backend != "attentionUnet"):
-    model = load_model(model_path+name+'.h5',
-                       custom_objects={'dice_coef_loss':dice_coef_loss,
-                                       'dice_coef':dice_coef,})
-else:
-    model = load_model(model_path+name+'.h5',
-                       custom_objects={'multiplication': multiplication,
-                                       'multiplication2': multiplication2,
-                                       'dice_coef_loss':dice_coef_loss,
-                                       'dice_coef':dice_coef,})
-print("Load Model Done!")
-
-def f1_score(y_true, y_pred): # Dice coefficient
-    smooth = 1.
-    y_true_f = K.flatten(y_true)
-    y_pred_f = K.flatten(y_pred)
-    intersection = K.sum(y_true_f * y_pred_f)
-    return (2. * intersection + smooth) / (K.sum(y_true_f) + K.sum(y_pred_f) + smooth)
-
-model.compile(optimizer = Adam(),
-              loss = dice_coef_loss,
-              metrics=[dice_coef,'accuracy', f1_score])
-
-eval_results = model.evaluate(validate_dataset, verbose=1)
-print(eval_results)
-# print(f'Validation F1 score: {f1}')
-
-# validate_dataset is a tf.data.Dataset instance
-def plotResult(fileName, save_dir):
-
-    test_dataset = tf.data.Dataset.from_tensor_slices([fileName])
-    test_dataset = test_dataset.map(load_validation_img)
-    test_dataset = test_dataset.batch(1)
-
-    # Extracting and visualizing the first image from the first batch
-    for batch in test_dataset.take(1): # Taking one batch from the dataset
-        input_test = batch[0] # Extracting the first image from the batch
-        print(input_test.shape)
-
-    predicted = model.predict(test_dataset)
-    print(predicted.shape)
-
-    # Thresholding the predicted result to get binary values
-    threshold = 0.5 # you can adjust this value based on your requirement
-    predicted_binary = (predicted > threshold).astype(np.uint8) # convert boolean to integer (1 or 0)
-
-    mapName = '/projects/bbym/shared/all_patched_data/validation/poly/map_patches/' + fileName[0]
-    segName = '/projects/bbym/shared/all_patched_data/validation/poly/seg_patches/' + fileName[0]
-    legendName = '/projects/bbym/shared/all_patched_data/validation/poly/legend/' + fileName[1]
-    # legendName = '/projects/bbym/nathanj/validation/legend/' + fileName[1]
-
-    map_img = mpimg.imread(mapName)
-    seg_img = mpimg.imread(segName)
-    label_img = mpimg.imread(legendName)
-
-    # Set the figure size
-    plt.figure(figsize=(10, 2))
-
-    # Plot map image
-    plt.subplot(1, 5, 1)
-    plt.title("map")
-    plt.imshow(input_test[0,:,:,:3])
-    # Plot legend image
-    plt.subplot(1, 5, 2)
-    plt.title("legend")
-    plt.imshow(input_test[0,:,:,3:])
-
-    # Plot true segmentation image
-    plt.subplot(1, 5, 3)
-    plt.title("true
segmentation") - plt.imshow(seg_img, cmap='gray') - - # Plot predicted segmentation image - plt.subplot(1, 5, 4) - plt.title("predicted segmentation") - plt.imshow(predicted_binary[0, :, :, 0]*255, cmap='gray') - - # Plot error image - plt.subplot(1, 5, 5) - plt.title("error") - - # Normalize both images to the range [0, 1] if they aren't already - seg_img_normalized = seg_img / 255.0 if seg_img.max() > 1 else seg_img - predicted_normalized = predicted_binary[0, :, :, 0] if predicted_binary.max() <= 1 else predicted_binary[0, :, :, 0] / 255.0 - - # Calculate the error image - # error_img = seg_img_normalized - predicted_normalized # simple difference - error_img = np.logical_xor(predicted_binary[0, :, :, 0], seg_img) - - # Alternatively, for absolute difference: - # error_img = np.abs(seg_img_normalized - predicted_normalized) - - # Display the error image - cax = plt.imshow(error_img, cmap='gray') - - # Set the color scale limits if necessary - # cax.set_clim(vmin=-1, vmax=1) # adjust as needed - - # # Add color bar to help interpret the error image - # cbar = plt.colorbar(cax, orientation='vertical', shrink=0.75) - # cbar.set_label('Error Magnitude', rotation=270, labelpad=15) - - # Save the entire figure - plt.savefig(prediction_path + fileName[0] + '.png') - - # Close the figure to release resources - plt.close() - - -n=20 - -for fileName in random.sample(validate_map_legend_names, n): - print(fileName) - plotResult(fileName, prediction_path) -print("Save Images Done!") - diff --git a/test/eval_224.py b/test/eval_224.py deleted file mode 100644 index 56c0af84e25f673e067128fb702ba96193cb1acc..0000000000000000000000000000000000000000 --- a/test/eval_224.py +++ /dev/null @@ -1,170 +0,0 @@ -import matplotlib.pyplot as plt -import matplotlib.image as mpimg - -import os -import shutil -import random -import numpy as np -import tensorflow as tf -from datetime import datetime -from keras import backend as K -import matplotlib.pyplot as plt -import matplotlib.image as mpimg -import segmentation_models as sm -from keras.models import load_model -from tensorflow.keras.models import Model, load_model -from tensorflow.keras.layers import Input, Conv2D, RandomFlip, RandomRotation -from tensorflow.keras.optimizers import Adam, SGD, RMSprop -from tensorflow.keras.callbacks import ModelCheckpoint, EarlyStopping, ReduceLROnPlateau, TensorBoard -from unet_util import dice_coef_loss, dice_coef, jacard_coef, dice_coef_loss, Residual_CNN_block, multiplication, attention_up_and_concatenate, multiplication2, attention_up_and_concatenate2, UNET_224, evaluate_prediction_result - - - -################################################################ -################# Load Validation Data ######################### -################################################################ -def load_validation_img(filename): - - mapName = '/projects/bbym/nathanj/attentionUnet/data_224/validation_samples/poly/map_patches/'+filename[0] - legendName = '/projects/bbym/nathanj/attentionUnet/data_224/validation_samples/poly/legend/'+filename[1] - - map_img = tf.io.read_file(mapName) # Read image file - map_img = tf.cast(tf.io.decode_png(map_img), dtype=tf.float32) / 255.0 - - legend_img = tf.io.read_file(legendName) # Read image file - legend_img = tf.cast(tf.io.decode_png(legend_img), dtype=tf.float32) / 255.0 - - map_img = tf.concat(axis=2, values = [map_img, legend_img]) - map_img = map_img*2.0 - 1.0 # range(-1.0,1.0) - map_img = tf.image.resize(map_img, [224, 224]) - - segName = 
'/projects/bbym/nathanj/attentionUnet/data_224/validation_samples/poly/seg_patches/'+filename[0]
-
-    seg_img = tf.io.read_file(segName) # Read segmentation mask file
-    seg_img = tf.io.decode_png(seg_img)
-    seg_img = tf.image.resize(seg_img, [224, 224])
-
-    return map_img, seg_img
-
-# A peek at the BatchDataset
-# it = iter(train_dataset)
-# print(next(it))
-validate_map_file = os.listdir('/projects/bbym/nathanj/attentionUnet/data_224/validation_samples/poly/map_patches')
-validate_map_legend_names = [(x, '_'.join(x.split('_')[0:-2])+'.png') for x in validate_map_file]
-
-validate_dataset = tf.data.Dataset.from_tensor_slices(validate_map_legend_names)
-validate_dataset = validate_dataset.map(load_validation_img)
-validate_dataset = validate_dataset.batch(50)
-
-print("Load Data Done!")
-
-
-################################################################
-##### Prepare the model configurations #########################
-################################################################
-name_id = '11092023-16' #You can change the id for each run so that all models and stats are saved separately.
-input_data = './samples/'
-prediction_path = './predicts_'+name_id+'/'
-log_path = './logs_'+name_id+'/'
-model_path = './models_'+name_id+'/'
-save_model_path = './models_'+name_id+'/'
-
-# Create the folder if it does not exist
-os.makedirs(input_data, exist_ok=True)
-os.makedirs(model_path, exist_ok=True)
-os.makedirs(prediction_path, exist_ok=True)
-
-# Available backbones for the U-Net architecture
-# 'vgg16' 'vgg19' 'resnet18' 'resnet34' 'resnet50' 'resnet101' 'resnet152' 'inceptionv3'
-# 'inceptionresnetv2' 'densenet121' 'densenet169' 'densenet201' 'seresnet18' 'seresnet34'
-# 'seresnet50' 'seresnet101' 'seresnet152', and 'attentionUnet'
-backend = 'attentionUnet'
-
-name = 'Unet-'+ backend
-
-logdir = log_path + name
-
-if(os.path.isdir(logdir)):
-    shutil.rmtree(logdir)
-
-os.makedirs(logdir, exist_ok=True)
-tensorboard_callback = tf.keras.callbacks.TensorBoard(log_dir=logdir)
-
-print('model location: '+ model_path+name+'.h5')
-
-# Load the best fine-tuned model saved by the callback module (load_model is already imported above)
-if(backend != "attentionUnet"):
-    model = load_model(model_path+name+'_ft.h5',
-                       custom_objects={'dice_coef_loss':dice_coef_loss,
-                                       'dice_coef':dice_coef,})
-else:
-    model = load_model(model_path+name+'_ft.h5',
-                       custom_objects={'multiplication': multiplication,
-                                       'multiplication2': multiplication2,
-                                       'dice_coef_loss':dice_coef_loss,
-                                       'dice_coef':dice_coef,})
-
-print("Load Model Done!")
-
-def plotResult(n, fileName, save_dir):
-
-    test_dataset = tf.data.Dataset.from_tensor_slices([fileName])
-    test_dataset = test_dataset.map(load_validation_img)
-    test_dataset = test_dataset.batch(1)
-
-    predicted = model.predict(test_dataset)
-
-    mapName = '/projects/bbym/nathanj/attentionUnet/data_224/validation_samples/poly/map_patches/' + fileName[0]
-    segName = '/projects/bbym/nathanj/attentionUnet/data_224/validation_samples/poly/seg_patches/' + fileName[0]
-    legendName = '/projects/bbym/nathanj/attentionUnet/data_224/validation_samples/poly/legend/' + fileName[1]
-
-    map_img = mpimg.imread(mapName)
-    seg_img = mpimg.imread(segName)
-    label_img = mpimg.imread(legendName)
-
-    print(predicted.shape)
-    print(map_img.shape)
-    print(seg_img.shape)
-    print(label_img.shape)
-
-    plt.title("map")
-    plt.rcParams["figure.figsize"] = (25, 10)
-
-    plt.subplot(n, 5, 1)
-    plt.imshow(map_img)
-    # plt.savefig(save_dir + 'map.png') # Save the map image
-
-    plt.subplot(n, 5, 2)
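# Editor's note (illustrative, kept as comments so the listing above stays
# intact): for binary masks, the cleanest "error" panel is the pixel-wise XOR
# used in eval.py, which marks exactly the mispredicted pixels:
#     pred = np.array([[1, 0], [1, 1]], dtype=np.uint8)
#     true = np.array([[1, 1], [0, 1]], dtype=np.uint8)
#     np.logical_xor(pred, true)   # -> [[False, True], [True, False]]
# The signed difference (predicted - seg_img) used later in this function also
# works, but it mixes false positives (+1) and false negatives (-1) into one
# grayscale panel.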
- plt.title("legend") - plt.imshow(label_img) - # plt.savefig(save_dir + 'legend.png') # Save the legend image - - plt.subplot(n, 5, 3) - plt.title("true segmentation") - plt.imshow(seg_img) - # plt.savefig(save_dir + 'true_segmentation.png') # Save the true segmentation image - - plt.subplot(n, 5, 4) - plt.title("predicted segmentation") - # predicted[predicted >= 0.5] = 1 - # predicted[predicted < 0.5] = 0 - plt.imshow(predicted[0, :, :, 0]) - # plt.savefig(save_dir + 'predicted_segmentation.png') # Save the predicted segmentation image - - plt.subplot(n, 5, 5) - plt.title("error") - plt.imshow((predicted[0, :, :, 0] - seg_img).astype(int)) - - plt.savefig(prediction_path +fileName[0]+ '.png') # Save the error image - - # Close the figure to release resources - plt.close() - -n=10 -for fileName in random.sample(validate_map_legend_names, n): - print(fileName) - plotResult(1, fileName, prediction_path) - -print("Save Images Done!") diff --git a/test/model.png b/test/model.png deleted file mode 100644 index 96ff27eddb3e637f27a6e4209851b0e3a6d6f4a1..0000000000000000000000000000000000000000 Binary files a/test/model.png and /dev/null differ diff --git a/test/model1.png b/test/model1.png deleted file mode 100644 index 5d280449bb26e3aa5bd47a68fd82c5f1466f19a7..0000000000000000000000000000000000000000 Binary files a/test/model1.png and /dev/null differ diff --git a/test/poly_legend_process.py b/test/poly_legend_process.py deleted file mode 100644 index 63c508eff3e722ec6bb3f1264c4a05822bb7e23b..0000000000000000000000000000000000000000 --- a/test/poly_legend_process.py +++ /dev/null @@ -1,107 +0,0 @@ -import cv2 -import numpy as np -from sklearn.cluster import MiniBatchKMeans -import os - -def weighted_euclidean_distance(color1, color2): - r1, g1, b1 = color1*255 - r2, g2, b2 = color2*255 - return np.sqrt(0.3 * ((r1 - r2) ** 2) + 0.59 * ((g1 - g2) ** 2) + 0.11 * ((b1 - b2) ** 2)) - -def find_optimal_num_clusters(image, max_clusters=10, size_threshold=20000): - image_normalized = image.astype(np.float32) / 255.0 - pixels = image_normalized.reshape((-1, 3)) - - best_num_clusters = 1 # Default to 1 cluster - cluster_results = {} # Dictionary to store results for different cluster counts - results_colors = set() - - for num_clusters in range(1, max_clusters + 1): - kmeans = MiniBatchKMeans(n_clusters=num_clusters, n_init ='auto') - kmeans.fit(pixels) - labels = kmeans.labels_ - centers = kmeans.cluster_centers_ - - # Check if all cluster centers are sufficiently different - pairwise_distances = [weighted_euclidean_distance(centers[i], centers[j]) - for i in range(num_clusters) for j in range(i + 1, num_clusters)] - - if all(dist >= 50 for dist in pairwise_distances): - # print(centers , pairwise_distances) - cluster_results[num_clusters] = (num_clusters, labels, centers) - else: - break - - # Return the best cluster result based on size and color - for num_clusters in sorted(cluster_results.keys(), reverse=True): - labels, centers = cluster_results[num_clusters][1:] - - # Check each cluster size and color - for i in range(num_clusters): - cluster_size = np.sum(labels == i) - cluster_color = centers[i] - - # print(cluster_size, cluster_color) - - if cluster_size >= size_threshold: - results_colors.add(tuple(cluster_color)) - - # Check if there are more than two colors and remove black and white colors - if len(results_colors) >= 2: - results_colors = {color for color in results_colors if not (np.all(np.array(color) < 0.25) or np.all(np.array(color) > 0.95))} - - # print(len(results_colors), 
np.array(list(results_colors))) - - return len(results_colors), np.array(list(results_colors)) - -def process_image(input_image_path, target_folder): - filename = os.path.basename(input_image_path) - output_image_path = os.path.join(target_folder, filename) - - print(f"Processing: {filename}") - - # Skip processing if the output image already exists - if os.path.exists(output_image_path): - print(f"Skipping processing for: {filename} (Output image already exists)") - return - - # Load the image - image = cv2.imread(input_image_path) - image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB) - - # Call the cluster_colors function to get the clustered colors - num_colors, colors = find_optimal_num_clusters(image, max_clusters=5) - - if num_colors >= 2: - # Create an image for each color cluster that is not black or white - for i, color in enumerate(colors): - color_image_path = os.path.join(target_folder, f"{filename}__color_{i}.png") - color_image = np.ones_like(image) * (color * 255).astype(np.uint8) - cv2.imwrite(color_image_path, cv2.cvtColor(color_image, cv2.COLOR_RGB2BGR)) - # print(f" Saved color cluster image: {color_image_path}") - - elif num_colors == 1: - # Create an image with the dominant color - dominant_color = colors[0] - dominant_color_image = np.ones_like(image) * (dominant_color * 255).astype(np.uint8) - cv2.imwrite(output_image_path, cv2.cvtColor(dominant_color_image, cv2.COLOR_RGB2BGR)) - # print(f" Saved dominant color image: {output_image_path}") - -def process_and_save_images(input_folder, target_folder): - # Create the target folder if it doesn't exist - if not os.path.exists(target_folder): - os.makedirs(target_folder) - - # Get a list of image files in the input folder - image_files = [os.path.join(input_folder, filename) for filename in os.listdir(input_folder) - if filename.endswith(('.jpg', '.jpeg', '.png'))] - - # Process images one by one - for image_file in image_files: - process_image(image_file, target_folder) - -# Example usage: -input_folder = '/projects/bbym/nathanj/attentionUnet/example_data/legend' # Replace with the path to your input folder -target_folder = '/projects/bbym/nathanj/attentionUnet/example_data/processed_legend' # Replace with your target folder path - -process_and_save_images(input_folder, target_folder) diff --git a/test/train_existing_model.py b/test/train_existing_model.py deleted file mode 100644 index a874c09355a6155a20738b5325eb5ea0ff5c955a..0000000000000000000000000000000000000000 --- a/test/train_existing_model.py +++ /dev/null @@ -1,143 +0,0 @@ -import os -import shutil -import random -import numpy as np -import tensorflow as tf -from datetime import datetime -from keras import backend as K -import matplotlib.pyplot as plt -import matplotlib.image as mpimg -import segmentation_models as sm -from keras.models import load_model -from tensorflow.keras.models import Model, load_model -from tensorflow.keras.layers import Input, Conv2D, RandomFlip, RandomRotation -from tensorflow.keras.optimizers import Adam, SGD, RMSprop -from tensorflow.keras.callbacks import ModelCheckpoint, EarlyStopping, ReduceLROnPlateau, TensorBoard -from unet_util import dice_coef_loss, dice_coef, jacard_coef, dice_coef_loss, Residual_CNN_block, multiplication, attention_up_and_concatenate, multiplication2, attention_up_and_concatenate2, UNET_224, evaluate_prediction_result - -################################################################ -##### The data loading process ################################# 
-################################################################ - -def load_img(filename, map_dir, legend_dir, seg_dir): - mapName = tf.strings.join([map_dir, filename[0]], separator='/') - legendName = tf.strings.join([legend_dir, filename[1]], separator='/') - - map_img = tf.io.read_file(mapName) - map_img = tf.cast(tf.io.decode_png(map_img), dtype=tf.float32) / 255.0 - - legend_img = tf.io.read_file(legendName) - legend_img = tf.cast(tf.io.decode_png(legend_img), dtype=tf.float32) / 255.0 - - map_img = tf.concat(axis=2, values=[map_img, legend_img]) - map_img = map_img*2.0 - 1.0 - map_img = tf.image.resize(map_img, [256, 256]) - - segName = tf.strings.join([seg_dir, filename[0]], separator='/') - seg_img = tf.io.read_file(segName) - seg_img = tf.io.decode_png(seg_img) - seg_img = tf.image.resize(seg_img, [256, 256]) - - return map_img, seg_img - -def load_train_img(filename): - return load_img(filename, - '/projects/bbym/shared/all_patched_data/training/poly/map_patches', - '/projects/bbym/nathanj/training/legend', - '/projects/bbym/shared/all_patched_data/training/poly/seg_patches') - -def load_validation_img(filename): - return load_img(filename, - '/projects/bbym/shared/all_patched_data/validation/poly/map_patches', - '/projects/bbym/nathanj/validation/legend', - '/projects/bbym/shared/all_patched_data/validation/poly/seg_patches') - -train_map_file = os.listdir('/projects/bbym/shared/all_patched_data/training/poly/map_patches') -random.shuffle(train_map_file) - -# Pre-filter map files based on existence of corresponding legend files -train_map_legend_names = [(x, '_'.join(x.split('_')[0:-2])+'.png') - for x in train_map_file - if os.path.exists(os.path.join('/projects/bbym/nathanj/training/legend', - '_'.join(x.split('_')[0:-2])+'.png'))] - -train_dataset = tf.data.Dataset.from_tensor_slices(train_map_legend_names) -train_dataset = train_dataset.map(load_train_img) -train_dataset = train_dataset.shuffle(5000, reshuffle_each_iteration=False).batch(128) - -validate_map_file = os.listdir('/projects/bbym/shared/all_patched_data/validation/poly/map_patches') - -# Pre-filter map files based on existence of corresponding legend files -validate_map_legend_names = [(x, '_'.join(x.split('_')[0:-2])+'.png') - for x in validate_map_file - if os.path.exists(os.path.join('/projects/bbym/nathanj/validation/legend', - '_'.join(x.split('_')[0:-2])+'.png'))] - -validate_dataset = tf.data.Dataset.from_tensor_slices(validate_map_legend_names) -validate_dataset = validate_dataset.map(load_validation_img) -validate_dataset = validate_dataset.batch(50) - - -################################################################ -##### Prepare the model configurations ######################### -################################################################ -name_id = 'proecessed_legends_2' #You can change the id for each run so that all models and stats are saved separately. 
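Editor's note: a minimal, self-contained sketch of the tf.data pattern used by the loaders above; the file names and loader body are illustrative stand-ins, not the original paths.

import tensorflow as tf

pairs = [('patch_0_0.png', 'legend.png'), ('patch_0_1.png', 'legend.png')]

def load_pair(pair):
    # the real loaders read both PNGs, concatenate them into a 6-channel
    # tensor, rescale to [-1, 1], and return (input, segmentation) instead
    return pair[0], pair[1]

ds = tf.data.Dataset.from_tensor_slices(pairs)
ds = ds.map(load_pair).shuffle(5000, reshuffle_each_iteration=False).batch(128)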
-input_data = './samples/'
-prediction_path = './predicts_'+name_id+'/'
-log_path = './logs_'+name_id+'/'
-model_path = './models_'+name_id+'/'
-save_model_path = './models_'+name_id+'/'
-
-# Create the folder if it does not exist
-os.makedirs(input_data, exist_ok=True)
-os.makedirs(model_path, exist_ok=True)
-os.makedirs(prediction_path, exist_ok=True)
-
-# Available backbones for the U-Net architecture
-# 'vgg16' 'vgg19' 'resnet18' 'resnet34' 'resnet50' 'resnet101' 'resnet152' 'inceptionv3'
-# 'inceptionresnetv2' 'densenet121' 'densenet169' 'densenet201' 'seresnet18' 'seresnet34'
-# 'seresnet50' 'seresnet101' 'seresnet152', and 'attentionUnet'
-backend = 'attentionUnet'
-
-name = 'Unet-'+ backend
-
-logdir = log_path + name
-
-if(os.path.isdir(logdir)):
-    shutil.rmtree(logdir)
-
-os.makedirs(logdir, exist_ok=True)
-tensorboard_callback = tf.keras.callbacks.TensorBoard(log_dir=logdir)
-
-print('model location: '+ model_path+name+'.h5')
-
-# Fine-tuning flag
-# Set to False to randomly initialize the model
-finetune = False
-
-# Load the best model saved by the callback module (load_model is already imported above)
-if(backend != "attentionUnet"):
-    model = load_model(model_path+name+'.h5',
-                       custom_objects={'dice_coef_loss':dice_coef_loss,
-                                       'dice_coef':dice_coef,})
-else:
-    model = load_model(model_path+name+'.h5',
-                       custom_objects={'multiplication': multiplication,
-                                       'multiplication2': multiplication2,
-                                       'dice_coef_loss':dice_coef_loss,
-                                       'dice_coef':dice_coef,})
-# Compile the model with the Adam optimizer at a reduced learning rate (1e-5), as appropriate for fine-tuning an already-trained model
-model.compile(optimizer = Adam(0.00001),
-              loss = dice_coef_loss,
-              metrics=[dice_coef,'accuracy'])
-
-# define hyperparameters and callback modules
-patience = 10
-maxepoch = 500
-callbacks = [ReduceLROnPlateau(monitor='val_loss', factor=0.7, patience=patience, min_lr=1e-9, verbose=1, mode='min'),
-             EarlyStopping(monitor='val_loss', patience=patience, verbose=0),
-             ModelCheckpoint(model_path+name+'_ft.h5', monitor='val_loss', save_best_only=True, verbose=0),
-             TensorBoard(log_dir=logdir)]
-
-# the batch size is set when the tf.data datasets are batched above; passing
-# batch_size to fit() together with a dataset raises a ValueError in TF2
-train_history = model.fit(train_dataset, validation_data = validate_dataset, epochs = maxepoch, verbose=1, callbacks = callbacks)
\ No newline at end of file
diff --git a/test/train_existing_model_224.py b/test/train_existing_model_224.py
deleted file mode 100644
index 3a3d162f093ea0c03dd7bac0e30e875d381aad13..0000000000000000000000000000000000000000
--- a/test/train_existing_model_224.py
+++ /dev/null
@@ -1,152 +0,0 @@
-import os
-import shutil
-import random
-import numpy as np
-import tensorflow as tf
-from datetime import datetime
-from keras import backend as K
-import matplotlib.pyplot as plt
-import matplotlib.image as mpimg
-import segmentation_models as sm
-from keras.models import load_model
-from tensorflow.keras.models import Model, load_model
-from tensorflow.keras.layers import Input, Conv2D, RandomFlip, RandomRotation
-from tensorflow.keras.optimizers import Adam, SGD, RMSprop
-from tensorflow.keras.callbacks import ModelCheckpoint, EarlyStopping, ReduceLROnPlateau, TensorBoard
-from unet_util import dice_coef_loss, dice_coef, jacard_coef, Residual_CNN_block, multiplication, attention_up_and_concatenate, multiplication2, attention_up_and_concatenate2, UNET_224, evaluate_prediction_result
-
-################################################################
-##### The data loading process #################################
-################################################################
-
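Editor's note on the callback stacks in these training scripts: with equal patience values, as in train_existing_model.py above, EarlyStopping fires on the same epoch as the first ReduceLROnPlateau reduction, so the lowered learning rate is never actually used; train_existing_model_224.py below raises the stopping patience to patience+10 so the reduced rate can take effect first. A minimal sketch ('best.h5' is a placeholder path):

from tensorflow.keras.callbacks import ReduceLROnPlateau, EarlyStopping, ModelCheckpoint

patience = 10
callbacks = [ReduceLROnPlateau(monitor='val_loss', factor=0.7, patience=patience, min_lr=1e-9, verbose=1, mode='min'),
             EarlyStopping(monitor='val_loss', patience=patience + 10, verbose=0),
             ModelCheckpoint('best.h5', monitor='val_loss', save_best_only=True, verbose=0)]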
-data_augmentation = tf.keras.Sequential([RandomFlip("horizontal_and_vertical"),RandomRotation(0.2)]) - -def load_train_img(filename): - - mapName = '/projects/bbym/nathanj/attentionUnet/data_224/training_samples/poly/map_patches/'+filename[0] - legendName = '/projects/bbym/nathanj/attentionUnet/data_224/training_samples/poly/legend/'+filename[1] - - map_img = tf.io.read_file(mapName) # Read image file - map_img = tf.cast(tf.io.decode_png(map_img), dtype=tf.float32) / 255.0 - - legend_img = tf.io.read_file(legendName) # Read image file - legend_img = tf.cast(tf.io.decode_png(legend_img), dtype=tf.float32) / 255.0 - - map_img = tf.concat(axis=2, values = [map_img, legend_img]) - map_img = data_augmentation(map_img) - - map_img = map_img*2.0 - 1.0 # range(-1.0,1.0) - map_img = tf.image.resize(map_img, [224, 224]) - - segName = '/projects/bbym/nathanj/attentionUnet/data_224/training_samples/poly/seg_patches/'+filename[0] - - legend_img = tf.io.read_file(segName) # Read image file - legend_img = tf.io.decode_png(legend_img) - legend_img = tf.image.resize(legend_img, [224, 224]) - - return map_img, legend_img - -def load_validation_img(filename): - - mapName = '/projects/bbym/nathanj/attentionUnet/data_224/training_samples/poly/map_patches/'+filename[0] - legendName = '/projects/bbym/nathanj/attentionUnet/data_224/training_samples/poly/legend/'+filename[1] - - map_img = tf.io.read_file(mapName) # Read image file - map_img = tf.cast(tf.io.decode_png(map_img), dtype=tf.float32) / 255.0 - - legend_img = tf.io.read_file(legendName) # Read image file - legend_img = tf.cast(tf.io.decode_png(legend_img), dtype=tf.float32) / 255.0 - - map_img = tf.concat(axis=2, values = [map_img, legend_img]) - map_img = map_img*2.0 - 1.0 # range(-1.0,1.0) - map_img = tf.image.resize(map_img, [224, 224]) - - segName = '/projects/bbym/nathanj/attentionUnet/data_224/training_samples/poly/seg_patches/'+filename[0] - - legend_img = tf.io.read_file(segName) # Read image file - legend_img = tf.io.decode_png(legend_img) - legend_img = tf.image.resize(legend_img, [224, 224]) - legend_img = legend_img - - return map_img, legend_img - -train_map_file = os.listdir('/projects/bbym/nathanj/attentionUnet/data_224/training_samples/poly/map_patches') -random.shuffle(train_map_file) -train_map_legend_names = [(x, '_'.join(x.split('_')[0:-2])+'.png') for x in train_map_file[:3000]] -print('train_map_legend_names:', train_map_legend_names) -train_dataset = tf.data.Dataset.from_tensor_slices(train_map_legend_names) -train_dataset = train_dataset.map(load_train_img) -train_dataset = train_dataset.shuffle(500, reshuffle_each_iteration=False).batch(128) - -# validate_map_file = os.listdir('/projects/bbym/nathanj/attentionUnet/data/validation_samples/poly/map_patches') -validate_map_legend_names = [(x, '_'.join(x.split('_')[0:-2])+'.png') for x in train_map_file[3000:3400]] -# print('validate_map_legend_names:', validate_map_legend_names) -validate_dataset = tf.data.Dataset.from_tensor_slices(validate_map_legend_names) -validate_dataset = validate_dataset.map(load_validation_img) -validate_dataset = validate_dataset.batch(50) - - -################################################################ -##### Prepare the model configurations ######################### -################################################################ -name_id = '11092023-16' #You can change the id for each run so that all models and stats are saved separately. 
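Editor's note: load_train_img above applies RandomFlip/RandomRotation to the map+legend input only, leaving the segmentation target untouched, so a flipped input can be paired with an unflipped mask. A minimal sketch (illustrative helper, not part of the original code) of geometry-safe augmentation: stack input and mask, transform once, then split.

import tensorflow as tf
from tensorflow.keras.layers import RandomFlip, RandomRotation

augment = tf.keras.Sequential([RandomFlip("horizontal_and_vertical"), RandomRotation(0.2)])

def augment_pair(map_img, seg_img):
    # map_img: (H, W, 6) float32, seg_img: (H, W, 1) -- transform them together
    stacked = tf.concat([map_img, tf.cast(seg_img, map_img.dtype)], axis=-1)
    stacked = augment(stacked, training=True)  # training=True enables the random ops
    # RandomRotation interpolates, so re-threshold the mask if hard 0/1 labels are needed
    return stacked[..., :6], stacked[..., 6:]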
-input_data = './samples/'
-prediction_path = './predicts_'+name_id+'/'
-log_path = './logs_'+name_id+'/'
-model_path = './models_'+name_id+'/'
-save_model_path = './models_'+name_id+'/'
-
-# Create the folder if it does not exist
-os.makedirs(input_data, exist_ok=True)
-os.makedirs(model_path, exist_ok=True)
-os.makedirs(prediction_path, exist_ok=True)
-
-# Available backbones for the U-Net architecture
-# 'vgg16' 'vgg19' 'resnet18' 'resnet34' 'resnet50' 'resnet101' 'resnet152' 'inceptionv3'
-# 'inceptionresnetv2' 'densenet121' 'densenet169' 'densenet201' 'seresnet18' 'seresnet34'
-# 'seresnet50' 'seresnet101' 'seresnet152', and 'attentionUnet'
-backend = 'attentionUnet'
-
-name = 'Unet-'+ backend
-
-logdir = log_path + name
-
-if(os.path.isdir(logdir)):
-    shutil.rmtree(logdir)
-
-os.makedirs(logdir, exist_ok=True)
-tensorboard_callback = tf.keras.callbacks.TensorBoard(log_dir=logdir)
-
-print('model location: '+ model_path+name+'.h5')
-
-# Fine-tuning flag
-# Set to False to randomly initialize the model
-finetune = False
-
-# Load the best model saved by the callback module (load_model is already imported above)
-if(backend != "attentionUnet"):
-    model = load_model(model_path+name+'.h5',
-                       custom_objects={'dice_coef_loss':dice_coef_loss,
-                                       'dice_coef':dice_coef,})
-else:
-    model = load_model(model_path+name+'.h5',
-                       custom_objects={'multiplication': multiplication,
-                                       'multiplication2': multiplication2,
-                                       'dice_coef_loss':dice_coef_loss,
-                                       'dice_coef':dice_coef,})
-
-# Compile the model with the Adam optimizer at a reduced learning rate, as appropriate for fine-tuning an already-trained model
-model.compile(optimizer = Adam(0.0000359),
-              loss = dice_coef_loss,
-              metrics=[dice_coef,'accuracy'])
-
-# define hyperparameters and callback modules
-patience = 10
-maxepoch = 500
-callbacks = [ReduceLROnPlateau(monitor='val_loss', factor=0.7, patience=patience, min_lr=1e-9, verbose=1, mode='min'),
-             EarlyStopping(monitor='val_loss', patience=patience+10, verbose=0),
-             ModelCheckpoint(model_path+name+'_ft.h5', monitor='val_loss', save_best_only=True, verbose=0),
-             TensorBoard(log_dir=logdir)]
-
-# the batch size is set when the tf.data datasets are batched above
-train_history = model.fit(train_dataset, validation_data = validate_dataset, epochs = maxepoch, verbose=1, callbacks = callbacks)
\ No newline at end of file
diff --git a/test/train_model_224.py b/test/train_model_224.py
deleted file mode 100644
index 5e05f271df33a704ea310a33fc43260e483c345f..0000000000000000000000000000000000000000
--- a/test/train_model_224.py
+++ /dev/null
@@ -1,152 +0,0 @@
-import os
-import shutil
-import random
-import numpy as np
-import tensorflow as tf
-from datetime import datetime
-from keras import backend as K
-import matplotlib.pyplot as plt
-import matplotlib.image as mpimg
-import segmentation_models as sm
-from keras.models import load_model
-from tensorflow.keras.models import Model, load_model
-from tensorflow.keras.layers import Input, Conv2D, RandomFlip, RandomRotation
-from tensorflow.keras.optimizers import Adam, SGD, RMSprop
-from tensorflow.keras.callbacks import ModelCheckpoint, EarlyStopping, ReduceLROnPlateau, TensorBoard
-from unet_util import dice_coef_loss, dice_coef, jacard_coef, Residual_CNN_block, multiplication, attention_up_and_concatenate, multiplication2, attention_up_and_concatenate2, UNET_224, evaluate_prediction_result
-
-################################################################
-##### The data loading process #################################
-################################################################
-
-data_augmentation =
tf.keras.Sequential([RandomFlip("horizontal_and_vertical"),RandomRotation(0.2)]) - -def load_train_img(filename): - - mapName = '/projects/bbym/nathanj/attentionUnet/data_224/training_samples/poly/map_patches/'+filename[0] - legendName = '/projects/bbym/nathanj/attentionUnet/data_224/training_samples/poly/legend/'+filename[1] - - map_img = tf.io.read_file(mapName) # Read image file - map_img = tf.cast(tf.io.decode_png(map_img), dtype=tf.float32) / 255.0 - - legend_img = tf.io.read_file(legendName) # Read image file - legend_img = tf.cast(tf.io.decode_png(legend_img), dtype=tf.float32) / 255.0 - - map_img = tf.concat(axis=2, values = [map_img, legend_img]) - map_img = data_augmentation(map_img) - - map_img = map_img*2.0 - 1.0 # range(-1.0,1.0) - map_img = tf.image.resize(map_img, [224, 224]) - - segName = '/projects/bbym/nathanj/attentionUnet/data_224/training_samples/poly/seg_patches/'+filename[0] - - legend_img = tf.io.read_file(segName) # Read image file - legend_img = tf.io.decode_png(legend_img) - legend_img = tf.image.resize(legend_img, [224, 224]) - - return map_img, legend_img - -def load_validation_img(filename): - - mapName = '/projects/bbym/nathanj/attentionUnet/data_224/training_samples/poly/map_patches/'+filename[0] - legendName = '/projects/bbym/nathanj/attentionUnet/data_224/training_samples/poly/legend/'+filename[1] - - map_img = tf.io.read_file(mapName) # Read image file - map_img = tf.cast(tf.io.decode_png(map_img), dtype=tf.float32) / 255.0 - - legend_img = tf.io.read_file(legendName) # Read image file - legend_img = tf.cast(tf.io.decode_png(legend_img), dtype=tf.float32) / 255.0 - - map_img = tf.concat(axis=2, values = [map_img, legend_img]) - map_img = map_img*2.0 - 1.0 # range(-1.0,1.0) - map_img = tf.image.resize(map_img, [224, 224]) - - segName = '/projects/bbym/nathanj/attentionUnet/data_224/training_samples/poly/seg_patches/'+filename[0] - - legend_img = tf.io.read_file(segName) # Read image file - legend_img = tf.io.decode_png(legend_img) - legend_img = tf.image.resize(legend_img, [224, 224]) - legend_img = legend_img - - return map_img, legend_img - -train_map_file = os.listdir('/projects/bbym/nathanj/attentionUnet/data_224/training_samples/poly/map_patches') -random.shuffle(train_map_file) -train_map_legend_names = [(x, '_'.join(x.split('_')[0:-2])+'.png') for x in train_map_file[:1200]] -print('train_map_legend_names:', train_map_legend_names) -train_dataset = tf.data.Dataset.from_tensor_slices(train_map_legend_names) -train_dataset = train_dataset.map(load_train_img) -train_dataset = train_dataset.shuffle(500, reshuffle_each_iteration=False).batch(128) - -# validate_map_file = os.listdir('/projects/bbym/nathanj/attentionUnet/data/validation_samples/poly/map_patches') -validate_map_legend_names = [(x, '_'.join(x.split('_')[0:-2])+'.png') for x in train_map_file[1200:1400]] -# print('validate_map_legend_names:', validate_map_legend_names) -validate_dataset = tf.data.Dataset.from_tensor_slices(validate_map_legend_names) -validate_dataset = validate_dataset.map(load_validation_img) -validate_dataset = validate_dataset.batch(50) - - -################################################################ -##### Prepare the model configurations ######################### -################################################################ -name_id = datetime.now().strftime("%d%m%Y-%H") #You can change the id for each run so that all models and stats are saved separately. 
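Editor's note: both splits above are slices of the same shuffled patch list, so patches cut from one map can appear in both training and validation. A minimal sketch (illustrative names; the map id is assumed to be the first two underscore-separated tokens) of a map-level split that avoids that leakage:

import random

patch_files = ['CA_Dubakella_Tv_poly_0_1.png', 'OR_Camas_Qls_poly_2_3.png']  # stand-ins
map_ids = sorted({'_'.join(f.split('_')[:2]) for f in patch_files})
random.shuffle(map_ids)
val_maps = set(map_ids[:max(1, len(map_ids) // 10)])  # hold out ~10% of maps
train_files = [f for f in patch_files if '_'.join(f.split('_')[:2]) not in val_maps]
val_files = [f for f in patch_files if '_'.join(f.split('_')[:2]) in val_maps]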
-input_data = './samples/'
-prediction_path = './predicts_'+name_id+'/'
-log_path = './logs_'+name_id+'/'
-model_path = './models_'+name_id+'/'
-save_model_path = './models_'+name_id+'/'
-
-# Create the folders if they do not exist
-os.makedirs(input_data, exist_ok=True)
-os.makedirs(model_path, exist_ok=True)
-os.makedirs(prediction_path, exist_ok=True)
-
-# Available backbones for the U-Net architecture:
-# 'vgg16' 'vgg19' 'resnet18' 'resnet34' 'resnet50' 'resnet101' 'resnet152' 'inceptionv3'
-# 'inceptionresnetv2' 'densenet121' 'densenet169' 'densenet201' 'seresnet18' 'seresnet34'
-# 'seresnet50' 'seresnet101' 'seresnet152', and 'attentionUnet'
-backend = 'attentionUnet'
-
-name = 'Unet-' + backend
-
-logdir = log_path + name
-
-if(os.path.isdir(logdir)):
-    shutil.rmtree(logdir)
-
-os.makedirs(logdir, exist_ok=True)
-tensorboard_callback = tf.keras.callbacks.TensorBoard(log_dir=logdir)
-
-print('model location: ' + model_path+name+'.h5')
-
-# Fine-tuning flag
-# Set to False to randomly initialize the model
-finetune = False
-
-# Create the U-Net model with the chosen backbone
-if (backend == "attentionUnet"):
-    # Attention U-Net model
-    learning_rate = 0.0000359
-    model = UNET_224(IMG_WIDTH=224, INPUT_CHANNELS=6)
-    model.compile(optimizer = Adam(learning_rate=learning_rate),
-                  loss = dice_coef_loss,
-                  metrics = [dice_coef, 'accuracy'])
-else:
-    # if (not finetune):
-    # U-Net without ImageNet encoder weights
-    model = sm.Unet(backend, classes = 1, encoder_weights=None, input_shape=(None, None, 6))
-
-    # Compile the model with the 'Adam' optimizer (0.001 is the default learning rate) and define the loss and metrics
-    model.compile(optimizer = Adam(0.0001),
-                  loss = dice_coef_loss,
-                  metrics=[dice_coef, 'accuracy'])
-
-# Define hyperparameters and callback modules
-# Note: EarlyStopping and ReduceLROnPlateau share the same patience here, so
-# training may stop before a learning-rate reduction can take effect
-patience = 10
-maxepoch = 500
-callbacks = [ReduceLROnPlateau(monitor='val_loss', factor=0.7, patience=patience, min_lr=1e-9, verbose=1, mode='min'),
-             EarlyStopping(monitor='val_loss', patience=patience, verbose=0),
-             ModelCheckpoint(model_path+name+'.h5', monitor='val_loss', save_best_only=True, verbose=0),
-             TensorBoard(log_dir=logdir)]
-
-train_history = model.fit(train_dataset, validation_data = validate_dataset, epochs = maxepoch, verbose=1, callbacks = callbacks)
\ No newline at end of file
diff --git a/test/two-h-model.py b/test/two-h-model.py
deleted file mode 100644
index e9a327191384f6e355067629a7601a853d416f56..0000000000000000000000000000000000000000
--- a/test/two-h-model.py
+++ /dev/null
@@ -1,314 +0,0 @@
-import os
-import shutil
-import random
-import numpy as np
-import tensorflow as tf
-from keras import backend as K
-from tensorflow.keras import layers, models
-from tensorflow.keras.utils import plot_model
-from tensorflow.keras.losses import binary_crossentropy, KLDivergence
-from tensorflow.keras.optimizers import Adam
-from tensorflow.keras.callbacks import ModelCheckpoint, EarlyStopping, ReduceLROnPlateau, TensorBoard
-from unet_util import dice_coef_loss, dice_coef, jacard_coef, Residual_CNN_block, multiplication, attention_up_and_concatenate, multiplication2, attention_up_and_concatenate2, UNET_224, evaluate_prediction_result
-
-class SpatialAttention(layers.Layer):
-    def __init__(self):
-        super(SpatialAttention, self).__init__()
-
-    def build(self, input_shape):
-        self.conv = layers.Conv2D(input_shape[-1], kernel_size=1, strides=1, padding='same', activation='sigmoid')
-
-    def call(self, inputs):
-        # Channel-wise average and max statistics of the input
-        avg_out = tf.reduce_mean(inputs, axis=3, keepdims=True)
-        max_out = tf.reduce_max(inputs, axis=3, keepdims=True)
-        # A 1x1 convolution over the pooled statistics yields the attention map
-        attention = self.conv(tf.concat([avg_out, max_out], axis=3))
-        # Broadcast the attention map over the input tensor
-        return tf.multiply(inputs, attention)
-
-def residual_block(x, filters):
-    if K.image_data_format() == 'channels_first':
-        axis = 1
-    else:
-        axis = 3
-    x = layers.Conv2D(filters, (3, 3), padding="same")(x) # Convolutional layer
-    x = layers.BatchNormalization(axis=axis)(x)
-    x = layers.Activation('relu')(x)
-    x = layers.Conv2D(filters, (3, 3), padding="same")(x) # Another convolutional layer
-    x = layers.BatchNormalization(axis=axis)(x)
-    x = layers.Activation('relu')(x)
-    x = layers.Conv2D(filters, (3, 3), padding="same")(x) # Another convolutional layer
-    x = layers.BatchNormalization(axis=axis)(x)
-    x = layers.Activation('relu')(x)
-    return x
-
-def encoder_block(x, filters):
-    x = residual_block(x, filters)
-    x = layers.MaxPooling2D((2, 2))(x)
-    print('Output encoder block w/o attention info: x.shape-', x.shape)
-    return x
-
-def encoder_block_with_attention(x, filters):
-    x = residual_block(x, filters)
-    x = SpatialAttention()(x)
-    x = layers.MaxPooling2D((2, 2))(x)
-    print('Output encoder block with attention info: x.shape-', x.shape)
-    return x
-
-def decoder_block(x, skip, filters):
-    x = layers.Conv2DTranspose(filters, (2, 2), strides=(2, 2), padding="same")(x)
-    x = layers.Concatenate()([x, skip])
-    x = residual_block(x, filters)
-    print('Output decoder x.shape-', x.shape)
-    return x
-
-def latent_parameters(h, num_filters):
-    return layers.Conv2D(num_filters, (1, 1), padding="same")(h)
-
-def latent_sample(p):
-    # Sample from a unit-variance Gaussian centred on the predicted mean
-    mean = p
-    stddev = 1.0
-    # Using tf.shape to get the dynamic shape of the tensor
-    eps = tf.random.normal(tf.shape(mean), mean=0.0, stddev=1.0)
-    return mean + stddev * eps
-
-def variational_unet(input_shape):
-    input1 = tf.keras.Input(shape=input_shape, name="legend_patch") # legend patch
-    input2 = tf.keras.Input(shape=input_shape, name="map_patch") # map patch
-
-    # Encoder for input1
-    x1 = layers.Conv2D(32, (1, 1), padding="same", activation="relu")(input1) # Project the 3-channel input to 32 feature maps
-    for filters in [64, 128, 256, 512, 1024]:
-        x1 = encoder_block(x1, filters)
-
-    # Latent sample for input1
-    qs = latent_parameters(x1, 1024)
-    input1_latent_sample = latent_sample(qs)
-
-    # Encoder for input2
-    x2 = layers.Conv2D(32, (1, 1), padding="same", activation="relu")(input2)
-    skips = []
-    skips.append(x2)
-    for filters in [64, 128, 256, 512, 1024]:
-        x2 = encoder_block_with_attention(x2, filters) # Using the attention encoder block
-        skips.append(x2)
-
-    print("skip-shape:", len(skips), 'skip-shapes:', skips[0].shape, skips[1].shape, skips[2].shape, skips[3].shape, skips[4].shape, skips[5].shape)
-
-    # Latent parameters for input2
-    ps = latent_parameters(x2, 1024)
-    # input2_latent_sample = latent_sample(ps)
-
-    # Concatenate at the bottleneck
-    bottleneck = layers.Concatenate()([x2, input1_latent_sample])
-
-    # Decoder
-    for skip, filters in reversed(list(zip(skips, [32, 64, 128, 256, 512]))):
-        bottleneck = decoder_block(bottleneck, skip, filters)
-
-    # Final convolution to get the segmentation result
-    output = layers.Conv2D(1, (1, 1), activation="sigmoid", name="output")(bottleneck)
-
-    # combined_outputs = concatenate([output, qs, ps], name="combined_output")
-
-    # print("output-shape:", output.shape, qs.shape, ps.shape)
-
-    return models.Model(inputs=[input1, input2], outputs=[output, [output, qs, ps]])
-
-# Instantiate the model with the input shape
-model = variational_unet((256, 256, 3))
-# model.summary()
-plot_model(model, show_shapes=True, show_layer_names=True, to_file='model1.png')
-
-
-def dice_coefficient(y_true, y_pred):
-    """
-    Compute the Dice coefficient between true and predicted values.
-    """
-    smooth = 1e-10
-    # Flatten the tensors
-    y_true_f = tf.reshape(y_true, [-1])
-    y_pred_f = tf.reshape(y_pred, [-1])
-    intersection = tf.reduce_sum(y_true_f * y_pred_f)
-    return (2. * intersection + smooth) / (tf.reduce_sum(y_true_f) + tf.reduce_sum(y_pred_f) + smooth)
-
-def kld_loss(qs, ps):
-    """
-    Compute the KL divergence loss.
-    """
-    kld = tf.keras.losses.KLDivergence()
-    return kld(qs, ps)
-
-class CombinedLoss(tf.keras.losses.Loss):
-    def __init__(self):
-        super(CombinedLoss, self).__init__()
-
-    def call(self, y_true, y_pred):
-        # y_pred is a list: [output, qs, ps]
-        output, qs, ps = y_pred[0], y_pred[1], y_pred[2]
-        y_true_output = y_true # adjust based on the actual y_true structure
-
-        # Compute the Dice loss
-        dice_loss = 1 - dice_coefficient(y_true_output, output)
-
-        # Compute the KLD loss
-        kld_loss_value = kld_loss(qs, ps)
-
-        # Combine the two losses (the relative weight can be tuned)
-        return dice_loss + kld_loss_value
-
-
-class DiceCoefficientMetric(tf.keras.metrics.Metric):
-    def __init__(self, name='dice_coefficient_metric', **kwargs):
-        super(DiceCoefficientMetric, self).__init__(name=name, **kwargs)
-        self.dice = self.add_weight(name='dice', initializer='zeros')
-        self.count = self.add_weight(name='count', initializer='zeros')
-
-    def update_state(self, y_true, y_pred, sample_weight=None):
-        y_pred_output = y_pred[0] # Adjust based on the actual structure
-        # Accumulate per-batch Dice and a batch count so result() is a mean,
-        # not an unbounded running sum
-        dice_value = dice_coefficient(y_true, y_pred_output)
-        self.dice.assign_add(dice_value)
-        self.count.assign_add(1.0)
-
-    def result(self):
-        # Mean Dice over the batches seen so far
-        return self.dice / self.count
-
-    def reset_states(self):
-        self.dice.assign(0.0)
-        self.count.assign(0.0)
-
-
-class SegmentationAccuracy(tf.keras.metrics.Metric):
-    def __init__(self, name='segmentation_accuracy', **kwargs):
-        super(SegmentationAccuracy, self).__init__(name=name, **kwargs)
-        self.total_correct = self.add_weight(name='total_correct', initializer='zeros')
-        self.total_count = self.add_weight(name='total_count', initializer='zeros')
-
-    def update_state(self, y_true, y_pred, sample_weight=None):
-        y_pred_output = y_pred[0] # Adjust based on the actual structure
-
-        # Optional: y_pred_output may need resizing to match y_true's shape;
-        # replace this with the actual resizing logic if the shapes already agree
-        y_pred_output_resized = tf.image.resize(y_pred_output, [256, 256])
-
-        # Ensure that y_true and y_pred_output_resized have the same shape before comparison
-        correct_predictions_tensor = K.equal(y_true, K.round(y_pred_output_resized))
-        correct_predictions = K.sum(K.cast(correct_predictions_tensor, 'float32'))
-        total_predictions = K.cast(K.prod(K.shape(y_true)), 'float32')
-        self.total_correct.assign_add(correct_predictions)
-        self.total_count.assign_add(total_predictions)
-
-    def result(self):
-        return self.total_correct / self.total_count
-
-    def reset_states(self):
-        self.total_correct.assign(0.0)
-        self.total_count.assign(0.0)
-
-
-
-def load_img(filename, map_dir, legend_dir, seg_dir):
-    mapName = tf.strings.join([map_dir, filename[0]], separator='/')
-    legendName = tf.strings.join([legend_dir, filename[1]], separator='/')
-
-    # Load and preprocess map_img
-    map_img = tf.io.read_file(mapName)
-    map_img = tf.cast(tf.io.decode_png(map_img), dtype=tf.float32) / 255.0
-    map_img = tf.image.resize(map_img, [256, 256])
-
-    # Load and preprocess legend_img
-    legend_img = tf.io.read_file(legendName)
-    legend_img = tf.cast(tf.io.decode_png(legend_img), dtype=tf.float32) / 255.0
-    legend_img = 
tf.image.resize(legend_img, [256, 256]) - - # Load and preprocess seg_img - segName = tf.strings.join([seg_dir, filename[0]], separator='/') - seg_img = tf.io.read_file(segName) - seg_img = tf.io.decode_png(seg_img) - seg_img = tf.image.resize(seg_img, [256, 256]) - - # comb_patch = tf.concat(axis=2, values=[map_img, legend_img]) - # comb_patch = comb_patch*2.0 - 1.0 - # comb_patch = tf.image.resize(comb_patch, [256, 256]) - - - return (legend_img, map_img), seg_img - -def load_train_img(filename): - return load_img(filename, - '/projects/bbym/shared/all_patched_data/training/poly/map_patches', - '/projects/bbym/shared/all_patched_data/training/poly/legend', - '/projects/bbym/shared/all_patched_data/training/poly/seg_patches') - -def load_validation_img(filename): - return load_img(filename, - '/projects/bbym/shared/all_patched_data/validation/poly/map_patches', - '/projects/bbym/shared/all_patched_data/validation/poly/legend', - '/projects/bbym/shared/all_patched_data/validation/poly/seg_patches') - - -train_map_file = os.listdir('/projects/bbym/shared/all_patched_data/training/poly/map_patches') -random.shuffle(train_map_file) - -# Pre-filter map files based on existence of corresponding legend files -train_map_legend_names = [(x, '_'.join(x.split('_')[0:-2])+'.png') - for x in train_map_file - if os.path.exists(os.path.join('/projects/bbym/shared/all_patched_data/training/poly/legend', - '_'.join(x.split('_')[0:-2])+'.png'))] - -train_dataset = tf.data.Dataset.from_tensor_slices(train_map_legend_names) -train_dataset = train_dataset.map(load_train_img) -train_dataset = train_dataset.shuffle(5000, reshuffle_each_iteration=False).batch(128) - -validate_map_file = os.listdir('/projects/bbym/shared/all_patched_data/validation/poly/map_patches') - -# Pre-filter map files based on existence of corresponding legend files -validate_map_legend_names = [(x, '_'.join(x.split('_')[0:-2])+'.png') - for x in validate_map_file - if os.path.exists(os.path.join('/projects/bbym/shared/all_patched_data/validation/poly/legend', - '_'.join(x.split('_')[0:-2])+'.png'))] - -validate_dataset = tf.data.Dataset.from_tensor_slices(validate_map_legend_names) -validate_dataset = validate_dataset.map(load_validation_img) -validate_dataset = validate_dataset.batch(50) - -################################################################ -##### Prepare the model configurations ######################### -################################################################ -#You can change the id for each run so that all models and stats are saved separately. 
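-# A fixed id like the one below reuses the same folders on every run; a
-# timestamped id, as in test/train_model_224.py, is one way to keep runs
-# separate (an illustrative sketch, not part of the original script):
-# from datetime import datetime
-# name_id = "twoH_u_legend_" + datetime.now().strftime("%d%m%Y-%H%M")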
-name_id = "twoH_u_legend" -prediction_path = './predicts_'+name_id+'/' -log_path = './logs_'+name_id+'/' -model_path = './models_'+name_id+'/' -save_model_path = './models_'+name_id+'/' - -# Create the folder if it does not exist -os.makedirs(model_path, exist_ok=True) -os.makedirs(prediction_path, exist_ok=True) - -name = 'two-headed-Unet' - -logdir = log_path + name - -if(os.path.isdir(logdir)): - shutil.rmtree(logdir) - -os.makedirs(logdir, exist_ok=True) -tensorboard_callback = tf.keras.callbacks.TensorBoard(log_dir=logdir) - -print('model location: '+ model_path+name+'.h5') - -def dumm_loss(*args, **kwargs): - return 0.0 - -# Compile the model with 'Adam' optimizer (0.001 is the default learning rate) and define the loss and metrics -model.compile(optimizer = Adam(), - loss=[dice_coef_loss, CombinedLoss()]) - -# define hyperparameters and callback modules -patience = 10 -maxepoch = 500 -callbacks = [ReduceLROnPlateau(monitor='val_loss', factor=0.7, patience=patience, min_lr=1e-9, verbose=1, mode='min'), - EarlyStopping(monitor='val_loss', patience=patience, verbose=0), - ModelCheckpoint(model_path+name+'.h5', monitor='val_loss', save_best_only=True, verbose=0), - TensorBoard(log_dir=logdir)] - -train_history = model.fit(train_dataset, validation_data = validate_dataset, batch_size = 16, epochs = maxepoch, verbose=1, callbacks = callbacks) \ No newline at end of file diff --git a/test_h5image.py b/test_h5image.py deleted file mode 100644 index 27772fac12798a8685a0178e9e293d8c8c2d6cba..0000000000000000000000000000000000000000 --- a/test_h5image.py +++ /dev/null @@ -1,31 +0,0 @@ -from data_util import DataLoader -from h5Image import H5Image -import random - - -# Load the HDF5 file using the H5Image class -h5_image = H5Image('/projects/bbym/shared/data/commonPatchData/256/AZ_Tucson.hdf5', mode='r') - -maps = h5_image.get_maps() -print(len(maps), maps) - -map = random.choice(maps) -layers = h5_image.get_layers(map) -print(layers, len(layers)) - - -patches_f = h5_image.get_patches(map, False) -# print(patches_f) -patches_t = h5_image.get_patches(map, True) -# print(patches_t) - -rgb = h5_image.get_map(map) -print("rgb.shape:",rgb.shape) - -sorted_patches = {k: v for k, v in sorted(patches_t.items(), key=lambda item: len(item[1]), reverse=True)} -loc, loc_layers = next(iter(sorted_patches.items())) - -row = int(loc.split("_")[0]) -col = int(loc.split("_")[1]) - -print(f"Biggest number of layers ({len(loc_layers)}) for {map} is at ( {row}, {col})") diff --git a/train_model.py b/train_model.py deleted file mode 100644 index 5e204aa1f68a280e66660919ed4388572291610a..0000000000000000000000000000000000000000 --- a/train_model.py +++ /dev/null @@ -1,151 +0,0 @@ -import os -import shutil -import random -import numpy as np -import tensorflow as tf -from datetime import datetime -from keras import backend as K -import matplotlib.pyplot as plt -import matplotlib.image as mpimg -import segmentation_models as sm -from keras.models import load_model -from tensorflow.keras.models import Model, load_model -from tensorflow.keras.layers import Input, Conv2D, RandomFlip, RandomRotation -from tensorflow.keras.optimizers import Adam, SGD, RMSprop -from tensorflow.keras.callbacks import ModelCheckpoint, EarlyStopping, ReduceLROnPlateau, TensorBoard -from unet_util import dice_coef_loss, dice_coef, jacard_coef, dice_coef_loss, Residual_CNN_block, multiplication, attention_up_and_concatenate, multiplication2, attention_up_and_concatenate2, UNET_224, evaluate_prediction_result - 
-################################################################ -##### The data loading process ################################# -################################################################ - - -# data_augmentation = tf.keras.Sequential([RandomFlip("horizontal_and_vertical"), RandomRotation(0.2)]) - -def load_img(filename, map_dir, legend_dir, seg_dir): - mapName = tf.strings.join([map_dir, filename[0]], separator='/') - legendName = tf.strings.join([legend_dir, filename[1]], separator='/') - - map_img = tf.io.read_file(mapName) - map_img = tf.cast(tf.io.decode_png(map_img), dtype=tf.float32) / 255.0 - - legend_img = tf.io.read_file(legendName) - legend_img = tf.cast(tf.io.decode_png(legend_img), dtype=tf.float32) / 255.0 - - map_img = tf.concat(axis=2, values=[map_img, legend_img]) - map_img = map_img*2.0 - 1.0 - map_img = tf.image.resize(map_img, [256, 256]) - - segName = tf.strings.join([seg_dir, filename[0]], separator='/') - seg_img = tf.io.read_file(segName) - seg_img = tf.io.decode_png(seg_img) - seg_img = tf.image.resize(seg_img, [256, 256]) - - return map_img, seg_img - -def load_train_img(filename): - return load_img(filename, - '/projects/bbym/shared/all_patched_data/training/poly/map_patches', - '/projects/bbym/nathanj/training/legend', - '/projects/bbym/shared/all_patched_data/training/poly/seg_patches') - -def load_validation_img(filename): - return load_img(filename, - '/projects/bbym/shared/all_patched_data/validation/poly/map_patches', - '/projects/bbym/nathanj/validation/legend', - '/projects/bbym/shared/all_patched_data/validation/poly/seg_patches') - -train_map_file = os.listdir('/projects/bbym/shared/all_patched_data/training/poly/map_patches') -random.shuffle(train_map_file) - -# Pre-filter map files based on existence of corresponding legend files -train_map_legend_names = [(x, '_'.join(x.split('_')[0:-2])+'.png') - for x in train_map_file - if os.path.exists(os.path.join('/projects/bbym/nathanj/training/legend', - '_'.join(x.split('_')[0:-2])+'.png'))] - -train_dataset = tf.data.Dataset.from_tensor_slices(train_map_legend_names) -train_dataset = train_dataset.map(load_train_img) -train_dataset = train_dataset.shuffle(5000, reshuffle_each_iteration=False).batch(128) - -validate_map_file = os.listdir('/projects/bbym/shared/all_patched_data/validation/poly/map_patches') - -# Pre-filter map files based on existence of corresponding legend files -validate_map_legend_names = [(x, '_'.join(x.split('_')[0:-2])+'.png') - for x in validate_map_file - if os.path.exists(os.path.join('/projects/bbym/nathanj/validation/legend', - '_'.join(x.split('_')[0:-2])+'.png'))] - -validate_dataset = tf.data.Dataset.from_tensor_slices(validate_map_legend_names) -validate_dataset = validate_dataset.map(load_validation_img) -validate_dataset = validate_dataset.batch(50) - -################################################################ -##### Prepare the model configurations ######################### -################################################################ -#You can change the id for each run so that all models and stats are saved separately. 
-name_id = "proecessed_legends_2" -input_data = './samples/' -prediction_path = './predicts_'+name_id+'/' -log_path = './logs_'+name_id+'/' -model_path = './models_'+name_id+'/' -save_model_path = './models_'+name_id+'/' - -# Create the folder if it does not exist -os.makedirs(input_data, exist_ok=True) -os.makedirs(model_path, exist_ok=True) -os.makedirs(prediction_path, exist_ok=True) - -# Avaiable backbones for Unet architechture -# 'vgg16' 'vgg19' 'resnet18' 'resnet34' 'resnet50' 'resnet101' 'resnet152' 'inceptionv3' -# 'inceptionresnetv2' 'densenet121' 'densenet169' 'densenet201' 'seresnet18' 'seresnet34' -# 'seresnet50' 'seresnet101' 'seresnet152', and 'attentionUnet' -backend = 'attentionUnet' - -name = 'Unet-'+ backend - -logdir = log_path + name - -if(os.path.isdir(logdir)): - shutil.rmtree(logdir) - -os.makedirs(logdir, exist_ok=True) -tensorboard_callback = tf.keras.callbacks.TensorBoard(log_dir=logdir) - -print('model location: '+ model_path+name+'.h5') - -# Fine-tuning flag -# Set to False to random initialize the model -finetune = False - -# Create U-net model with the chosen backbone -if (backend=="attentionUnet"): - # Attention U-net model - learning_rate = 0.0000359 - model = UNET_224(IMG_WIDTH=256, INPUT_CHANNELS=6) - model.compile(optimizer = Adam(learning_rate=learning_rate), - loss = dice_coef_loss, - metrics = [dice_coef,'accuracy']) -else: - # if (not finetune): - # Unet without ImageNet backends - base_model = sm.Unet(backend, classes = 1, encoder_weights=None, input_shape=(None, None, 6)) - - # else: - # # Unet with ImageNet backends - # base_model = sm.Unet(backend, classes = 1, encoder_weights = 'imagenet', encoder_freeze = finetune) - -# Compile the model with 'Adam' optimizer (0.001 is the default learning rate) and define the loss and metrics -model.compile(optimizer = Adam(), - loss = dice_coef_loss, - metrics=[dice_coef,'accuracy']) - -# define hyperparameters and callback modules -patience = 10 -maxepoch = 500 -callbacks = [ReduceLROnPlateau(monitor='val_loss', factor=0.7, patience=patience, min_lr=1e-9, verbose=1, mode='min'), - EarlyStopping(monitor='val_loss', patience=patience, verbose=0), - ModelCheckpoint(model_path+name+'.h5', monitor='val_loss', save_best_only=True, verbose=0), - TensorBoard(log_dir=logdir)] - -train_history = model.fit(train_dataset, validation_data = validate_dataset, batch_size = 16, epochs = maxepoch, verbose=1, callbacks = callbacks) \ No newline at end of file diff --git a/train_model_u_legends.py b/train_model_u_legends.py deleted file mode 100644 index a883a723317c3dd15683fde110fdf29474d0654b..0000000000000000000000000000000000000000 --- a/train_model_u_legends.py +++ /dev/null @@ -1,148 +0,0 @@ -import os -import shutil -import random -import numpy as np -import tensorflow as tf -from datetime import datetime -from keras import backend as K -import matplotlib.pyplot as plt -import matplotlib.image as mpimg -import segmentation_models as sm -from keras.models import load_model -from tensorflow.keras.models import Model, load_model -from tensorflow.keras.layers import Input, Conv2D, RandomFlip, RandomRotation -from tensorflow.keras.optimizers import Adam, SGD, RMSprop -from tensorflow.keras.callbacks import ModelCheckpoint, EarlyStopping, ReduceLROnPlateau, TensorBoard -from unet_util import dice_coef_loss, dice_coef, jacard_coef, dice_coef_loss, Residual_CNN_block, multiplication, attention_up_and_concatenate, multiplication2, attention_up_and_concatenate2, UNET_224, evaluate_prediction_result - 
-################################################################ -##### The data loading process ################################# -################################################################ - -def load_img(filename, map_dir, legend_dir, seg_dir): - mapName = tf.strings.join([map_dir, filename[0]], separator='/') - legendName = tf.strings.join([legend_dir, filename[1]], separator='/') - - map_img = tf.io.read_file(mapName) - map_img = tf.cast(tf.io.decode_png(map_img), dtype=tf.float32) / 255.0 - - legend_img = tf.io.read_file(legendName) - legend_img = tf.cast(tf.io.decode_png(legend_img), dtype=tf.float32) / 255.0 - - map_img = tf.concat(axis=2, values=[map_img, legend_img]) - map_img = map_img*2.0 - 1.0 - map_img = tf.image.resize(map_img, [256, 256]) - - segName = tf.strings.join([seg_dir, filename[0]], separator='/') - seg_img = tf.io.read_file(segName) - seg_img = tf.io.decode_png(seg_img) - seg_img = tf.image.resize(seg_img, [256, 256]) - - return map_img, seg_img - -def load_train_img(filename): - return load_img(filename, - '/projects/bbym/shared/all_patched_data/training/poly/map_patches', - '/projects/bbym/shared/all_patched_data/training/poly/legend', - '/projects/bbym/shared/all_patched_data/training/poly/seg_patches') - -def load_validation_img(filename): - return load_img(filename, - '/projects/bbym/shared/all_patched_data/validation/poly/map_patches', - '/projects/bbym/shared/all_patched_data/validation/poly/legend', - '/projects/bbym/shared/all_patched_data/validation/poly/seg_patches') - -train_map_file = os.listdir('/projects/bbym/shared/all_patched_data/training/poly/map_patches') -random.shuffle(train_map_file) - -# Pre-filter map files based on existence of corresponding legend files -train_map_legend_names = [(x, '_'.join(x.split('_')[0:-2])+'.png') - for x in train_map_file - if os.path.exists(os.path.join('/projects/bbym/shared/all_patched_data/training/poly/legend', - '_'.join(x.split('_')[0:-2])+'.png'))] - -train_dataset = tf.data.Dataset.from_tensor_slices(train_map_legend_names) -train_dataset = train_dataset.map(load_train_img) -train_dataset = train_dataset.shuffle(5000, reshuffle_each_iteration=False).batch(128) - -validate_map_file = os.listdir('/projects/bbym/shared/all_patched_data/validation/poly/map_patches') - -# Pre-filter map files based on existence of corresponding legend files -validate_map_legend_names = [(x, '_'.join(x.split('_')[0:-2])+'.png') - for x in validate_map_file - if os.path.exists(os.path.join('/projects/bbym/shared/all_patched_data/validation/poly/legend', - '_'.join(x.split('_')[0:-2])+'.png'))] - -validate_dataset = tf.data.Dataset.from_tensor_slices(validate_map_legend_names) -validate_dataset = validate_dataset.map(load_validation_img) -validate_dataset = validate_dataset.batch(50) - -################################################################ -##### Prepare the model configurations ######################### -################################################################ -#You can change the id for each run so that all models and stats are saved separately. 
-name_id = "unproecessed_legends" -input_data = './samples/' -prediction_path = './predicts_'+name_id+'/' -log_path = './logs_'+name_id+'/' -model_path = './models_'+name_id+'/' -save_model_path = './models_'+name_id+'/' - -# Create the folder if it does not exist -os.makedirs(input_data, exist_ok=True) -os.makedirs(model_path, exist_ok=True) -os.makedirs(prediction_path, exist_ok=True) - -# Avaiable backbones for Unet architechture -# 'vgg16' 'vgg19' 'resnet18' 'resnet34' 'resnet50' 'resnet101' 'resnet152' 'inceptionv3' -# 'inceptionresnetv2' 'densenet121' 'densenet169' 'densenet201' 'seresnet18' 'seresnet34' -# 'seresnet50' 'seresnet101' 'seresnet152', and 'attentionUnet' -backend = 'attentionUnet' - -name = 'Unet-'+ backend - -logdir = log_path + name - -if(os.path.isdir(logdir)): - shutil.rmtree(logdir) - -os.makedirs(logdir, exist_ok=True) -tensorboard_callback = tf.keras.callbacks.TensorBoard(log_dir=logdir) - -print('model location: '+ model_path+name+'.h5') - -# Fine-tuning flag -# Set to False to random initialize the model -finetune = False - -# Create U-net model with the chosen backbone -if (backend=="attentionUnet"): - # Attention U-net model - learning_rate = 0.0000359 - model = UNET_224(IMG_WIDTH=256, INPUT_CHANNELS=6) - model.compile(optimizer = Adam(learning_rate=learning_rate), - loss = dice_coef_loss, - metrics = [dice_coef,'accuracy']) -else: - # if (not finetune): - # Unet without ImageNet backends - base_model = sm.Unet(backend, classes = 1, encoder_weights=None, input_shape=(None, None, 6)) - - # else: - # # Unet with ImageNet backends - # base_model = sm.Unet(backend, classes = 1, encoder_weights = 'imagenet', encoder_freeze = finetune) - - # Compile the model with 'Adam' optimizer (0.001 is the default learning rate) and define the loss and metrics - base_model.compile(optimizer = Adam(), - loss = dice_coef_loss, - metrics=[dice_coef,'accuracy']) - -# define hyperparameters and callback modules -patience = 10 -maxepoch = 500 -callbacks = [ReduceLROnPlateau(monitor='val_loss', factor=0.7, patience=patience, min_lr=1e-9, verbose=1, mode='min'), - EarlyStopping(monitor='val_loss', patience=patience, verbose=0), - ModelCheckpoint(model_path+name+'.h5', monitor='val_loss', save_best_only=True, verbose=0), - TensorBoard(log_dir=logdir)] - -train_history = model.fit(train_dataset, validation_data = validate_dataset, batch_size = 16, epochs = maxepoch, verbose=1, callbacks = callbacks) \ No newline at end of file