Using the Library#
Generating Simulated Events#
Generation of simulated events can be done with insar_eventnet.sarsim:gen_simulated_deformation()
for positive events (containing deformation) and insar_eventnet.sarsim:gen_sim_noise()
for negative events (noise only).
The following example generates one positive and one negative event and plots them:
import matplotlib.pyplot as plt
from insar_eventnet import sarsim
seed = 232323
tile_size = 512
event_type = "quake"
# Generate simulated positive (deformation) interferogram
unwrapped_def, masked_def, wrapped_def, presence_def = sarsim.gen_simulated_deformation(
    seed=seed, tile_size=tile_size, event_type=event_type
)
# Generate simulated negative (noise only) interferogram
unwrapped_mix, masked_mix, wrapped_mix, presence_mix = sarsim.gen_sim_noise(
    seed=seed, tile_size=tile_size
)
# Print event presence labels for each interferogram
print(f"Deformation Presence: {presence_def}")
print(f"Mixed Noise Presence: {presence_mix}")
_, [axs_unwrapped_def, axs_wrapped_def, axs_mask_def] = plt.subplots(
    1, 3, sharex=True, sharey=True, tight_layout=True
)
_, [axs_unwrapped_mix, axs_wrapped_mix, axs_mask_mix] = plt.subplots(
    1, 3, sharex=True, sharey=True, tight_layout=True
)
axs_unwrapped_def.set_title("Deformation Event")
axs_unwrapped_mix.set_title("Atmospheric/Topographic Noise")
axs_unwrapped_def.imshow(unwrapped_def, origin="lower", cmap="jet")
axs_unwrapped_mix.imshow(unwrapped_mix, origin="lower", cmap="jet")
axs_wrapped_def.imshow(wrapped_def, origin="lower", cmap="jet")
axs_wrapped_mix.imshow(wrapped_mix, origin="lower", cmap="jet")
axs_mask_def.imshow(masked_def, origin="lower", cmap="jet", vmin=0.0, vmax=1.0)
axs_mask_mix.imshow(masked_mix, origin="lower", cmap="jet", vmin=0.0, vmax=1.0)
plt.show()
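The generated arrays can also be saved for later use. The following is a minimal sketch, assuming the returned values are NumPy arrays (the filenames here are illustrative):

import numpy as np

# Persist the wrapped interferogram and its ground-truth mask
np.save("wrapped_def.npy", wrapped_def)
np.save("masked_def.npy", masked_def)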
Generating Simulated Datasets#
Simulated datasets can be generated using the insar_eventnet.io:make_simulated_dataset()
function.
The following example creates a simulated dataset in data/working/synthetic/simulated_dataset:
from insar_eventnet import io
from insar_eventnet.config import SYNTHETIC_DIR
name = "simulated_dataset"
amount = 2000
seed = 0 # if set to 0, the seed is randomized each run
tile_size = 512
crop_size = 512
split = 0.2 # Training/Testing split
io.create_directories() # Initialize directory structure for training data
name, count, dir_name, distribution, dataset_info = io.make_simulated_dataset(
    name, SYNTHETIC_DIR, amount, seed, tile_size, crop_size
)
dataset_path = f"{SYNTHETIC_DIR}/{dir_name}"
num_train, num_validation = io.split_dataset(dataset_path, split)
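The values returned by insar_eventnet.io:make_simulated_dataset() and insar_eventnet.io:split_dataset() can be used to verify the result. A minimal sketch, assuming distribution summarizes the positive/negative split:

print(f"Generated {count} interferograms in {dir_name}")
print(f"Event distribution: {distribution}")
print(f"Training samples: {num_train}, Validation samples: {num_validation}")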
Note
Notice the use of insar_eventnet.io:create_directories()
to create the data directory in which the simulated dataset is stored.
Generating Masks from Wrapped Interferograms#
The insar_eventnet.inference:mask() function
can be used to infer masks and presence values.
The following example downloads the pretrained models and uses them to infer and plot a mask and presence value for a wrapped interferogram at a user-provided path.
import matplotlib.pyplot as plt
from insar_eventnet import inference, io
tile_size = 512
crop_size = 512
mask_model_path = "data/output/models/mask_model"
pres_model_path = "data/output/models/pres_model"
image_path = input("Image Path: ") # Prompt user for input interferogram
image_name = image_path.split("/")[-1].split(".")[0]
output_path = f"masks_inferred/{image_name}_mask.tif"
image, gdal_dataset = io.get_image_array(image_path)
# The initialize function downloads the pretrained models
io.initialize()
mask, presence = inference.mask(
    mask_model_path=mask_model_path,
    pres_model_path=pres_model_path,
    image_path=image_path,
    tile_size=tile_size,
    crop_size=crop_size,
)
# Threshold the presence score to classify the interferogram
if presence > 0.7:
    print("Positive")
else:
    print("Negative")
_, [axs_wrapped, axs_mask] = plt.subplots(1, 2, sharex=True, sharey=True)
axs_wrapped.set_title("Wrapped")
axs_mask.set_title("Segmentation Mask")
axs_wrapped.imshow(image, origin="lower", cmap="jet")
axs_mask.imshow(mask, origin="lower", cmap="jet")
plt.show()
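Since the example keeps the GDAL dataset returned by io.get_image_array() and computes output_path, the inferred mask can also be written out as a georeferenced GeoTIFF. The following sketch uses the standard GDAL Python API rather than an insar_eventnet helper, and assumes mask is a 2D array with the same dimensions as the input raster:

import os

from osgeo import gdal

os.makedirs("masks_inferred", exist_ok=True)

driver = gdal.GetDriverByName("GTiff")
out_dataset = driver.Create(
    output_path,
    gdal_dataset.RasterXSize,
    gdal_dataset.RasterYSize,
    1,
    gdal.GDT_Float32,
)

# Copy the georeferencing from the input interferogram
out_dataset.SetGeoTransform(gdal_dataset.GetGeoTransform())
out_dataset.SetProjection(gdal_dataset.GetProjection())

out_dataset.GetRasterBand(1).WriteArray(mask)
out_dataset.FlushCache()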
Note
The initialize function both creates the directory structure and downloads models for the user.
Training UNet and EventNet Models#
The insar_eventnet.training:train()
function can be used to train models.
The following example trains a new model on a simulated dataset and then uses that model to run inference on an image.
import matplotlib.pyplot as plt
from insar_eventnet import inference, io, training
from insar_eventnet.config import SYNTHETIC_DIR
# First generate a simulated dataset
dataset_name = "simulated_training_dataset"
amount = 2000
seed = 0 # if set to 0, the seed is randomized each run
tile_size = 512
crop_size = 512
split = 0.2 # Training/Testing split
io.create_directories() # Initialize directory structure for training data
name, count, dir_name, distribution, dataset_info = io.make_simulated_dataset(
    dataset_name, SYNTHETIC_DIR, amount, seed, tile_size, crop_size
)
dataset_path = f"{SYNTHETIC_DIR}/{dir_name}"
num_train, num_validation = io.split_dataset(dataset_path, split)
# Then train a unet masking model on the simulated dataset
model_name = "mask_model_example"
model_type = "unet"
input_shape = tile_size
epochs = 15
filters = 64
batch_size = 1
learning_rate = 1e-4
use_wandb = False
using_aws = False
using_jupyter = False
logs_dir = ""
mask_model, mask_history = training.train(
    model_name,
    dataset_path,
    model_type,
    input_shape,
    epochs,
    filters,
    batch_size,
    learning_rate,
    use_wandb,
    using_aws,
    using_jupyter,
    logs_dir,
)
# Now, create a dataset of simulated events with masks from UNet
# See the model creation notebook for an explanation of why we don't train the binary
# prediction model on ground truth masks
name = "classfification_model_dataset"
mask_model_path = "data/output/models/checkpoints/" + model_name
amount = 1000
split = 0.1
name, count, dir_name, _, _ = io.make_simulated_dataset(
    name, SYNTHETIC_DIR, amount, seed, tile_size, crop_size, model_path=mask_model_path
)
dataset_path = f"{SYNTHETIC_DIR}/{dir_name}"
num_train, num_validation = io.split_dataset(dataset_path, split)
# Now train the binary classification model
model_name_bin = "pres_model_example"
model_type = "eventnet"
input_shape = crop_size
epochs = 5
filters = 64
batch_size = 1
learning_rate = 5e-3
use_wandb = False
using_aws = False
using_jupyter = False
logs_dir = ""
binary_model, binary_history = training.train(
    model_name_bin,
    dataset_path,
    model_type,
    input_shape,
    epochs,
    filters,
    batch_size,
    learning_rate,
    use_wandb,
    using_aws,
    using_jupyter,
    logs_dir,
)
# Now, we can run inference on these models!
mask_model_path = "data/output/models/mask_model_example"
pres_model_path = "data/output/models/pres_model_example"
image_path = input("Image Path: ") # Prompt user for input interferogram
image_name = image_path.split("/")[-1].split(".")[0]
output_path = f"masks_inferred/{image_name}_mask.tif"
image, gdal_dataset = io.get_image_array(image_path)
mask, presence = inference.mask(
    mask_model_path=mask_model_path,
    pres_model_path=pres_model_path,
    image_path=image_path,
    tile_size=tile_size,
    crop_size=crop_size,
)
# Threshold the presence score to classify the interferogram
if presence > 0.7:
    print("Positive")
else:
    print("Negative")
_, [axs_wrapped, axs_mask] = plt.subplots(1, 2, sharex=True, sharey=True)
axs_wrapped.set_title("Wrapped")
axs_mask.set_title("Segmentation Mask")
axs_wrapped.imshow(image, origin="lower", cmap="jet")
axs_mask.imshow(mask, origin="lower", cmap="jet")
plt.show()
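After training, the returned history objects can be used to check convergence. The sketch below assumes training.train() returns a Keras-style History object whose history dictionary contains "loss" and "val_loss" entries; if the actual return type differs, adjust accordingly:

def plot_history(history, title):
    """Plot training and validation loss curves from a Keras History."""
    plt.figure()
    plt.plot(history.history["loss"], label="training loss")
    plt.plot(history.history["val_loss"], label="validation loss")
    plt.title(title)
    plt.xlabel("Epoch")
    plt.ylabel("Loss")
    plt.legend()
    plt.show()

plot_history(mask_history, "UNet Masking Model")
plot_history(binary_history, "EventNet Presence Model")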