Commit

feat(gh): update bem_solrad.
saeranv committed Apr 30, 2021
1 parent 5aacc6d commit 17edcc9
Showing 15 changed files with 332 additions and 165 deletions.
Binary file removed data/gh/_bem_simulate_1.gh
Binary file removed data/gh/_bem_train_1.gh
Binary file added data/gh/bem_depth_simulate_2.gh
Binary file modified data/gh/bem_simulate_2.gh
Binary file added data/gh/bem_solrad_simulate_2.gh
Binary file modified data/gh/cam_rh.3dm
Binary file modified data/gh/cam_rh.3dmbak
111 changes: 111 additions & 0 deletions deeprad/model.py
@@ -0,0 +1,111 @@
import os
import numpy as np

# torch imports
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
from torchvision import datasets, transforms
from torch.utils.data import Dataset, DataLoader

# Image
from PIL import Image

# set seed
np.random.seed(2)

class Autoencoder(nn.Module):
    def __init__(self, device=None):
        super(Autoencoder, self).__init__()
        self.device = device

        # TODO: add nonlinear layer?
        self.encoder = nn.Sequential(  # like the Composition layer you built
            nn.Conv2d(6, 16, 3, stride=2, padding=1),
            nn.ReLU(),
            nn.Conv2d(16, 32, 3, stride=2, padding=1),
            nn.ReLU(),
            nn.Conv2d(32, 64, 7)
        )

        # TODO: add softmax?
        self.decoder = nn.Sequential(
            nn.ConvTranspose2d(64, 32, 7),
            nn.ReLU(),
            nn.ConvTranspose2d(32, 16, 3, stride=2, padding=1, output_padding=1),
            nn.ReLU(),
            nn.ConvTranspose2d(16, 3, 3, stride=2, padding=1, output_padding=1),
            # nn.Sigmoid()
            # nn.Tanh
        )

    def forward(self, x):
        x = self.encoder(x)
        x = self.decoder(x)
        return x
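
A quick shape round-trip is a useful sanity check that the decoder mirrors the encoder. A minimal smoke-test sketch (not part of the commit), assuming the 1300 x 108 crop that CustomDataSet below produces:

import torch

model = Autoencoder()
x = torch.randn(1, 6, 108, 1300)  # batch of 1: two RGB channel images stacked -> 6 channels
y = model(x)
print(y.shape)  # torch.Size([1, 3, 108, 1300]) -- matches the 3-channel label image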

class CustomDataSet(Dataset):
    def __init__(self, main_dir, ch_dir, transform, device):
        self.main_dir = main_dir
        self.ch_dir = ch_dir

        self.transform = transform
        all_imgs = os.listdir(main_dir)
        ch_imgs = os.listdir(ch_dir)

        all_imgs = self.sort_img(all_imgs)
        ch_imgs = self.sort_img(ch_imgs)

        # split channels from flat list
        ch_1, ch_2 = self.split_channels(ch_imgs)

        self.total_imgs = all_imgs
        self.ch_1 = ch_1
        self.ch_2 = ch_2

        self.device = device

    @staticmethod
    def img_fpath_idx(ss):
        """Get model id prefix from image fpath."""
        return int(ss.split("_")[0])

    @staticmethod
    def sort_img(img_fpaths):
        return sorted(img_fpaths, key=lambda ss: CustomDataSet.img_fpath_idx(ss))

    def split_channels(self, ch_imgs):
        """Split an interleaved [ch1, ch2, ch1, ch2, ...] list into two lists."""
        c1, c2 = [], []
        channels = iter(ch_imgs)
        for x in channels:
            c1.append(x)
            c2.append(next(channels))
        return c1, c2

    def __len__(self):
        return len(self.total_imgs)

    def __getitem__(self, idx):
        """Load image from directory given index."""
        crop_x, crop_y = 0, 0
        crop_w, crop_h = 1300, 108

        img_loc = os.path.join(self.main_dir, self.total_imgs[idx])
        ch1_loc = os.path.join(self.ch_dir, self.ch_1[idx])
        ch2_loc = os.path.join(self.ch_dir, self.ch_2[idx])

        # Load labels/input channels
        image = Image.open(img_loc).convert("RGB")
        channel_1 = Image.open(ch1_loc).convert("RGB")
        channel_2 = Image.open(ch2_loc).convert("RGB")

        # Crop inputs
        image = image.crop((crop_x, crop_y, crop_x + crop_w, crop_y + crop_h))
        channel_1 = channel_1.crop((crop_x, crop_y, crop_x + crop_w, crop_y + crop_h))
        channel_2 = channel_2.crop((crop_x, crop_y, crop_x + crop_w, crop_y + crop_h))

        # Stack the two channel images into a 6-channel input tensor
        x_in = torch.cat((self.transform(channel_1), self.transform(channel_2)), dim=0)
        y_lbl = self.transform(image)

        return x_in, y_lbl
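
The channel directory is assumed to hold two images per model id, so after sorting the list interleaves channel 1 and channel 2. A small sketch of that convention (the file names below are hypothetical; only the "<id>_" prefix matters to img_fpath_idx):

ch_imgs = ["0_ch_a.png", "0_ch_b.png", "1_ch_a.png", "1_ch_b.png"]
c1, c2 = CustomDataSet.split_channels(None, ch_imgs)  # self is unused by this method
print(c1)  # ['0_ch_a.png', '1_ch_a.png']
print(c2)  # ['0_ch_b.png', '1_ch_b.png']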
89 changes: 89 additions & 0 deletions deeprad/traintest.py
@@ -0,0 +1,89 @@
import os
import glob
import time
import numpy as np

# torch
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
from torchvision import datasets, transforms
from torch.utils.data import Dataset, DataLoader

# images
import matplotlib.pyplot as plt
import PIL

# colormap helpers used by viz_loss (defined in deeprad/utils.py)
from deeprad.utils import color2rad, RADCMAP

def viz_loss(outputs):
    grid = 2
    num_epochs = len(outputs)
    for k in range(0, num_epochs, 5):  # plot every 5th epoch
        plt.figure(figsize=(grid, 2))
        # move tensors to CPU before converting to numpy
        imgs = outputs[k][1].detach().cpu().numpy()
        recon = outputs[k][2].detach().cpu().numpy()
        for i, item in enumerate(imgs):
            if i >= grid:
                break
            plt.subplot(2, grid, i + 1)
            img = item[0]
            plt.imshow(color2rad(img), cmap=RADCMAP, vmin=0, vmax=255)
            # imgfpath = os.path.join(os.getcwd(), 'img_{}.jpg'.format(k))
            # utils.write_img(img, imgfpath)

        for i, item in enumerate(recon):
            if i >= grid:
                break
            plt.subplot(2, grid, grid + i + 1)
            img = item[0]
            plt.imshow(color2rad(img), cmap=RADCMAP, vmin=0, vmax=255)
            # imgfpath = os.path.join(os.getcwd(), 'recon_{}.jpg'.format(i))
            # utils.write_img(item[0], imgfpath)

        plt.show()

def training(model, train_data, device, num_epochs=5, batch_size=40, learning_rate=1e-3):
    torch.manual_seed(42)
    criterion = nn.MSELoss()  # mean square error loss

    optimizer = torch.optim.Adam(model.parameters(),
                                 lr=learning_rate,
                                 weight_decay=1e-5)  # <--

    # train_data is a Subset from random_split; passing train_data.dataset here
    # would iterate the full dataset (train + test), so use the subset itself.
    train_loader = DataLoader(train_data, batch_size=batch_size, shuffle=True)

    outputs = []
    for epoch in range(num_epochs):
        # start = time.time()

        for inputs, labels in train_loader:
            inputs, labels = inputs.to(device), labels.to(device)
            recon = model(inputs)
            loss = criterion(recon, labels)
            optimizer.zero_grad()
            loss.backward()
            optimizer.step()

        # now = time.time()
        # elapsed = (now - start)/60
        print('Epoch:{}, Loss:{:.4f}'.format(epoch + 1, float(loss)))
        # print('Epoch:{}, Loss:{:.4f}, Time: {:.4f}'.format(epoch + 1, float(loss), elapsed))
        # outputs.append((epoch, img, recon),)
        outputs.append((epoch, labels, recon),)
    return outputs

def testing(model, test_data, device, batch_size=64, learning_rate=1e-3):
    torch.manual_seed(42)

    # As in training(), use the Subset directly rather than test_data.dataset.
    test_loader = torch.utils.data.DataLoader(test_data,
                                              batch_size=batch_size,
                                              shuffle=True)
    outputs = []

    with torch.no_grad():  # no gradients needed at test time
        for inputs, _ in test_loader:
            inputs = inputs.to(device)
            recon = model(inputs)
            outputs.append((inputs, recon),)

    return outputs

23 changes: 15 additions & 8 deletions deeprad/utils.py
@@ -17,7 +17,7 @@
    os.getcwd(), '..', 'deeprad/data/ghout/'))
DEEPRAD_TRAINTEST_DIR = os.path.abspath(os.path.join(
    os.getcwd(), '..', 'deeprad/data/traintest/'))

RADCMAP = plt.get_cmap('RdYlBu_r')

def pp(x, *args):
    pprint(x) if not args else print(x, *args)
@@ -65,7 +65,21 @@ def load_img_rgb(img_fpath: str) -> np.ndarray:
def write_img(img: np.ndarray, img_fpath: str) -> bool:
    return cv2.imwrite(img_fpath, img)

def color2rad(img, mask=False):
    """img is an np.ndarray of floats between 0 and 1."""
    img = (img * 255).astype(np.uint8)
    # TODO: add a mask here??
    if mask:
        # np.where promotes to float64, so pure-white (255) pixels become NaN
        img = np.where(img < (255 - 1e-10), img, np.nan)
    return img
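
A small sketch of the mask behavior (illustrative values, not from the commit); the uint8-to-float64 promotion by np.where is what makes the NaN substitution valid:

import numpy as np

img = np.array([[0.0, 0.5, 1.0]])
out = color2rad(img, mask=True)
# (img * 255).astype(np.uint8) -> [0, 127, 255]; the pure-white pixel becomes NaN
# out == [[0., 127., nan]]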

def load_json(json_fpath):
    with open(json_fpath, 'r') as fp:
        val_dict = json.load(fp)

    return val_dict

# TODO: these two functions don't belong in utils
def extract_floorplan_ids(data_num, target_data_dir=None, verbose=True):
    """Safely extract root model directories for polygon extraction."""

@@ -89,13 +103,6 @@ def extract_floorplan_ids(data_num, target_data_dir=None, verbose=True):
    return data_num, floorplan_id_arr


def load_json(json_fpath):
    with open(json_fpath, 'r') as fp:
        val_dict = json.load(fp)

    return val_dict


def load_floorplan_data(targ_id_dirs, data_num):
    """Load floorplan data."""

2 changes: 2 additions & 0 deletions deeprad_win_env.sh
@@ -0,0 +1,2 @@
conda install -y numpy
conda install -y matplotlib
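
The scripts in this commit also import torch, torchvision, and cv2, so if deeprad_win_env.sh is meant to bootstrap the full environment, something like the lines below would be needed as well (a sketch; the channels and package set are assumptions, not part of the commit):

conda install -y pytorch torchvision -c pytorch
conda install -y -c conda-forge opencv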
4 changes: 4 additions & 0 deletions gpu_check.py
@@ -0,0 +1,4 @@
import torch

print(torch.cuda.is_available())
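
For a slightly more informative check, the script could also report which device would be used (a sketch, not part of the commit):

import torch

if torch.cuda.is_available():
    print("CUDA device:", torch.cuda.get_device_name(0))
else:
    print("CUDA not available; training will fall back to CPU")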

8 changes: 4 additions & 4 deletions nn_autoencoder.py
@@ -142,21 +142,21 @@ def sort_img(self, l):
        # print(sorted_list[:5])

        return sorted_list

    def split_channels(self, l):
        c1, c2 = [], []
        channels = iter(l)
        for x in channels:
            c1.append(x)
            c2.append(next(channels))

        return c1, c2

    def __len__(self):
        return len(self.total_imgs)

    def __getitem__(self, idx):

        img_loc = os.path.join(self.main_dir, self.total_imgs[idx])
        ch1_loc = os.path.join(self.ch_dir, self.ch_1[idx])
        ch2_loc = os.path.join(self.ch_dir, self.ch_2[idx])
@@ -200,7 +200,7 @@ def __getitem__(self, idx):
    channel_folder_path, transform=transforms.ToTensor())
train_size = int(len(dataset) * .8)
test_size = len(dataset) - train_size
train_data, test_data = torch.utils.data.random_split(dataset, [train_size, test_size])
train_data, test_data = torch.utils.data.random_split(dataset, [train_size, test_size])

#########################################
########### Train Model #################
173 changes: 20 additions & 153 deletions notebooks/images2vec.ipynb

Large diffs are not rendered by default.

87 changes: 87 additions & 0 deletions run_traintest.py
@@ -0,0 +1,87 @@
import sys
import os
import glob
import time

# torch
import torch
from torchvision import datasets, transforms
from torch.utils.data import Dataset, DataLoader

# SciPy stack
import numpy as np
import matplotlib.pyplot as plt

# set path
deeprad_path = os.path.abspath(os.path.join(os.getcwd(), '..'))
print(deeprad_path)
if deeprad_path not in sys.path:
    sys.path.insert(0, deeprad_path)


# Import deeprad models
from deeprad import utils
from deeprad.traintest import viz_loss, training, testing
from deeprad.model import Autoencoder, CustomDataSet
fd, pp = utils.fd, utils.pp

# Set seeds/device
np.random.seed(2) # TODO: confirm right location?
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
print("You are using device: %s" % device)

# Data folders
TARGET_FOLDER_PATH = os.path.join("data/traintest/out_data")
CHANNEL_FOLDER_PATH = os.path.join("data/traintest/in_data")
assert os.path.isdir(TARGET_FOLDER_PATH) and os.path.isdir(CHANNEL_FOLDER_PATH)


#---------------------------
# main
#---------------------------

# Hyperparameters
max_epochs = 30
learning_rate = 1e-3
model_fpath = 'model_{}.pt'.format('epoch_{}_lr_{}'.format(max_epochs, learning_rate))
model_fpath = os.path.join(os.getcwd(), '..', 'models', model_fpath)

RUN_TRAIN = True

# Load training/test set
dataset = CustomDataSet(TARGET_FOLDER_PATH, CHANNEL_FOLDER_PATH,
                        transform=transforms.ToTensor(), device=device)
train_size = int(len(dataset) * .8)
test_size = len(dataset) - train_size
train_data, test_data = torch.utils.data.random_split(dataset, [train_size, test_size])

model = Autoencoder(device)
model = model.to(device)

# train_loader = DataLoader(train_data.dataset, batch_size=2, shuffle=True)
# xtrain, y = next(iter(train_loader))
# xtrain, y = xtrain.to(device), y.to(device)
# print(xtrain.is_cuda, y.is_cuda)
# assert False

# training loop
if RUN_TRAIN:
    print("Training {} data, over {} epochs".format(train_size, max_epochs))
    outputs_train = training(model, train_data, device, num_epochs=max_epochs,
                             learning_rate=learning_rate)

    print('Viewing Train Images')
    viz_loss(outputs_train)

    # Save Model
    torch.save(model, model_fpath)

# testing loop
model = torch.load(model_fpath)
model.eval()
outputs_test = testing(model, test_data, device)

print('Viewing Test Images')
viz_loss(outputs_test)
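
One caveat with torch.save(model, ...): it pickles the whole object, so loading later requires the Autoencoder class to be importable at the same module path. The state_dict idiom is the more portable alternative (a sketch, not what this commit does):

torch.save(model.state_dict(), model_fpath)

model = Autoencoder(device).to(device)
model.load_state_dict(torch.load(model_fpath, map_location=device))
model.eval()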

