
Commit

feat: Add transformer model
james397520 committed Oct 14, 2023
1 parent 2062187 commit 93f0ccf
Showing 3 changed files with 60 additions and 12 deletions.
8 changes: 5 additions & 3 deletions inference.py
@@ -4,7 +4,7 @@
 import torch.nn as nn
 import torch.nn.functional as F
 from torch.utils.data import Dataset, DataLoader
-from model import HousePriceModel
+from model import HousePriceModel, TransformerRegressor
 from dataloader import HousePriceTestDataset, min_max_denormalize, z_score_denormalize
 import platform

@@ -41,10 +41,12 @@ def inference():
     # Load Model
     model_path = 'model.pth'  # Update with the path of your trained model file
     input_dim = len(normalize_columns.keys())
-    model = HousePriceModel(input_dim)
+    model = TransformerRegressor(input_dim, 4, 6)
     if gpu:
-        model = HousePriceModel(input_dim).cuda()
+        model = model.cuda()
     else:
-        model = HousePriceModel(input_dim)
+        model = model
     model.load_state_dict(torch.load(model_path))
     model.eval()

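Note on the loading code above: torch.load(model_path) restores tensors onto the device they were saved from, so a checkpoint written on a GPU machine will fail to load on a CPU-only host. A device-safe variant, as a sketch rather than part of this commit (input_dim = 12 is a hypothetical stand-in for len(normalize_columns)):

import torch

from model import TransformerRegressor

input_dim = 12  # hypothetical feature count; must be divisible by num_heads=6
model = TransformerRegressor(input_dim, 4, 6)

# map_location remaps saved CUDA tensors onto whatever device is available,
# so the same checkpoint loads on both GPU and CPU-only hosts
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
state = torch.load('model.pth', map_location=device)
model.load_state_dict(state)
model.to(device)
model.eval()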
47 changes: 46 additions & 1 deletion model.py
@@ -13,4 +13,49 @@ def forward(self, x):
         x = F.relu(self.fc1(x))
         x = F.relu(self.fc2(x))
         x = self.fc3(x)
-        return x
+        return x
+
+
+
+# import torch
+# import torch.nn as nn
+# import torch.nn.functional as F
+
+class TransformerRegressor(nn.Module):
+    def __init__(self, feature_size, num_layers, num_heads, dropout=0.1):
+        super(TransformerRegressor, self).__init__()
+
+        # Creating the positional encoder
+        self.positional_encoder = nn.Embedding(feature_size, feature_size)
+
+        # Creating the transformer encoder layer
+        self.transformer_encoder_layer = nn.TransformerEncoderLayer(
+            d_model=feature_size,
+            nhead=num_heads,
+            dropout=dropout
+        )
+
+        # Stacking the transformer encoder layers
+        self.transformer_encoder = nn.TransformerEncoder(
+            self.transformer_encoder_layer,
+            num_layers=num_layers
+        )
+
+        # Final fully connected layer for regression output
+        self.fc = nn.Linear(feature_size, 1)
+
+    def forward(self, x):
+        # Adding positional encoding
+        positions = torch.arange(0, x.size(1), device=x.device).unsqueeze(0).repeat(x.size(0), 1)
+        x = x.unsqueeze(1)
+        x = x + self.positional_encoder(positions).permute(0, 1, 2)
+
+        # Applying the transformer encoder
+        x = self.transformer_encoder(x.permute(1, 0, 2))
+
+        # Aggregating the output features
+        x = x.mean(dim=0)
+
+        # Final regression output
+        x = self.fc(x)
+        return x
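A quick sanity check of the new module's shapes (a standalone sketch; feature_size = 12 is hypothetical, chosen because nn.TransformerEncoderLayer requires d_model to be divisible by nhead, and the call sites in this commit pass num_heads = 6):

import torch

from model import TransformerRegressor

model = TransformerRegressor(feature_size=12, num_layers=4, num_heads=6)

x = torch.randn(8, 12)   # a batch of 8 normalized feature vectors
out = model(x)           # unsqueeze + embedding broadcast -> (8, 12, 12);
                         # encoder runs on (12, 8, 12), mean over dim 0 -> (8, 12)
print(out.shape)         # torch.Size([8, 1])

Worth noting: the trailing .permute(0, 1, 2) on the positional embedding is an identity permutation, and positions indexes feature slots rather than a time axis, so each input row is treated as a length-feature_size sequence whose positional embedding is added by broadcasting.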
17 changes: 9 additions & 8 deletions train.py
@@ -2,7 +2,7 @@
 import pandas as pd
 from torch.utils.data import Dataset, DataLoader
 from sklearn.preprocessing import StandardScaler
-from model import HousePriceModel
+from model import HousePriceModel, TransformerRegressor
 from dataloader import HousePriceTrainDataset
 import platform

@@ -40,12 +40,16 @@ def main():
     train_dataset = HousePriceTrainDataset(data, target_column, normalize_columns)
     train_loader = DataLoader(train_dataset, batch_size=batch_size, shuffle=True)
 
+    # print(train_dataset[0]["features"].shape)
+    input_dim = len(normalize_columns.keys())
+    print(input_dim)
     # Initialize model
+    # model = HousePriceModel(input_dim)
+    model = TransformerRegressor(input_dim, 4, 6)
 
     if gpu:
-        model = HousePriceModel(train_dataset[0]["features"].shape[0]).cuda()
+        model = model.cuda()
     else:
-        model = HousePriceModel(train_dataset[0]["features"].shape[0])
+        model = model
 
     # Loss and optimizer
     criterion = torch.nn.MSELoss()
@@ -60,10 +64,7 @@
         else:
             data = batch['features']
             targets = batch['target']
-        # for batch_idx, (data, targets) in enumerate(train_loader):
-        #     data, targets = data.cuda(), targets.cuda() #cuda()
-        # print(data)
-        # Forward pass
+
         scores = model(data)
         loss = criterion(scores.squeeze(1), targets)
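For reference, the backward half of this training step (elided by the truncated hunk) would follow the standard PyTorch pattern; a self-contained sketch, with the optimizer choice, learning rate, and dimensions assumed rather than taken from the commit:

import torch

from model import TransformerRegressor

model = TransformerRegressor(12, 4, 6)                      # hypothetical input_dim = 12
criterion = torch.nn.MSELoss()
optimizer = torch.optim.Adam(model.parameters(), lr=1e-3)   # assumed optimizer

data = torch.randn(8, 12)   # stand-in batch of features
targets = torch.randn(8)    # stand-in regression targets

scores = model(data)                          # (8, 1), as in the hunk above
loss = criterion(scores.squeeze(1), targets)  # squeeze to match targets' shape

optimizer.zero_grad()  # clear gradients accumulated from the previous step
loss.backward()        # backpropagate the MSE loss
optimizer.step()       # update the transformer's weights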
