stefanonardo commented on Jan 5, 2022
Hi, you can create a Deep ESN by setting `num_layers`, e.g.:

```python
model = ESN(input_size=10, hidden_size=1000, num_layers=3, output_size=2)
```
RidhwanAmin commented on Jan 6, 2022
Thank you for your comment. But I have encountered a new problem: I do not know where to put the washout in the encoder-decoder model. The code is based on an LSTM encoder-decoder.
```python
import numpy as np
import random
import torch
import torch.nn as nn
from torch import optim

class Encoder(nn.Module):
    ...

class Decoder(nn.Module):
    ...

class EncoderDecoder(nn.Module):
    ...
```
stefanonardo commented on Jan 6, 2022
You should pass washout as an argument when you call the ESN forward method.
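For reference, a minimal sketch of such a call (my illustration, not code from this thread; it assumes pytorch-esn's `torchesn.nn.ESN`, whose forward method takes a `washout` list with one entry per batch sample):

```python
import torch
from torchesn.nn import ESN

# Hypothetical data in the same seq_len x batch x features layout as the
# encoder-decoder code below.
x = torch.randn(100, 8, 1)

esn = ESN(input_size=1, hidden_size=500, output_size=1)

# One washout entry per batch sample: the first 30 outputs of each sequence
# are discarded while the reservoir state settles.
washout = [30] * x.size(1)
output, hidden = esn(x, washout)
```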
RidhwanAmin commented on Jan 7, 2022
Thanks again for the comment. Here I have changed the previous code.
```python
import numpy as np
import random
import torch
import torch.nn as nn
from torch import optim

class Encoder(nn.Module):
    ...

class Decoder(nn.Module):
    ...

class EncoderDecoder(nn.Module):
    def __init__(self, hidden_size, input_size=1, output_size=1):
        super(EncoderDecoder, self).__init__()
        self.input_size = input_size
        self.hidden_size = hidden_size
        self.encoder = Encoder(input_size=input_size, hidden_size=hidden_size)
        self.decoder = Decoder(input_size=input_size, hidden_size=hidden_size,
                               output_size=output_size)

    def train_model(self, train, target, epochs, target_len, method='recursive',
                    tfr=0.5, lr=0.01, dynamic_tf=False):
        losses = np.full(epochs, np.nan)
        optimizer = optim.Adam(self.parameters(), lr=lr)
        criterion = nn.MSELoss()
        for e in range(epochs):
            predicted = torch.zeros(target_len, train.shape[1], train.shape[2])
            optimizer.zero_grad()
            _, enc_h = self.encoder(train)
            dec_in = train[-1, :, :]
            dec_h = enc_h
            if method == 'recursive':
                for t in range(target_len):
                    dec_out, dec_h = self.decoder(dec_in, dec_h)
                    predicted[t] = dec_out
                    dec_in = dec_out
            if method == 'teacher_forcing':
                # use teacher forcing
                if random.random() < tfr:
                    for t in range(target_len):
                        dec_out, dec_h = self.decoder(dec_in, dec_h)
                        predicted[t] = dec_out
                        dec_in = target[t, :, :]
                # predict recursively
                else:
                    for t in range(target_len):
                        dec_out, dec_h = self.decoder(dec_in, dec_h)
                        predicted[t] = dec_out
                        dec_in = dec_out
            if method == 'mixed_teacher_forcing':
                # predict using mixed teacher forcing
                for t in range(target_len):
                    dec_out, dec_h = self.decoder(dec_in, dec_h)
                    predicted[t] = dec_out
                    # predict with teacher forcing
                    if random.random() < tfr:
                        dec_in = target[t, :, :]
                    # predict recursively
                    else:
                        dec_in = dec_out
            loss = criterion(predicted, target)
            loss.backward()
            optimizer.step()
            losses[e] = loss.item()
            if e % 10 == 0:
                print(f'Epoch {e}/{epochs}: {round(loss.item(), 4)}')
            # dynamic teacher forcing: decay the ratio as training progresses
            if dynamic_tf and tfr > 0:
                tfr = tfr - 0.02
        return losses

    def predict(self, x, target_len):
        y = torch.zeros(target_len, x.shape[1], x.shape[2])
        _, enc_h = self.encoder(x)
        dec_in = x[-1, :, :]
        dec_h = enc_h
        for t in range(target_len):
            dec_out, dec_h = self.decoder(dec_in, dec_h)
            y[t] = dec_out
            dec_in = dec_out
        return y
```
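(Following the advice above, one hypothetical place for the washout in this model is inside `Encoder.forward`: build the per-sample washout list there and hand it to the ESN. A sketch under the assumptions that `Encoder` wraps an ESN in `self.esn` and that `ESN` is imported from `torchesn.nn`; not code from this thread:)

```python
class Encoder(nn.Module):
    def __init__(self, input_size, hidden_size, washout_steps=30):
        super(Encoder, self).__init__()
        # washout_steps is a hypothetical fixed washout length.
        self.washout_steps = washout_steps
        self.esn = ESN(input_size=input_size, hidden_size=hidden_size,
                       output_size=hidden_size)

    def forward(self, x):
        # pytorch-esn expects one washout entry per batch sample
        # (batch is dimension 1 of a seq_len x batch x features tensor).
        washout = [self.washout_steps] * x.size(1)
        return self.esn(x, washout)
```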
But when I tried to train the model using:
```python
model = EncoderDecoder(hidden_size=hidden_size)
model.train()
model.train_model(x_train, y_train, epochs, ts_target_len,
                  method='mixed_teacher_forcing', tfr=.05, lr=.005)
```
I encountered an error like this:
```
     45 for b in range(tensor.size(1)):
---> 46     if washout[b] > 0:
     47         tmp = tensor[washout[b]:seq_lengths[b], b].clone()
     48         tensor[:seq_lengths[b] - washout[b], b] = tmp
```
Have you got any idea why this error occurred?
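A hedged reading of that traceback (an inference, not confirmed in the thread): the utility indexes `washout[b]` once per batch sample, so it expects a sequence with one entry per sample, and passing a plain int or `None` as `washout` would fail on exactly that line:

```python
# The batch is dimension 1 of a (seq_len, batch, features) tensor.
batch_size = x_train.size(1)

washout = [20] * batch_size  # OK: washout[b] is an int for every sample b
# washout = 20               # TypeError: 'int' object is not subscriptable
# washout = None             # TypeError: 'NoneType' object is not subscriptable
```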
RidhwanAmin commented on Jul 29, 2022
I have an update on this issue. I have succeeded in using PyTorch-esn in an encoder-decoder for time series prediction. The problem is that the accuracy is too low, and I wonder about the reason: I have built an echo state network in TensorFlow before, and its accuracy was not as bad as with PyTorch. Here is my code:
```python
import numpy as np
import random
import torch
import torch.nn as nn
from torch import optim

class Encoder(nn.Module):
    ...

class Decoder(nn.Module):
    ...

class EncoderDecoder(nn.Module):
    ...
```
I attached the whole code of the encoder-decoder for time series forecasting:
https://colab.research.google.com/drive/1RL1L1b-5Fi7P9p-EOpzQ-w7mZRPK4nKm?usp=sharing
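As for the accuracy gap versus TensorFlow: one thing worth ruling out (an assumption on my part, not a diagnosis of the notebook) is the reservoir configuration, since ESN accuracy is very sensitive to it and defaults differ between implementations. The parameter names below are from memory of pytorch-esn and should be checked against its README:

```python
from torchesn.nn import ESN

# Illustrative values only, not recommendations.
model = ESN(input_size=1, hidden_size=500, output_size=1,
            spectral_radius=0.99,   # closer to 1 -> longer reservoir memory
            leaking_rate=0.5,       # < 1 slows the reservoir dynamics
            lambda_reg=1e-4,        # ridge regularization for the readout
            readout_training='cholesky')  # offline ridge-regression readout
```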