 # 0) Prepare data
 np.random.seed(326636)
-X_numpy = np.random.rand(500, 1) * 10
+X_numpy = np.random.rand(1000, 1) * 10
 X_numpy = np.sort(X_numpy, axis=0)

 # Logistic growth equation parameters
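The data-prep hunk doubles the sample count from 500 to 1000 points on [0, 10) and keeps the sort so the predictions can later be drawn as a line over X. The commit does not show the logistic growth equation itself; a minimal sketch of how y_numpy could be produced, assuming the standard logistic form with hypothetical parameters L, k, and x0, looks like this:

import numpy as np

np.random.seed(326636)
X_numpy = np.random.rand(1000, 1) * 10          # 1000 samples in [0, 10), as in the new hunk
X_numpy = np.sort(X_numpy, axis=0)              # sorted so a plot over X is monotone

# Logistic growth equation parameters (hypothetical values, not taken from the commit)
L, k, x0 = 1.0, 1.5, 5.0                        # carrying capacity, growth rate, midpoint
y_numpy = L / (1 + np.exp(-k * (X_numpy - x0)))
y_numpy += np.random.randn(*y_numpy.shape) * 0.02   # small Gaussian noise (assumption)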
@@ -61,10 +61,10 @@ def forward(self, x):
 learning_rate = 0.01

 criterion = nn.MSELoss()
-optimizer = torch.optim.SGD(model.parameters(), lr=learning_rate)
+optimizer = torch.optim.RAdam(model.parameters(), lr=learning_rate)

 # 3) Training loop
-num_epochs = 20000
+num_epochs = 10000
 loss_values = []

 for epoch in range(num_epochs):
@@ -100,8 +100,8 @@ def forward(self, x):
 X_test_tensor = torch.from_numpy(X_test)
 predicted_test = model(X_test_tensor).detach().numpy()

-ax2.plot(X_numpy, y_numpy, 'ro', label='Original data')
-ax2.plot(X_test, predicted_test, 'bo', label='Model predictions')
+ax2.plot(X_numpy, y_numpy, 'ro', label='Original data', markersize=2)
+ax2.plot(X_test, predicted_test, 'b-', label='Model predictions', linewidth=3)
 ax2.set_xlabel('X')
 ax2.set_ylabel('y')
 ax2.set_title('Original Data vs. Model Predictions')
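The plotting hunk shrinks the data markers (markersize=2) and switches the prediction style from blue circles ('bo') to a solid blue line ('b-', linewidth=3), so the fitted curve reads as a curve rather than a second point cloud. The figure and axes setup is not part of the commit; the sketch below assumes a two-panel figure where ax2 holds this comparison, with stand-in arrays in place of the model output:

import numpy as np
import matplotlib.pyplot as plt

X_numpy = np.sort(np.random.rand(1000, 1) * 10, axis=0)
y_numpy = 1.0 / (1 + np.exp(-1.5 * (X_numpy - 5.0)))          # stand-in logistic data
X_test = np.linspace(0, 10, 200).reshape(-1, 1)
predicted_test = 1.0 / (1 + np.exp(-1.5 * (X_test - 5.0)))    # stand-in for model(X_test_tensor)

fig, (ax1, ax2) = plt.subplots(1, 2, figsize=(10, 4))          # assumption: ax1 holds the loss curve
ax2.plot(X_numpy, y_numpy, 'ro', label='Original data', markersize=2)
ax2.plot(X_test, predicted_test, 'b-', label='Model predictions', linewidth=3)
ax2.set_xlabel('X')
ax2.set_ylabel('y')
ax2.set_title('Original Data vs. Model Predictions')
ax2.legend()
plt.show()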
|