forked from linkedin/brooklin
-
Notifications
You must be signed in to change notification settings - Fork 0
/
Copy pathEmbeddedDatastreamCluster.java
308 lines (262 loc) · 11.6 KB
/
EmbeddedDatastreamCluster.java
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
/**
* Copyright 2019 LinkedIn Corporation. All rights reserved.
* Licensed under the BSD 2-Clause License. See the LICENSE file in the project root for license information.
* See the NOTICE file in the project root for additional information regarding copyright ownership.
*/
package com.linkedin.datastream.server;
import java.io.IOException;
import java.util.ArrayList;
import java.util.Collections;
import java.util.List;
import java.util.Map;
import java.util.Objects;
import java.util.Properties;

import org.apache.commons.lang.Validate;
import org.jetbrains.annotations.Nullable;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

import com.linkedin.datastream.DatastreamRestClient;
import com.linkedin.datastream.DatastreamRestClientFactory;
import com.linkedin.datastream.common.DatastreamException;
import com.linkedin.datastream.common.PollUtils;
import com.linkedin.datastream.kafka.KafkaCluster;
import com.linkedin.datastream.testutil.EmbeddedZookeeper;
/**
* Provides a Datastream cluster, including a ZooKeeper cluster and Kafka cluster (if necessary), for testing purposes.
*/
public class EmbeddedDatastreamCluster {
  public static final String CONFIG_ZK_CONNECT = "zookeeper.connect";
  public static final String BOOTSTRAP_SERVERS_CONFIG = "bootstrap.servers";

  private static final Logger LOG = LoggerFactory.getLogger(EmbeddedDatastreamCluster.class);
  private static final String KAFKA_TRANSPORT_FACTORY =
      "com.linkedin.datastream.kafka.KafkaTransportProviderAdminFactory";
  private static final long SERVER_INIT_TIMEOUT_MS = 60000; // 1 minute

  // Kafka cluster backing the "default" transport provider; null when the in-memory transport is used.
  private final KafkaCluster _kafkaCluster;
  private final String _zkAddress;
  // Standalone ZooKeeper instance; created only when no Kafka cluster (with its own ZooKeeper) is supplied.
  private EmbeddedZookeeper _zk = null;
  private final int _numServers;

  // The three lists below are parallel: index i always refers to the same logical server.
  private final List<Integer> _datastreamPorts = new ArrayList<>();
  private final List<Properties> _datastreamServerProperties = new ArrayList<>();
  private final List<DatastreamServer> _servers = new ArrayList<>();

  /**
   * @param connectorProperties connector configs with connector name as the key
   * @param override any server-level config override (may be null)
   * @param kafkaCluster Kafka cluster used as transport; null to use the in-memory transport
   * @param numServers number of datastream servers to provision
   * @param dmsPorts explicit DMS ports, one per server; null for automatic (ephemeral) assignment
   */
  private EmbeddedDatastreamCluster(Map<String, Properties> connectorProperties, Properties override,
      KafkaCluster kafkaCluster, int numServers, @Nullable List<Integer> dmsPorts) throws IOException {
    Validate.isTrue(dmsPorts == null || dmsPorts.size() >= numServers,
        "dmsPorts must supply at least one port per server");
    _kafkaCluster = kafkaCluster;

    // If the datastream cluster doesn't use Kafka as transport, then we set up our own ZooKeeper; otherwise use
    // Kafka's ZooKeeper.
    if (_kafkaCluster != null) {
      _zkAddress = _kafkaCluster.getZkConnection();
    } else {
      _zk = new EmbeddedZookeeper();
      _zkAddress = _zk.getConnection();
    }

    _numServers = numServers;
    for (int i = 0; i < numServers; i++) {
      // Reserve slot i in the parallel lists; the server itself is created lazily in startupServer().
      _servers.add(null);
      _datastreamServerProperties.add(null);
      int dmsPort = dmsPorts != null ? dmsPorts.get(i) : 0; // 0 means "pick an ephemeral port at bind time"
      setupDatastreamProperties(i, dmsPort, _zkAddress, connectorProperties, override, kafkaCluster);
      _datastreamPorts.add(dmsPort);
    }
  }

  /**
   * Create a new test datastream cluster
   * @param connectorProperties a map of the connector configs with connector name as the key
   * @param override any server level config override
   * @param numServers number of datastream servers in the cluster
   * @return a datastream cluster
   */
  public static EmbeddedDatastreamCluster newTestDatastreamCluster(Map<String, Properties> connectorProperties,
      Properties override, int numServers) throws IOException {
    return newTestDatastreamCluster(null, connectorProperties, override, numServers, null);
  }

  /**
   * Create a new test datastream cluster with a single server
   * @param kafkaCluster the Kafka cluster to start up
   * @param connectorProperties a map of the connector configs with connector name as the key
   * @param override any server level config override
   * @return a datastream cluster
   */
  public static EmbeddedDatastreamCluster newTestDatastreamCluster(KafkaCluster kafkaCluster,
      Map<String, Properties> connectorProperties, Properties override) throws IOException {
    return newTestDatastreamCluster(kafkaCluster, connectorProperties, override, 1, null);
  }

  /**
   * Create a new test datastream cluster with a single server and no Kafka cluster
   * @param connectorProperties a map of the connector configs with connector name as the key
   * @param override any server level config override
   * @return a datastream cluster
   */
  public static EmbeddedDatastreamCluster newTestDatastreamCluster(Map<String, Properties> connectorProperties,
      Properties override) throws IOException, DatastreamException {
    return newTestDatastreamCluster(null, connectorProperties, override);
  }

  /**
   * Create a new test datastream cluster
   * @param kafkaCluster Kafka cluster to be used by the datastream cluster
   * @param connectorProperties a map of the connector configs with connector name as the key
   * @param override any server level config override
   * @param numServers number of datastream servers in the cluster
   * @return a datastream cluster
   */
  public static EmbeddedDatastreamCluster newTestDatastreamCluster(KafkaCluster kafkaCluster,
      Map<String, Properties> connectorProperties, Properties override, int numServers)
      throws IllegalArgumentException, IOException {
    return new EmbeddedDatastreamCluster(connectorProperties, override, kafkaCluster, numServers, null);
  }

  /**
   * Create a new test datastream cluster
   * @param kafkaCluster Kafka cluster to be used by the datastream cluster
   * @param connectorProperties a map of the connector configs with connector name as the key
   * @param override any server level config override
   * @param numServers number of datastream servers in the cluster
   * @param dmsPorts the dms ports to be used; accept null if automatic assignment
   * @return a datastream cluster
   */
  public static EmbeddedDatastreamCluster newTestDatastreamCluster(KafkaCluster kafkaCluster,
      Map<String, Properties> connectorProperties, Properties override, int numServers,
      @Nullable List<Integer> dmsPorts) throws IllegalArgumentException, IOException {
    return new EmbeddedDatastreamCluster(connectorProperties, override, kafkaCluster, numServers, dmsPorts);
  }

  /**
   * Build the full server-level Properties for server {@code index} and store them at
   * {@code _datastreamServerProperties.get(index)}.
   */
  private void setupDatastreamProperties(int index, int httpPort, String zkConnectionString,
      Map<String, Properties> connectorProperties, Properties override, KafkaCluster kafkaCluster) {
    String connectorTypes = String.join(",", connectorProperties.keySet());
    Properties properties = new Properties();
    properties.put(DatastreamServerConfigurationConstants.CONFIG_CLUSTER_NAME, "DatastreamCluster");
    properties.put(DatastreamServerConfigurationConstants.CONFIG_ZK_ADDRESS, zkConnectionString);
    properties.put(DatastreamServerConfigurationConstants.CONFIG_HTTP_PORT, String.valueOf(httpPort));
    properties.put(DatastreamServerConfigurationConstants.CONFIG_CONNECTOR_NAMES, connectorTypes);

    // A single transport provider named "default": Kafka-backed if a Kafka cluster was supplied, in-memory otherwise.
    String tpName = "default";
    String tpPrefix = DatastreamServerConfigurationConstants.CONFIG_TRANSPORT_PROVIDER_PREFIX + tpName + ".";
    properties.put(DatastreamServerConfigurationConstants.CONFIG_TRANSPORT_PROVIDER_NAMES, tpName);
    if (_kafkaCluster != null) {
      properties.put(tpPrefix + DatastreamServerConfigurationConstants.CONFIG_FACTORY_CLASS_NAME, KAFKA_TRANSPORT_FACTORY);
      properties.put(String.format("%s%s", tpPrefix, BOOTSTRAP_SERVERS_CONFIG), kafkaCluster.getBrokers());
      properties.put(String.format("%s%s", tpPrefix, CONFIG_ZK_CONNECT), kafkaCluster.getZkConnection());
    } else {
      properties.put(tpPrefix + DatastreamServerConfigurationConstants.CONFIG_FACTORY_CLASS_NAME,
          InMemoryTransportProviderAdminFactory.class.getTypeName());
    }

    properties.putAll(getDomainConnectorProperties(connectorProperties));
    if (override != null) {
      properties.putAll(override);
    }
    _datastreamServerProperties.set(index, properties);
  }

  /**
   * Flatten per-connector Properties into server-level keys of the form
   * {@code <connectorPrefix><connectorName>.<propertyName>}.
   */
  private Properties getDomainConnectorProperties(Map<String, Properties> connectorProperties) {
    Properties domainConnectorProperties = new Properties();
    for (Map.Entry<String, Properties> entry : connectorProperties.entrySet()) {
      String connectorType = entry.getKey();
      Properties props = entry.getValue();
      for (String propertyEntry : props.stringPropertyNames()) {
        domainConnectorProperties.put(DatastreamServerConfigurationConstants.CONFIG_CONNECTOR_PREFIX + connectorType + "." + propertyEntry,
            props.getProperty(propertyEntry));
      }
    }
    return domainConnectorProperties;
  }

  /**
   * @return the Kafka broker list; only valid when the cluster was created with a Kafka cluster
   */
  public String getBrokers() {
    return _kafkaCluster.getBrokers();
  }

  public KafkaCluster getKafkaCluster() {
    return _kafkaCluster;
  }

  public int getNumServers() {
    return _numServers;
  }

  public List<Integer> getDatastreamPorts() {
    return _datastreamPorts;
  }

  public List<Properties> getDatastreamServerProperties() {
    return _datastreamServerProperties;
  }

  /**
   * Construct a datastream REST client for the primary datastream server
   */
  public DatastreamRestClient createDatastreamRestClient() {
    return createDatastreamRestClient(0);
  }

  /**
   * Construct a datastream REST client for the specific datastream server
   */
  public DatastreamRestClient createDatastreamRestClient(int index) {
    return DatastreamRestClientFactory.getClient(String.format("http://localhost:%d/", _datastreamPorts.get(index)));
  }

  public DatastreamServer getPrimaryDatastreamServer() {
    return _servers.get(0);
  }

  public List<DatastreamServer> getAllDatastreamServers() {
    return Collections.unmodifiableList(_servers);
  }

  public String getZkConnection() {
    return _zkAddress;
  }

  /**
   * Start the backing ZooKeeper and/or Kafka cluster if they have not been started yet. Idempotent.
   */
  private void prepareStartup() throws IOException {
    if (_zk != null && !_zk.isStarted()) {
      _zk.startup();
    }

    if (_kafkaCluster != null && !_kafkaCluster.isStarted()) {
      _kafkaCluster.startup();
    }
  }

  /**
   * Start up the datastream server at the given {@code index}
   * @param index the index of the datastream server to start
   */
  public void startupServer(int index) throws IOException, DatastreamException {
    // Validate both bounds up front; the original lower-bound-only check let an out-of-range index
    // surface later as a raw IndexOutOfBoundsException from _datastreamServerProperties.get(index).
    Validate.isTrue(index >= 0 && index < _numServers, "Server index out of bound: " + index);
    if (index < _servers.size() && _servers.get(index) != null) {
      LOG.warn("Server[{}] already exists, skipping.", index);
      return;
    }

    prepareStartup();

    DatastreamServer server = new DatastreamServer(_datastreamServerProperties.get(index));
    // Re-grow the list if a prior shutdown() cleared it, so a cluster can be restarted.
    while (_servers.size() <= index) {
      _servers.add(null);
    }
    _servers.set(index, server);
    server.startup();

    // Update HTTP port in case it is lazily bound
    _datastreamPorts.set(index, server.getHttpPort());
    LOG.info("DatastreamServer[{}] started at port={}.", index, server.getHttpPort());
  }

  /**
   * Start up the datastream cluster
   */
  public void startup() throws IOException, DatastreamException {
    int numServers = _numServers;
    for (int i = 0; i < numServers; i++) {
      startupServer(i);
    }

    // Make sure all servers have started fully; log a warning instead of silently ignoring a timeout.
    _servers.forEach(server -> {
      if (server != null && !PollUtils.poll(server::isStarted, 1000, SERVER_INIT_TIMEOUT_MS)) {
        LOG.warn("A DatastreamServer did not finish starting within {} ms.", SERVER_INIT_TIMEOUT_MS);
      }
    });
  }

  /**
   * Shut down the datastream server at the given {@code index}
   * @param index the index of the datastream server to shut down
   */
  public void shutdownServer(int index) {
    Validate.isTrue(index >= 0 && index < _servers.size(), "Server index out of bound: " + index);
    if (_servers.get(index) == null) {
      LOG.warn("Server[{}] has not been initialized, skipping.", index);
      return;
    }
    _servers.get(index).shutdown();
    // Null the slot instead of removing it: remove() shifted the indices of every later server out of
    // alignment with _datastreamPorts/_datastreamServerProperties, so subsequent shutdownServer(i)
    // calls targeted the wrong server.
    _servers.set(index, null);
    if (_servers.stream().allMatch(Objects::isNull)) {
      // Last server gone: tear down the whole cluster (ZooKeeper/Kafka included), as before.
      shutdown();
    }
  }

  /**
   * Shut down the datastream cluster
   */
  public void shutdown() {
    _servers.forEach(server -> {
      if (server != null && server.isStarted()) {
        server.shutdown();
      }
    });

    _servers.clear();

    if (_kafkaCluster != null) {
      _kafkaCluster.shutdown();
    }

    if (_zk != null) {
      _zk.shutdown();
    }
  }
}