// Licensed to the Apache Software Foundation (ASF) under one
// or more contributor license agreements.  See the NOTICE file
// distributed with this work for additional information
// regarding copyright ownership.  The ASF licenses this file
// to you under the Apache License, Version 2.0 (the
// "License"); you may not use this file except in compliance
// with the License.  You may obtain a copy of the License at
//
//   http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied.  See the License for the
// specific language governing permissions and limitations
// under the License.
import org.apache.kafka.clients.admin.AdminClient
import org.apache.kafka.clients.producer.KafkaProducer
import org.apache.kafka.clients.producer.ProducerRecord
import org.apache.kafka.clients.producer.ProducerConfig

// Regression test: a multi-table routine load whose input contains
// data-quality errors (strict_mode=true) should surface an error URL in
// SHOW ROUTINE LOAD while still loading the valid rows.
suite("test_multi_table_load_data_quality_error", "p0") {
    // Kafka topic(s) whose names double as the CSV fixture file names
    // under <suite dir>/data/<topic>.csv.
    def kafkaCsvTopics = [
        "multi_table_load_data_quality",
    ]
    String enabled = context.config.otherConfigs.get("enableKafkaTest")
    String kafka_port = context.config.otherConfigs.get("kafka_port")
    String externalEnvIp = context.config.otherConfigs.get("externalEnvIp")
    def kafka_broker = "${externalEnvIp}:${kafka_port}"

    if (enabled != null && enabled.equalsIgnoreCase("true")) {
        // Configure the producer used to seed the test topic.
        def props = new Properties()
        props.put(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, "${kafka_broker}".toString())
        props.put(ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG, "org.apache.kafka.common.serialization.StringSerializer")
        props.put(ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG, "org.apache.kafka.common.serialization.StringSerializer")
        def producer = new KafkaProducer<>(props)
        try {
            // Feed every line of each fixture CSV into its topic.
            for (String kafkaCsvTopic in kafkaCsvTopics) {
                def txt = new File("""${context.file.parent}/data/${kafkaCsvTopic}.csv""").text
                def lines = txt.readLines()
                lines.each { line ->
                    logger.info("=====${line}========")
                    def record = new ProducerRecord<>(kafkaCsvTopic, null, line)
                    producer.send(record)
                }
            }
        } finally {
            // FIX: the producer was never closed, leaking its I/O thread and
            // sockets and risking unflushed buffered records; close() flushes
            // pending sends before releasing resources.
            producer.close()
        }
    }

    if (enabled != null && enabled.equalsIgnoreCase("true")) {
        def tableName = "test_multi_table_load_data_quality"
        def tableName1 = "test_multi_table_load_data_quality_error"
        def jobName = "test_multi_table_load_data_quality_error"
        sql """ DROP TABLE IF EXISTS ${tableName} """
        sql """ DROP TABLE IF EXISTS ${tableName1} """
        sql """
            CREATE TABLE IF NOT EXISTS ${tableName} (
                `k1` int(20) NULL,
                `k2` string NULL,
            ) ENGINE=OLAP
            DUPLICATE KEY(`k1`)
            COMMENT 'OLAP'
            DISTRIBUTED BY HASH(`k1`) BUCKETS 3
            PROPERTIES ("replication_allocation" = "tag.location.default: 1");
        """
        sql """
            CREATE TABLE IF NOT EXISTS ${tableName1} (
                `k1` int(20) NULL,
                `k2` string NULL,
            ) ENGINE=OLAP
            DUPLICATE KEY(`k1`)
            COMMENT 'OLAP'
            DISTRIBUTED BY HASH(`k1`) BUCKETS 3
            PROPERTIES ("replication_allocation" = "tag.location.default: 1");
        """

        try {
            // Multi-table form: no target table in the statement; the table is
            // taken from each Kafka record. strict_mode=true makes bad rows
            // count as data-quality errors instead of loading as NULL.
            sql """
                CREATE ROUTINE LOAD ${jobName}
                COLUMNS TERMINATED BY ","
                PROPERTIES
                (
                    "strict_mode" = "true"
                )
                FROM KAFKA
                (
                    "kafka_broker_list" = "${externalEnvIp}:${kafka_port}",
                    "kafka_topic" = "${kafkaCsvTopics[0]}",
                    "property.kafka_default_offsets" = "OFFSET_BEGINNING"
                );
            """
            sql "sync"

            // Poll (up to ~120s) until some rows are visible AND the job has
            // published a non-empty error URL for the bad rows.
            def count = 0
            while (true) {
                def res = sql "select count(*) from ${tableName}"
                def state = sql "show routine load for ${jobName}"
                log.info("routine load state: ${state[0][8].toString()}".toString())
                log.info("routine load statistic: ${state[0][14].toString()}".toString())
                log.info("reason of state changed: ${state[0][17].toString()}".toString())
                log.info("error url: ${state[0][18].toString()}".toString())
                if (res[0][0] > 0 && state[0][18].toString() != "") {
                    break
                }
                if (count >= 120) {
                    log.error("routine load can not visible for long time")
                    // Fails the test: after the timeout the expected row count
                    // was never reached.
                    assertEquals(20, res[0][0])
                    break
                }
                sleep(1000)
                count++
            }
            qt_sql "select * from ${tableName} order by k1"

        } finally {
            // Always tear the job down so reruns don't collide on the name.
            sql "stop routine load for ${jobName}"
        }
    }
}